def test_workflow_definition_public(self):
    # Create a workflow (scope=public) under one project,
    # then make sure it's visible for other projects.
    created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])

    # Assert that the project_id stored is actually the context's
    # project_id, not the one given.
    self.assertEqual(created0.project_id, auth_context.ctx().project_id)
    self.assertNotEqual(
        WF_DEFINITIONS[0]['project_id'],
        auth_context.ctx().project_id
    )

    # Create a new user.
    ctx = auth_context.MistralContext(
        user_id='9-0-44-5',
        project_id='99-88-33',
        user_name='test-user',
        project_name='test-another',
        is_admin=False
    )

    auth_context.set_ctx(ctx)

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])
    self.assertEqual('public', created0.scope)
def post(self, event_trigger):
    """Creates a new event trigger."""
    acl.enforce('event_triggers:create', auth_ctx.ctx())

    values = event_trigger.to_dict()
    input_keys = [k for k in values if values[k]]

    if CREATE_MANDATORY - set(input_keys):
        raise exc.EventTriggerException(
            "Params %s must be provided for creating event trigger." %
            CREATE_MANDATORY
        )

    if values.get('scope') == 'public':
        acl.enforce('event_triggers:create:public', auth_ctx.ctx())

    LOG.debug('Create event trigger: %s', values)

    db_model = rest_utils.rest_retry_on_db_error(
        triggers.create_event_trigger
    )(
        name=values.get('name', ''),
        exchange=values.get('exchange'),
        topic=values.get('topic'),
        event=values.get('event'),
        workflow_id=values.get('workflow_id'),
        scope=values.get('scope'),
        workflow_input=values.get('workflow_input'),
        workflow_params=values.get('workflow_params'),
    )

    return resources.EventTrigger.from_db_model(db_model)
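# A minimal sketch of the mandatory-field check above. CREATE_MANDATORY is
# not defined in this snippet; the set below is an assumption for
# illustration and may differ from the project's actual definition.
CREATE_MANDATORY = {'name', 'exchange', 'topic', 'event', 'workflow_id'}

values = {'name': 'on_vm_create', 'exchange': 'nova', 'topic': 'notifications'}
input_keys = [k for k in values if values[k]]

missing = CREATE_MANDATORY - set(input_keys)
assert missing == {'event', 'workflow_id'}  # these params are still required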
def get_all(self, marker=None, limit=None, sort_keys='created_at',
            sort_dirs='asc', fields='', all_projects=False, **filters):
    """Return all event triggers."""
    acl.enforce('event_triggers:list', auth_ctx.ctx())

    if all_projects:
        acl.enforce('event_triggers:list:all_projects', auth_ctx.ctx())

    LOG.debug(
        "Fetch event triggers. marker=%s, limit=%s, sort_keys=%s, "
        "sort_dirs=%s, fields=%s, all_projects=%s, filters=%s",
        marker, limit, sort_keys, sort_dirs, fields, all_projects, filters
    )

    return rest_utils.get_all(
        resources.EventTriggers,
        resources.EventTrigger,
        db_api.get_event_triggers,
        db_api.get_event_trigger,
        resource_function=None,
        marker=marker,
        limit=limit,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        fields=fields,
        all_projects=all_projects,
        **filters
    )
def test_workflow_definition_public(self):
    # Create a workflow (scope=public) under one project,
    # then make sure it's visible for other projects.
    created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])

    # Assert that the project_id stored is actually the context's
    # project_id, not the one given.
    self.assertEqual(created0.project_id, auth_context.ctx().project_id)
    self.assertNotEqual(
        WF_DEFINITIONS[0]['project_id'],
        auth_context.ctx().project_id
    )

    # Create a new user.
    auth_context.set_ctx(test_base.get_context(default=False))

    fetched = db_api.get_workflow_definitions()

    self.assertEqual(1, len(fetched))
    self.assertEqual(created0, fetched[0])
    self.assertEqual('public', created0.scope)
def test_workbook_public(self):
    # Create a workbook (scope=public) under one project,
    # then make sure it's visible for other projects.
    created0 = db_api.workbook_create(WORKBOOKS[0])

    fetched = db_api.workbooks_get_all()

    self.assertEqual(1, len(fetched))
    self.assertDictEqual(created0, fetched[0])

    # Assert that the project_id stored is actually the context's
    # project_id, not the one given.
    self.assertEqual(created0['project_id'], auth_context.ctx().project_id)
    self.assertNotEqual(
        WORKBOOKS[0]['project_id'],
        auth_context.ctx().project_id
    )

    # Create a new user.
    ctx = auth_context.MistralContext(
        user_id='9-0-44-5',
        project_id='99-88-33',
        user_name='test-user',
        project_name='test-another',
        is_admin=False
    )

    auth_context.set_ctx(ctx)

    fetched = db_api.workbooks_get_all()

    self.assertEqual(1, len(fetched))
    self.assertDictEqual(created0, fetched[0])
    self.assertEqual('public', created0['scope'])
def post(self, namespace=''):
    """Create a new workflow.

    :param namespace: Optional. The namespace to create the workflow
        in. Workflows with the same name can be added to a given
        project if they are in two different namespaces.

    The text is allowed to have definitions of multiple workflows.
    In such a case, they all will be created.
    """
    acl.enforce('workflows:create', context.ctx())

    definition = pecan.request.text
    scope = pecan.request.GET.get('scope', 'private')
    pecan.response.status = 201

    resources.Workflow.validate_scope(scope)

    if scope == 'public':
        acl.enforce('workflows:publicize', context.ctx())

    LOG.debug("Create workflow(s) [definition=%s]", definition)

    db_wfs = rest_utils.rest_retry_on_db_error(workflows.create_workflows)(
        definition,
        scope=scope,
        namespace=namespace
    )

    workflow_list = [
        resources.Workflow.from_db_model(db_wf) for db_wf in db_wfs
    ]

    return resources.Workflows(workflows=workflow_list).to_json()
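# A hypothetical client-side request against the endpoint above. The host,
# port and token handling are assumptions for illustration, not a reference
# to a concrete deployment.
import requests

definition = """
version: '2.0'

my_wf:
  tasks:
    task1:
      action: std.noop
"""

resp = requests.post(
    'http://mistral-host:8989/v2/workflows?scope=private',
    data=definition,
    headers={'Content-Type': 'text/plain', 'X-Auth-Token': '<token>'}
)

assert resp.status_code == 201  # one workflow is created per definition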
def post(self):
    """Create a new action.

    NOTE: This text is allowed to have definitions of multiple actions.
    In this case they all will be created.
    """
    acl.enforce('actions:create', context.ctx())

    definition = pecan.request.text
    scope = pecan.request.GET.get('scope', 'private')
    pecan.response.status = 201

    resources.Action.validate_scope(scope)

    if scope == 'public':
        acl.enforce('actions:publicize', context.ctx())

    LOG.debug("Create action(s) [definition=%s]", definition)

    @rest_utils.rest_retry_on_db_error
    def _create_action_definitions():
        with db_api.transaction():
            return actions.create_actions(definition, scope=scope)

    db_acts = _create_action_definitions()

    action_list = [
        resources.Action.from_db_model(db_act) for db_act in db_acts
    ]

    return resources.Actions(actions=action_list).to_json()
def put(self, id, event_trigger):
    """Updates an existing event trigger.

    The exchange, topic and event cannot be updated. The right way to
    change them is to delete the event trigger first, then create a new
    event trigger with new params.
    """
    acl.enforce('event_triggers:update', auth_ctx.ctx())

    values = event_trigger.to_dict()

    for field in UPDATE_NOT_ALLOWED:
        if values.get(field):
            raise exc.EventTriggerException(
                "Cannot update fields %s of event trigger." %
                UPDATE_NOT_ALLOWED
            )

    LOG.debug('Update event trigger: [id=%s, values=%s]', id, values)

    @rest_utils.rest_retry_on_db_error
    def _update_event_trigger():
        with db_api.transaction():
            # Ensure that the event trigger exists.
            db_api.get_event_trigger(id)

            return triggers.update_event_trigger(id, values)

    db_model = _update_event_trigger()

    return resources.EventTrigger.from_db_model(db_model)
def post(self, member_info):
    """Shares the resource with a new member."""
    acl.enforce('members:create', context.ctx())

    LOG.info(
        "Share resource to a member. [resource_id=%s, "
        "resource_type=%s, member_info=%s].",
        self.resource_id, self.type, member_info
    )

    if not member_info.member_id:
        msg = "Member id must be provided."
        raise exc.WorkflowException(msg)

    wf_db = db_api.get_workflow_definition(self.resource_id)

    if wf_db.scope != 'private':
        msg = "Only a private resource can be shared."
        raise exc.WorkflowException(msg)

    resource_member = {
        'resource_id': self.resource_id,
        'resource_type': self.type,
        'member_id': member_info.member_id,
        'status': 'pending'
    }

    db_member = db_api.create_resource_member(resource_member)

    return resources.Member.from_dict(db_member.to_dict())
def on_task_state_change(self, task_ex_id, state):
    return self._client.call(
        auth_ctx.ctx(),
        'on_task_state_change',
        task_ex_id=task_ex_id,
        state=state
    )
def delete(self, name):
    """Delete the named workbook."""
    acl.enforce('workbooks:delete', context.ctx())

    LOG.info("Delete workbook [name=%s]" % name)

    db_api.delete_workbook(name)
def post(self, event_trigger):
    """Creates a new event trigger."""
    acl.enforce('event_trigger:create', auth_ctx.ctx())

    values = event_trigger.to_dict()
    input_keys = [k for k in values if values[k]]

    if CREATE_MANDATORY - set(input_keys):
        raise exc.EventTriggerException(
            "Params %s must be provided for creating event trigger." %
            CREATE_MANDATORY
        )

    LOG.info('Create event trigger: %s', values)

    db_model = triggers.create_event_trigger(
        values.get('name', ''),
        values.get('exchange'),
        values.get('topic'),
        values.get('event'),
        values.get('workflow_id'),
        workflow_input=values.get('workflow_input'),
        workflow_params=values.get('workflow_params'),
    )

    return resources.EventTrigger.from_dict(db_model.to_dict())
def post(self):
    """Create a new action.

    NOTE: This text is allowed to have definitions of multiple actions.
    In this case they all will be created.
    """
    acl.enforce('actions:create', context.ctx())

    definition = pecan.request.text
    scope = pecan.request.GET.get('scope', 'private')
    pecan.response.status = 201

    if scope not in SCOPE_TYPES.values:
        raise exc.InvalidModelException(
            "Scope must be one of the following: %s; actual: "
            "%s" % (SCOPE_TYPES.values, scope)
        )

    LOG.info("Create action(s) [definition=%s]" % definition)

    db_acts = actions.create_actions(definition, scope=scope)

    models_dicts = [db_act.to_dict() for db_act in db_acts]
    action_list = [Action.from_dict(act) for act in models_dicts]

    return Actions(actions=action_list).to_json()
def delete(self, identifier):
    """Delete a workflow."""
    acl.enforce('workflows:delete', context.ctx())

    LOG.info("Delete workflow [identifier=%s]" % identifier)

    with db_api.transaction():
        db_api.delete_workflow_definition(identifier)
def update_event_trigger(self, trigger):
    return self._client.async_call(
        auth_ctx.ctx(),
        'update_event_trigger',
        trigger=trigger,
        fanout=True,
    )
def post(self):
    """Create a new workflow.

    NOTE: The text is allowed to have definitions of multiple workflows.
    In this case they all will be created.
    """
    acl.enforce('workflows:create', context.ctx())

    definition = pecan.request.text
    scope = pecan.request.GET.get('scope', 'private')
    pecan.response.status = 201

    if scope not in resources.SCOPE_TYPES.values:
        raise exc.InvalidModelException(
            "Scope must be one of the following: %s; actual: "
            "%s" % (resources.SCOPE_TYPES.values, scope)
        )

    LOG.info("Create workflow(s) [definition=%s]" % definition)

    db_wfs = workflows.create_workflows(definition, scope=scope)

    models_dicts = [db_wf.to_dict() for db_wf in db_wfs]

    workflow_list = [
        resources.Workflow.from_dict(wf) for wf in models_dicts
    ]

    return resources.Workflows(workflows=workflow_list).to_json()
def on_action_complete(self, action_ex_id, result, wf_action=False,
                       async_=False):
    """Conveys an action result to Mistral Engine.

    This method should be used by clients of Mistral Engine to update
    the state of an action execution once the action has executed. One
    of the clients of this method is the Mistral REST API server, which
    receives action results from outside action handlers.

    Note: calling this method serves as an event notifying Mistral that
    it possibly needs to move the workflow on, i.e. run other workflow
    tasks for which all dependencies are satisfied.

    :param action_ex_id: Action execution id.
    :param result: Action execution result.
    :param wf_action: If True, the given id points to a workflow
        execution rather than an action execution. This happens when a
        nested workflow execution sends its result to a parent workflow.
    :param async_: If True, run the call in asynchronous mode (w/o
        waiting for completion).
    :return: Action (or workflow, if wf_action=True) execution object.
    """
    call = self._client.async_call if async_ else self._client.sync_call

    return call(
        auth_ctx.ctx(),
        'on_action_complete',
        action_ex_id=action_ex_id,
        result=result,
        wf_action=wf_action
    )
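# Hypothetical usage from code handling an external action result. The
# client factory and the Result module path are assumptions for
# illustration; adjust them to the actual client wiring.
from mistral.rpc import clients as rpc_clients
from mistral.workflow import utils as wf_utils

engine = rpc_clients.get_engine_client()

# Report a successful result; the engine may then continue the workflow
# with any tasks whose dependencies are now satisfied.
engine.on_action_complete(
    '<action-execution-uuid>',
    wf_utils.Result(data={'exit_code': 0})
)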
def on_action_update(self, action_ex_id, state, wf_action=False,
                     async_=False):
    """Conveys an update of action state to Mistral Engine.

    This method should be used by clients of Mistral Engine to update
    the state of an action execution once the action has executed.

    Note: calling this method serves as an event notifying Mistral that
    it may need to change the state of the parent task and workflow. Use
    on_action_complete if the action execution reached a completion
    state.

    :param action_ex_id: Action execution id.
    :param state: Updated state.
    :param wf_action: If True, the given id points to a workflow
        execution rather than an action execution. This happens when a
        nested workflow execution sends its result to a parent workflow.
    :param async_: If True, run the call in asynchronous mode (w/o
        waiting for completion).
    :return: Action (or workflow, if wf_action=True) execution object.
    """
    call = self._client.async_call if async_ else self._client.sync_call

    return call(
        auth_ctx.ctx(),
        'on_action_update',
        action_ex_id=action_ex_id,
        state=state,
        wf_action=wf_action
    )
def start_workflow(self, wf_identifier, wf_namespace='', wf_ex_id=None,
                   wf_input=None, description='', async_=False, **params):
    """Starts a workflow by sending a request to the engine over RPC.

    :param wf_identifier: Workflow identifier.
    :param wf_namespace: Workflow namespace.
    :param wf_input: Workflow input data as a dictionary.
    :param wf_ex_id: Workflow execution id. If passed, it will be set
        in the new execution object.
    :param description: Execution description.
    :param async_: If True, start the workflow in asynchronous mode
        (w/o waiting for completion).
    :param params: Additional workflow type specific parameters.
    :return: Workflow execution.
    """
    call = self._client.async_call if async_ else self._client.sync_call

    return call(
        auth_ctx.ctx(),
        'start_workflow',
        wf_identifier=wf_identifier,
        wf_namespace=wf_namespace,
        wf_ex_id=wf_ex_id,
        wf_input=wf_input or {},
        description=description,
        params=params
    )
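# A hypothetical call starting a workflow through the client above. The
# workflow name, input and 'env' parameter are made-up values for
# illustration; 'env' lands in **params as a type-specific parameter.
engine = rpc_clients.get_engine_client()  # assumed factory, as above

wf_ex = engine.start_workflow(
    'my_wf',
    wf_input={'vm_name': 'test-vm'},
    description='started via RPC client sketch',
    env={'region': 'RegionOne'}
)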
def put(self, id, action_ex):
    """Update the specified action_execution."""
    acl.enforce('action_executions:update', context.ctx())

    LOG.info(
        "Update action_execution [id=%s, action_execution=%s]" %
        (id, action_ex)
    )

    output = action_ex.output

    if action_ex.state == states.SUCCESS:
        result = wf_utils.Result(data=output)
    elif action_ex.state == states.ERROR:
        if not output:
            output = 'Unknown error'

        result = wf_utils.Result(error=output)
    else:
        raise exc.InvalidResultException(
            "Error. Expected one of %s, actual: %s" %
            ([states.SUCCESS, states.ERROR], action_ex.state)
        )

    values = rpc.get_engine_client().on_action_complete(id, result)

    return resources.ActionExecution.from_dict(values)
def _get_client(self):
    ctx = context.ctx()

    LOG.debug("Cinder action security context: %s" % ctx)

    cinder_endpoint = keystone_utils.get_endpoint_for_project(
        service_type='volume'
    )

    cinder_url = keystone_utils.format_url(
        cinder_endpoint.url,
        {'tenant_id': ctx.project_id}
    )

    client = self._client_class(
        ctx.user_name,
        ctx.auth_token,
        project_id=ctx.project_id,
        auth_url=cinder_url,
        region_name=cinder_endpoint.region
    )

    client.client.auth_token = ctx.auth_token
    client.client.management_url = cinder_url

    return client
def delete(self, name):
    """Delete the named environment."""
    acl.enforce('environments:delete', context.ctx())

    LOG.info("Delete environment [name=%s]" % name)

    db_api.delete_environment(name)
def put(self, id, event_trigger):
    """Updates an existing event trigger.

    The exchange, topic and event cannot be updated. The right way to
    change them is to delete the event trigger first, then create a new
    event trigger with new params.
    """
    acl.enforce('event_trigger:update', auth_ctx.ctx())

    values = event_trigger.to_dict()

    for field in UPDATE_NOT_ALLOWED:
        if values.get(field, None):
            raise exc.EventTriggerException(
                "Cannot update fields %s of event trigger." %
                UPDATE_NOT_ALLOWED
            )

    LOG.info('Update event trigger: [id=%s, values=%s]', id, values)

    with db_api.transaction():
        db_api.ensure_event_trigger_exists(id)

        db_model = triggers.update_event_trigger(id, values)

    return resources.EventTrigger.from_dict(db_model.to_dict())
def schedule_call(factory_method_path, target_method_name,
                  run_after, serializers=None, **method_args):
    """Schedules a call and later invokes the target method.

    Adds this call specification to the DB, and then, after run_after
    seconds, the CallScheduler service invokes the target method.

    :param factory_method_path: Full python-specific path to the factory
        method used to construct the target object.
    :param target_method_name: Name of the target object method which
        will be invoked.
    :param run_after: Value in seconds.
    :param serializers: Map of argument names to their serializer class
        paths. Use when an argument is an object of a specific type that
        needs to be serialized. Example:
        { "result": "mistral.utils.serializer.ResultSerializer" }
        The serializer for the object type must implement the serializer
        interface in mistral/utils/serializer.py.
    :param method_args: Target method keyword arguments.
    """
    ctx_serializer = context.RpcContextSerializer(
        context.JsonPayloadSerializer()
    )

    ctx = (
        ctx_serializer.serialize_context(context.ctx())
        if context.has_ctx() else {}
    )

    execution_time = (datetime.datetime.now() +
                      datetime.timedelta(seconds=run_after))

    if serializers:
        for arg_name, serializer_path in serializers.items():
            if arg_name not in method_args:
                raise exc.MistralException(
                    "Serializable method argument %s"
                    " not found in method_args=%s"
                    % (arg_name, method_args)
                )

            try:
                serializer = importutils.import_class(serializer_path)()
            except ImportError as e:
                raise ImportError(
                    "Cannot import class %s: %s" % (serializer_path, e)
                )

            method_args[arg_name] = serializer.serialize(
                method_args[arg_name]
            )

    values = {
        'factory_method_path': factory_method_path,
        'target_method_name': target_method_name,
        'execution_time': execution_time,
        'auth_context': ctx,
        'serializers': serializers,
        'method_arguments': method_args,
        'processing': False
    }

    db_api.create_delayed_call(values)
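# A hypothetical delayed call scheduled through the function above. The
# factory path and serializer path follow the docstring's example but are
# assumptions for illustration.
schedule_call(
    'mistral.rpc.clients.get_engine_client',  # builds the target object
    'on_action_complete',                     # method invoked on it
    run_after=30,
    serializers={'result': 'mistral.utils.serializer.ResultSerializer'},
    action_ex_id='<action-execution-uuid>',
    result=wf_utils.Result(data={'ok': True})
)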
def put(self, identifier=None):
    """Update one or more workflows.

    :param identifier: Optional. If provided, it's the UUID of a
        workflow. Only one workflow can be updated with the identifier
        param.

    The text is allowed to have definitions of multiple workflows.
    In this case they all will be updated.
    """
    acl.enforce('workflows:update', context.ctx())

    definition = pecan.request.text
    scope = pecan.request.GET.get('scope', 'private')

    if scope not in SCOPE_TYPES.values:
        raise exc.InvalidModelException(
            "Scope must be one of the following: %s; actual: "
            "%s" % (SCOPE_TYPES.values, scope)
        )

    LOG.info("Update workflow(s) [definition=%s]" % definition)

    db_wfs = workflows.update_workflows(
        definition,
        scope=scope,
        identifier=identifier
    )

    models_dicts = [db_wf.to_dict() for db_wf in db_wfs]
    workflow_list = [Workflow.from_dict(wf) for wf in models_dicts]

    return (workflow_list[0].to_json() if identifier
            else Workflows(workflows=workflow_list).to_json())
def post(self, wf_ex):
    """Create a new Execution.

    :param wf_ex: Execution object with input content.
    """
    acl.enforce('executions:create', context.ctx())

    LOG.info('Create execution [execution=%s]' % wf_ex)

    engine = rpc.get_engine_client()
    exec_dict = wf_ex.to_dict()

    if not (exec_dict.get('workflow_id') or
            exec_dict.get('workflow_name')):
        raise exc.WorkflowException(
            "Workflow ID or workflow name must be provided. Workflow ID"
            " is recommended."
        )

    result = engine.start_workflow(
        exec_dict.get('workflow_id', exec_dict.get('workflow_name')),
        exec_dict.get('input'),
        exec_dict.get('description', ''),
        **exec_dict.get('params') or {}
    )

    return Execution.from_dict(result)
def delete(self, id):
    """Delete the specified action_execution.

    :param id: UUID of action execution to delete.
    """
    acl.enforce('action_executions:delete', context.ctx())

    LOG.debug("Delete action_execution [id=%s]", id)

    if not cfg.CONF.api.allow_action_execution_deletion:
        raise exc.NotAllowedException(
            "Action execution deletion is not allowed."
        )

    with db_api.transaction():
        action_ex = db_api.get_action_execution(id)

        if action_ex.task_execution_id:
            raise exc.NotAllowedException(
                "Only ad-hoc action execution can be deleted."
            )

        if not states.is_completed(action_ex.state):
            raise exc.NotAllowedException(
                "Only completed action execution can be deleted."
            )

        return db_api.delete_action_execution(id)
def get_all(self):
    """Return all services."""
    acl.enforce('services:list', context.ctx())

    LOG.info("Fetch services.")

    if not cfg.CONF.coordination.backend_url:
        raise exc.CoordinationException("Service API is not supported.")

    service_coordinator = coordination.get_service_coordinator()

    if not service_coordinator.is_active():
        raise exc.CoordinationException(
            "Failed to connect to coordination backend."
        )

    services_list = []
    service_group = ['%s_group' % i for i in launch.LAUNCH_OPTIONS]

    try:
        for group in service_group:
            members = service_coordinator.get_members(group)

            services_list.extend(
                [Service.from_dict({'type': group, 'name': member})
                 for member in members]
            )
    except tooz.coordination.ToozError as e:
        # In the scenario of a network interruption or a manual
        # connection shutdown, ToozError will be raised.
        raise exc.CoordinationException(
            "Failed to get service members from coordination backend. %s"
            % six.text_type(e)
        )

    return Services(services=services_list)
def post(self, action_ex):
    """Create a new action_execution.

    :param action_ex: Action to execute.
    """
    acl.enforce('action_executions:create', context.ctx())

    LOG.debug(
        "Create action_execution [action_execution=%s]",
        action_ex
    )

    name = action_ex.name
    description = action_ex.description or None
    action_input = action_ex.input or {}
    params = action_ex.params or {}

    if not name:
        raise exc.InputException(
            "Please provide at least an action name to run the action."
        )

    values = rpc.get_engine_client().start_action(
        name,
        action_input,
        description=description,
        **params
    )

    return resources.ActionExecution.from_dict(values)
def _get_client(self):
    ctx = context.ctx()

    LOG.debug("Nova action security context: %s" % ctx)

    keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
    nova_endpoint = keystone_utils.get_endpoint_for_project('nova')

    client = self._client_class(
        username=None,
        api_key=None,
        endpoint_type='publicURL',
        service_type='compute',
        auth_token=ctx.auth_token,
        tenant_id=ctx.project_id,
        region_name=keystone_endpoint.region,
        auth_url=keystone_endpoint.url
    )

    client.client.management_url = keystone_utils.format_url(
        nova_endpoint.url,
        {'tenant_id': ctx.project_id}
    )

    return client
def _get_client(self):
    ctx = context.ctx()

    LOG.debug("Swift action security context: %s" % ctx)

    swift_endpoint = keystone_utils.get_endpoint_for_project('swift')

    kwargs = {
        'preauthurl': swift_endpoint.url % {'tenant_id': ctx.project_id},
        'preauthtoken': ctx.auth_token
    }

    return self._client_class(**kwargs)
def start_workflow(self, wf_identifier, wf_input, description='',
                   **params):
    """Starts a workflow by sending a request to the engine over RPC.

    :return: Workflow execution.
    """
    return self._client.sync_call(
        auth_ctx.ctx(),
        'start_workflow',
        workflow_identifier=wf_identifier,
        workflow_input=wf_input or {},
        description=description,
        params=params
    )
def get(self, name, namespace=''):
    """Return the named workbook.

    :param name: Name of workbook to retrieve.
    :param namespace: Optional. Namespace of workbook to retrieve.
    """
    acl.enforce('workbooks:get', context.ctx())

    LOG.debug("Fetch workbook [name=%s, namespace=%s]", name, namespace)

    # Use retries to prevent possible failures.
    r = rest_utils.create_db_retry_object()
    db_model = r.call(db_api.get_workbook, name, namespace=namespace)

    return resources.Workbook.from_db_model(db_model)
def get(self, id):
    """Return the specified Execution."""
    acl.enforce("executions:get", context.ctx())

    LOG.info("Fetch execution [id=%s]" % id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(id)

        # If a single object is requested we need to explicitly load
        # its 'output' attribute. We don't do this for collections to
        # reduce the number of DB queries and network traffic.
        hasattr(wf_ex, 'output')

        return resources.Execution.from_dict(wf_ex.to_dict())
def _get_client(self):
    ctx = context.ctx()

    LOG.debug("Ceilometer action security context: %s" % ctx)

    ceilometer_endpoint = keystone_utils.get_endpoint_for_project(
        'ceilometer'
    )

    endpoint_url = keystone_utils.format_url(
        ceilometer_endpoint.url,
        {'tenant_id': ctx.project_id}
    )

    return self._client_class(
        endpoint_url,
        region_name=ceilometer_endpoint.region,
        token=ctx.auth_token,
        username=ctx.user_name
    )
def post(self, cmd):
    LOG.info("POST" * 5)
    LOG.info(cmd)

    ctx = context.ctx()
    definition = pecan.request.text
    data = json.loads(definition)

    LOG.info("cmd:" + str(cmd))
    LOG.info("ctx:" + str(ctx))
    LOG.info("body:" + str(definition))
    LOG.info("data:" + str(data))
    LOG.info("POST" * 6)

    if cmd == "sessionRollback":  # Only rollback is supported.
        return doSession(data)
def delete_trust(trust_id=None):
    if not trust_id:
        # Try to retrieve the trust id from the context.
        if auth_ctx.has_ctx():
            trust_id = auth_ctx.ctx().trust_id

        if not trust_id:
            return

    keystone_client = keystone.client_for_trusts(trust_id)

    try:
        keystone_client.trusts.delete(trust_id)
    except Exception as e:
        LOG.warning("Failed to delete trust [id=%s]: %s", trust_id, e)
def start_action(self, action_name, action_input, description=None,
                 **params):
    """Starts an action by sending a request to the engine over RPC.

    :return: Action execution.
    """
    return self._client.sync_call(
        auth_ctx.ctx(),
        'start_action',
        action_name=action_name,
        action_input=action_input or {},
        description=description,
        params=params
    )
def get_object_client(self):
    ctx = context.ctx()

    obj_ep = keystone_utils.get_endpoint_for_project('swift')

    kwargs = {
        'preauthurl': obj_ep.url % {'tenant_id': ctx.project_id},
        'preauthtoken': ctx.auth_token,
        'retries': 10,
        'starting_backoff': 3,
        'max_backoff': 120
    }

    return swift_client.Connection(**kwargs)
def run_action(self, action_ex_id, action_cls_str, action_cls_attrs,
               params, safe_rerun, execution_context, redelivered=False,
               target=None, async_=True, timeout=None):
    """Sends a request to run an action to the executor.

    :param action_ex_id: Action execution id.
    :param action_cls_str: Action class name.
    :param action_cls_attrs: Action class attributes.
    :param params: Action input parameters.
    :param safe_rerun: If true, the action will be re-run if the
        executor dies during execution.
    :param execution_context: A dict of values providing information
        about the current execution.
    :param redelivered: Tells if the given action was run before on
        another executor.
    :param target: Target (group of action executors).
    :param async_: If True, run the action in asynchronous mode (w/o
        waiting for completion).
    :param timeout: A period of time in seconds after which execution
        of the action will be interrupted.
    :return: Action result.
    """
    rpc_kwargs = {
        'action_ex_id': action_ex_id,
        'action_cls_str': action_cls_str,
        'action_cls_attrs': action_cls_attrs,
        'params': params,
        'safe_rerun': safe_rerun,
        'execution_context': execution_context,
        'timeout': timeout
    }

    rpc_client_method = (self._client.async_call
                         if async_ else self._client.sync_call)

    LOG.debug(
        "Sending an action to executor [action_ex_id=%s, action_cls=%s]",
        action_ex_id, action_cls_str
    )

    return rpc_client_method(auth_ctx.ctx(), 'run_action', **rpc_kwargs)
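# A hypothetical dispatch through the client above. The IDs are made up and
# the action class is assumed to be the standard echo action; the client
# factory is an assumption, as in the earlier sketches.
executor = rpc_clients.get_executor_client()

executor.run_action(
    action_ex_id='<action-execution-uuid>',
    action_cls_str='mistral.actions.std_actions.EchoAction',
    action_cls_attrs={},
    params={'output': 'hello'},
    safe_rerun=True,
    execution_context={'workflow_execution_id': '<wf-ex-uuid>'},
    async_=True
)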
def get(self, identifier):
    """Return the named action.

    :param identifier: ID or name of the Action to get.
    """
    acl.enforce('actions:get', context.ctx())

    LOG.debug("Fetch action [identifier=%s]", identifier)

    # Use retries to prevent possible failures.
    db_model = rest_utils.rest_retry_on_db_error(
        db_api.get_action_definition
    )(identifier)

    return resources.Action.from_db_model(db_model)
def get(self, identifier, namespace=''):
    """Return the named workflow.

    :param identifier: Name or UUID of the workflow to retrieve.
    :param namespace: Optional. Namespace of the workflow to retrieve.
    """
    acl.enforce('workflows:get', context.ctx())

    LOG.info("Fetch workflow [identifier=%s]", identifier)

    db_model = db_api.get_workflow_definition(
        identifier,
        namespace=namespace
    )

    return resources.Workflow.from_db_model(db_model)
def get(self, identifier, namespace=''):
    """Return a code source.

    :param identifier: Name or UUID of the code source to retrieve.
    :param namespace: Optional. Namespace of the code source to
        retrieve.
    """
    acl.enforce('code_sources:get', context.ctx())

    LOG.debug(
        'Fetch code source [identifier=%s, namespace=%s]',
        identifier, namespace
    )

    db_model = rest_utils.rest_retry_on_db_error(db_api.get_code_source)(
        identifier=identifier,
        namespace=namespace
    )

    return resources.CodeSource.from_db_model(db_model)
def post(self, env):
    """Create a new environment.

    :param env: Required. Environment structure to create.
    """
    acl.enforce('environments:create', context.ctx())

    LOG.debug("Create environment [env=%s]", cut(env))

    self._validate_environment(
        json.loads(wsme_pecan.pecan.request.body.decode()),
        ['name', 'description', 'variables']
    )

    db_model = db_api.create_environment(env.to_dict())

    return resources.Environment.from_db_model(db_model)
def _create_client(self):
    ctx = context.ctx()

    LOG.debug("Magnum action security context: %s" % ctx)

    keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
    auth_url = keystone_endpoint.url
    magnum_url = keystone_utils.get_endpoint_for_project('magnum').url

    return self._get_client_class()(
        magnum_url=magnum_url,
        auth_token=ctx.auth_token,
        project_id=ctx.project_id,
        user_id=ctx.user_id,
        auth_url=auth_url
    )
def check_db_obj_access(db_obj):
    """Check accessibility of a db object."""
    ctx = context.ctx()
    is_admin = ctx.is_admin

    if not is_admin and db_obj.project_id != security.get_project_id():
        raise exc.NotAllowedException(
            "Cannot access %s resource of other projects, ID: %s" %
            (db_obj.__class__.__name__, db_obj.id)
        )

    if not is_admin and hasattr(db_obj, 'is_system') and db_obj.is_system:
        raise exc.InvalidActionException(
            "Cannot modify a system %s resource, ID: %s" %
            (db_obj.__class__.__name__, db_obj.id)
        )
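# A hypothetical update handler guarded by the check above. The handler
# name and the surrounding transaction are assumptions for illustration.
def update_workflow_definition_checked(identifier, values):
    with db_api.transaction():
        wf_def = db_api.get_workflow_definition(identifier)

        # Raises NotAllowedException for another project's resource and
        # InvalidActionException for a system resource.
        check_db_obj_access(wf_def)

        return db_api.update_workflow_definition(identifier, values)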
def _create_client(self):
    ctx = context.ctx()

    LOG.debug("Heat action security context: %s" % ctx)

    heat_endpoint = keystone_utils.get_endpoint_for_project('heat')

    endpoint_url = keystone_utils.format_url(
        heat_endpoint.url,
        {
            'tenant_id': ctx.project_id,
            'project_id': ctx.project_id
        }
    )

    return self._get_client_class()(
        endpoint_url,
        region_name=heat_endpoint.region,
        token=ctx.auth_token,
        username=ctx.user_name
    )
def get(self, identifier, namespace=''):
    """Return the named action.

    :param identifier: Name or UUID of the action to retrieve.
    :param namespace: Optional. Namespace of the action to retrieve.
    """
    acl.enforce('dynamic_actions:get', context.ctx())

    LOG.debug(
        'Fetch dynamic action [identifier=%s, namespace=%s]',
        identifier, namespace
    )

    db_model = rest_utils.rest_retry_on_db_error(
        db_api.get_dynamic_action_definition
    )(identifier=identifier, namespace=namespace)

    return resources.DynamicAction.from_db_model(db_model)
def _create_client(self):
    ctx = context.ctx()

    LOG.debug("Barbican action security context: %s" % ctx)

    barbican_endpoint = keystone_utils.get_endpoint_for_project('barbican')
    keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()

    auth = identity.v2.Token(
        auth_url=keystone_endpoint.url,
        tenant_name=ctx.user_name,
        token=ctx.auth_token,
        tenant_id=ctx.project_id
    )

    return self._get_client_class()(
        project_id=ctx.project_id,
        endpoint=barbican_endpoint.url,
        auth=auth
    )
class ExecutorClient(base.Executor):
    """RPC Executor client."""

    def __init__(self, transport):
        """Constructs an RPC client for the Executor.

        :param transport: Messaging transport.
        :type transport: Transport.
        """
        serializer = auth_ctx.RpcContextSerializer(
            auth_ctx.JsonPayloadSerializer()
        )

        self.topic = cfg.CONF.executor.topic
        self._client = messaging.RPCClient(
            transport,
            messaging.Target(),
            serializer=serializer
        )

    def run_action(self, action_ex_id, action_class_str, attributes,
                   action_params, target=None, async_=True):
        """Sends a request to run an action to the executor.

        :param action_ex_id: Action execution id.
        :param action_class_str: Action class name.
        :param attributes: Action class attributes.
        :param action_params: Action input parameters.
        :param target: Target (group of action executors).
        :param async_: If True, run the action in asynchronous mode
            (w/o waiting for completion).
        :return: Action result.
        """
        kwargs = {
            'action_ex_id': action_ex_id,
            'action_class_str': action_class_str,
            'attributes': attributes,
            'params': action_params
        }

        call_ctx = self._client.prepare(topic=self.topic, server=target)

        rpc_client_method = call_ctx.cast if async_ else call_ctx.call

        return rpc_client_method(auth_ctx.ctx(), 'run_action', **kwargs)
def get_all(self, marker=None, limit=None, sort_keys='created_at',
            sort_dirs='asc', fields='', **filters):
    """Return all environments.

    Where project_id is the same as the requester's, or project_id is
    different but the scope is public.

    :param marker: Optional. Pagination marker for large data sets.
    :param limit: Optional. Maximum number of resources to return in a
        single result. Default value is None for backward compatibility.
    :param sort_keys: Optional. Columns to sort results by.
        Default: created_at, which is backward compatible.
    :param sort_dirs: Optional. Directions to sort corresponding to
        sort_keys, "asc" or "desc" can be chosen. Default: asc. The
        length of sort_dirs can be equal to or less than that of
        sort_keys.
    :param fields: Optional. A specified list of fields of the resource
        to be returned. 'id' will be included automatically in fields
        if it's provided, since it will be used when constructing the
        'next' link.
    :param filters: Optional. A list of filters to apply to the result.
    """
    acl.enforce('environments:list', context.ctx())

    LOG.info(
        "Fetch environments. marker=%s, limit=%s, sort_keys=%s, "
        "sort_dirs=%s, filters=%s",
        marker, limit, sort_keys, sort_dirs, filters
    )

    return rest_utils.get_all(
        Environments,
        Environment,
        db_api.get_environments,
        db_api.get_environment,
        resource_function=None,
        marker=marker,
        limit=limit,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        fields=fields,
        **filters
    )
def delete(self, identifier, namespace=''):
    """Delete a workflow.

    :param identifier: Name or ID of workflow to delete.
    :param namespace: Optional. Namespace of the workflow to delete.
    """
    acl.enforce('workflows:delete', context.ctx())

    LOG.debug(
        "Delete workflow [identifier=%s, namespace=%s]",
        identifier, namespace
    )

    @rest_utils.rest_retry_on_db_error
    def _delete_workflow_definition():
        with db_api.transaction():
            db_api.delete_workflow_definition(identifier, namespace)

    _delete_workflow_definition()
def get_all(self):
    """Return all services."""
    acl.enforce('services:list', context.ctx())

    LOG.debug("Fetch services.")

    if not cfg.CONF.coordination.backend_url:
        raise exc.CoordinationNotSupportedException(
            "Service API is not supported."
        )

    service_coordinator = coordination.get_service_coordinator()

    if not service_coordinator.is_active():
        raise exc.CoordinationException(
            "Failed to connect to coordination backend."
        )

    # Should be the same as LAUNCH_OPTIONS in launch.py.
    # At the moment there is a duplication; we need to solve it.
    # We cannot depend on launch.py since it uses the eventlet monkey
    # patch, which causes problems under wsgi.
    mistral_services = {
        'api', 'engine', 'executor', 'event-engine', 'notifier'
    }

    services_list = []
    service_group = ['%s_group' % i for i in mistral_services]

    try:
        for group in service_group:
            members = service_coordinator.get_members(group)

            members_list = [
                resources.Service.from_dict({
                    'type': group,
                    'name': member
                })
                for member in members
            ]

            services_list.extend(members_list)
    except tooz.coordination.ToozError as e:
        # In the scenario of a network interruption or a manual
        # connection shutdown, ToozError will be raised.
        raise exc.CoordinationException(
            "Failed to get service members from coordination backend. %s"
            % six.text_type(e)
        )

    return resources.Services(services=services_list)
def get(self, id):
    """Return the specified Execution.

    :param id: UUID of execution to retrieve.
    """
    acl.enforce("executions:get", context.ctx())

    LOG.debug("Fetch execution [id=%s]", id)

    wf_ex = _get_workflow_execution(id)

    resource = resources.Execution.from_db_model(wf_ex)

    resource.published_global = (
        data_flow.get_workflow_execution_published_global(wf_ex)
    )

    return resource
def _create_client(self):
    ctx = context.ctx()

    LOG.debug("Senlin action security context: %s" % ctx)

    keystone_endpoint = keystone_utils.get_keystone_endpoint_v2()
    senlin_endpoint = keystone_utils.get_endpoint_for_project('senlin')

    return self._get_client_class()(
        endpoint_url=senlin_endpoint.url,
        token=ctx.auth_token,
        tenant_id=ctx.project_id,
        region_name=senlin_endpoint.region,
        auth_url=keystone_endpoint.url
    )

@classmethod
def _get_fake_client(cls):
    return cls._get_client_class()("http://127.0.0.1:8778")
def post(self):
    """Create a new workbook."""
    acl.enforce('workbooks:create', context.ctx())

    definition = pecan.request.text
    scope = pecan.request.GET.get('scope', 'private')

    resources.Workbook.validate_scope(scope)

    LOG.debug("Create workbook [definition=%s]", definition)

    wb_db = rest_utils.rest_retry_on_db_error(
        workbooks.create_workbook_v2
    )(definition, scope=scope)

    pecan.response.status = 201

    return resources.Workbook.from_db_model(wb_db).to_json()
def delete(self, member_id):
    """Deletes a member from the member list of a resource."""
    acl.enforce('members:delete', context.ctx())

    LOG.debug(
        "Delete resource member. [resource_id=%s, "
        "resource_type=%s, member_id=%s].",
        self.resource_id, self.type, member_id
    )

    rest_utils.rest_retry_on_db_error(db_api.delete_resource_member)(
        self.resource_id,
        self.type,
        member_id
    )
def notify(self, ex_id, data, event, timestamp, publishers):
    ctx = auth_ctx.ctx()

    for entry in publishers:
        params = copy.deepcopy(entry)
        publisher_name = params.pop('type', None)

        if not publisher_name:
            LOG.error('Notification publisher type is not specified.')
            continue

        try:
            publisher = base.get_notification_publisher(publisher_name)
            publisher.publish(ctx, ex_id, data, event, timestamp, **params)
        except Exception:
            LOG.exception(
                'Unable to process event for publisher "%s".',
                publisher_name
            )
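# A hypothetical publishers list for the notify() loop above. The 'webhook'
# publisher type and its 'url' parameter are made-up values for
# illustration; remaining keys are passed to publish() as **params.
publishers = [
    {'type': 'webhook', 'url': 'https://hooks.example.com/mistral'},
    {'url': 'https://no-type.example.com'},  # skipped: 'type' is missing
]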
def get(self, identifier, namespace=''):
    """Return the named workflow.

    :param identifier: Name or UUID of the workflow to retrieve.
    :param namespace: Optional. Namespace of the workflow to retrieve.
    """
    acl.enforce('workflows:get', context.ctx())

    LOG.debug("Fetch workflow [identifier=%s]", identifier)

    # Use retries to prevent possible failures.
    r = rest_utils.create_db_retry_object()
    db_model = r.call(
        db_api.get_workflow_definition,
        identifier,
        namespace=namespace
    )

    return resources.Workflow.from_db_model(db_model)