def patch(self, node_instance_id):
    """
    Update node instance by id

    Body must be a map with an int 'version' field (optimistic locking)
    and may carry 'runtime_properties' and/or 'state'.
    """
    verify_json_content_type()
    # Validate the body is a dict holding an int 'version'; pick the
    # message matching the first failed check.
    if request.json.__class__ is not dict or \
            'version' not in request.json or \
            request.json['version'].__class__ is not int:
        if request.json.__class__ is not dict:
            # NOTE(review): message says "runtimeProperties" while the
            # code below reads 'runtime_properties' - confirm which key
            # clients actually send.
            message = 'Request body is expected to be a map containing ' \
                      'a "version" field and optionally ' \
                      '"runtimeProperties" and/or "state" fields'
        elif 'version' not in request.json:
            message = 'Request body must be a map containing a ' \
                      '"version" field'
        else:
            message = \
                "request body's 'version' field must be an int but" \
                " is of type {0}".format(request.json['version']
                                         .__class__.__name__)
        raise manager_exceptions.BadParametersError(message)
    # Only the updatable fields are populated; the remaining fields are
    # deliberately left as None for the storage-layer update.
    node = models.DeploymentNodeInstance(
        id=node_instance_id,
        node_id=None,
        relationships=None,
        host_id=None,
        deployment_id=None,
        runtime_properties=request.json.get('runtime_properties'),
        state=request.json.get('state'),
        version=request.json['version'])
    get_storage_manager().update_node_instance(node)
    # Re-read the instance so the response reflects the stored state.
    return responses.NodeInstance(**get_storage_manager(
        ).get_node_instance(node_instance_id).to_dict())
def execute_workflow(self, deployment_id, workflow_id, parameters=None,
                     allow_custom_parameters=False, force=False):
    """
    Create and start a new execution of a deployment workflow.

    :param deployment_id: id of the deployment to run the workflow on
    :param workflow_id: name of a workflow declared on the deployment
    :param parameters: caller-supplied execution parameters
    :param allow_custom_parameters: accept parameters not declared by
        the workflow
    :param force: skip the check that no other execution is running
    :return: the new models.Execution, in PENDING status
    :raises manager_exceptions.NonexistentWorkflowError: unknown workflow
    :raises manager_exceptions.ExistingRunningExecutionError: another
        execution is still running and force was not set
    """
    deployment = self.get_deployment(deployment_id)
    if workflow_id not in deployment.workflows:
        raise manager_exceptions.NonexistentWorkflowError(
            'Workflow {0} does not exist in deployment {1}'.format(
                workflow_id, deployment_id))
    workflow = deployment.workflows[workflow_id]
    # Fails if the deployment environment is not yet (or failed being)
    # created.
    self._verify_deployment_environment_created_successfully(deployment_id)
    # validate no execution is currently in progress
    if not force:
        executions = get_storage_manager().executions_list(
            deployment_id=deployment_id)
        # NOTE(review): each status is re-read by id, presumably to avoid
        # stale reads from an eventually-consistent store - confirm.
        running = [
            e.id for e in executions if
            get_storage_manager().get_execution(e.id).status
            not in models.Execution.END_STATES
        ]
        if len(running) > 0:
            raise manager_exceptions.ExistingRunningExecutionError(
                'The following executions are currently running for this '
                'deployment: {0}. To execute this workflow anyway, pass '
                '"force=true" as a query parameter to this request'.format(
                    running))
    # Merge caller parameters with the workflow's declared parameters.
    execution_parameters = \
        BlueprintsManager._merge_and_validate_execution_parameters(
            workflow, workflow_id, parameters, allow_custom_parameters)
    execution_id = str(uuid.uuid4())
    new_execution = models.Execution(
        id=execution_id,
        status=models.Execution.PENDING,
        created_at=str(datetime.now()),
        blueprint_id=deployment.blueprint_id,
        workflow_id=workflow_id,
        deployment_id=deployment_id,
        error='',
        parameters=self._get_only_user_execution_parameters(
            execution_parameters))
    # Persist first, then hand off to the workflow executor.
    get_storage_manager().put_execution(new_execution.id, new_execution)
    workflow_client().execute_workflow(
        workflow_id,
        workflow,
        blueprint_id=deployment.blueprint_id,
        deployment_id=deployment_id,
        execution_id=execution_id,
        execution_parameters=execution_parameters)
    return new_execution
def get(self, deployment_id, _include=None):
    """Return all executions of the given deployment."""
    # Existence check - raises if the deployment is unknown.
    get_storage_manager().get_deployment(deployment_id, include=['id'])
    deployment_executions = get_blueprints_manager()\
        .get_deployment_executions(deployment_id, include=_include)
    result = []
    for execution in deployment_executions:
        result.append(responses.Execution(**execution.to_dict()))
    return result
def execute_workflow(self, deployment_id, workflow_id, parameters=None,
                     allow_custom_parameters=False, force=False):
    """
    Create and start a new execution of a deployment workflow.

    :param deployment_id: id of the deployment to run the workflow on
    :param workflow_id: name of a workflow declared on the deployment
    :param parameters: caller-supplied execution parameters
    :param allow_custom_parameters: accept parameters not declared by
        the workflow
    :param force: skip the check that no other execution is running
    :return: the new models.Execution, in PENDING status
    :raises manager_exceptions.NonexistentWorkflowError: unknown workflow
    :raises manager_exceptions.ExistingRunningExecutionError: another
        execution is still running and force was not set
    """
    deployment = self.get_deployment(deployment_id)
    if workflow_id not in deployment.workflows:
        raise manager_exceptions.NonexistentWorkflowError(
            'Workflow {0} does not exist in deployment {1}'.format(
                workflow_id, deployment_id))
    workflow = deployment.workflows[workflow_id]
    # Fails if the deployment environment is not yet (or failed being)
    # created.
    self._verify_deployment_environment_created_successfully(deployment_id)
    # validate no execution is currently in progress
    if not force:
        executions = get_storage_manager().executions_list(
            deployment_id=deployment_id)
        # NOTE(review): each status is re-read by id, presumably to avoid
        # stale reads from an eventually-consistent store - confirm.
        running = [
            e.id for e in executions if
            get_storage_manager().get_execution(e.id).status
            not in models.Execution.END_STATES]
        if len(running) > 0:
            raise manager_exceptions.ExistingRunningExecutionError(
                'The following executions are currently running for this '
                'deployment: {0}. To execute this workflow anyway, pass '
                '"force=true" as a query parameter to this request'.format(
                    running))
    # Merge caller parameters with the workflow's declared parameters.
    execution_parameters = \
        BlueprintsManager._merge_and_validate_execution_parameters(
            workflow, workflow_id, parameters, allow_custom_parameters)
    execution_id = str(uuid.uuid4())
    new_execution = models.Execution(
        id=execution_id,
        status=models.Execution.PENDING,
        created_at=str(datetime.now()),
        blueprint_id=deployment.blueprint_id,
        workflow_id=workflow_id,
        deployment_id=deployment_id,
        error='',
        parameters=self._get_only_user_execution_parameters(
            execution_parameters))
    # Persist first, then hand off to the workflow executor.
    get_storage_manager().put_execution(new_execution.id, new_execution)
    workflow_client().execute_workflow(
        workflow_id,
        workflow,
        blueprint_id=deployment.blueprint_id,
        deployment_id=deployment_id,
        execution_id=execution_id,
        execution_parameters=execution_parameters)
    return new_execution
def delete(self, plugin_id, **kwargs):
    """Delete a plugin by its id and remove its archive from disk."""
    # Raises if the plugin does not exist.
    plugin = get_blueprints_manager().get_plugin(plugin_id)
    archive_path = _get_plugin_archive_path(plugin_id,
                                            plugin.archive_name)
    # Remove the whole plugin directory; a missing path is tolerated.
    shutil.rmtree(os.path.dirname(archive_path), ignore_errors=True)
    get_storage_manager().delete_plugin(plugin_id)
    return plugin
def delete(self, plugin_id, **kwargs):
    """Delete a plugin by its id and remove its archive from disk."""
    # Raises if the plugin is unknown - nothing is removed in that case.
    plugin = get_blueprints_manager().get_plugin(plugin_id)
    plugin_dir = os.path.dirname(
        _get_plugin_archive_path(plugin_id, plugin.archive_name))
    # Best-effort filesystem cleanup before removing the storage record.
    shutil.rmtree(plugin_dir, ignore_errors=True)
    get_storage_manager().delete_plugin(plugin_id)
    return plugin
def delete_blueprint(self, blueprint_id):
    """
    Delete a blueprint by id.

    Deletion is refused while any deployments of the blueprint exist.

    :param blueprint_id: id of the blueprint to delete
    :return: the storage manager's delete result
    :raises manager_exceptions.DependentExistsError: if deployments of
        this blueprint still exist
    """
    blueprint_deployments = get_storage_manager()\
        .get_blueprint_deployments(blueprint_id)
    if len(blueprint_deployments) > 0:
        # BUG FIX: the original format string used {0} for both the
        # blueprint id and the deployment ids and never passed the
        # blueprint id, so the message printed the deployment ids twice
        # (compare the corrected variant of this method elsewhere in
        # this file).
        raise manager_exceptions.DependentExistsError(
            "Deleting blueprint {0} not allowed - There exist "
            "deployments for this blueprint; Deployments ids: {1}"
            .format(blueprint_id,
                    ','.join([dep.id for dep in blueprint_deployments])))
    return get_storage_manager().delete_blueprint(blueprint_id)
def post(self):
    """Create the provider context."""
    verify_json_content_type()
    request_json = request.json
    # Both fields are mandatory.
    for required_field in ('context', 'name'):
        verify_parameter_in_request_body(required_field, request_json)
    context = models.ProviderContext(name=request_json['name'],
                                     context=request_json['context'])
    get_storage_manager().put_provider_context(context)
    return responses.ProviderContextPostStatus(status='ok'), 201
def delete_blueprint(self, blueprint_id):
    """Delete a blueprint; refused while deployments of it exist."""
    deployments = get_storage_manager()\
        .get_blueprint_deployments(blueprint_id)
    if deployments:
        dependent_ids = ','.join([dep.id for dep in deployments])
        raise manager_exceptions.DependentExistsError(
            "Can't delete blueprint {0} - There exist "
            "deployments for this blueprint; Deployments ids: {1}".format(
                blueprint_id, dependent_ids))
    return get_storage_manager().delete_blueprint(blueprint_id)
def post(self):
    """Create the provider context."""
    verify_json_content_type()
    body = request.json
    verify_parameter_in_request_body('context', body)
    verify_parameter_in_request_body('name', body)
    provider_context = models.ProviderContext(
        name=body['name'],
        context=body['context'])
    get_storage_manager().put_provider_context(provider_context)
    # 201 Created with a simple status payload.
    return responses.ProviderContextPostStatus(status='ok'), 201
def patch(self, execution_id):
    """Update an execution's status (and optional error) by id."""
    verify_json_content_type()
    body = request.json
    verify_parameter_in_request_body('status', body)
    storage = get_storage_manager()
    storage.update_execution_status(
        execution_id, body['status'], body.get('error', ''))
    # Return the freshly stored execution.
    return responses.Execution(
        **storage.get_execution(execution_id).to_dict())
def patch(self, execution_id):
    """Update an execution's status (and optional error) by id."""
    verify_json_content_type()
    body = request.json
    verify_parameter_in_request_body('status', body)
    get_storage_manager().update_execution_status(
        execution_id, body['status'], body.get('error', ''))
    updated = get_storage_manager().get_execution(execution_id)
    return responses.Execution(**updated.to_dict())
def __init__(self, plan, deployment_id, entity_type, node_id):
    # Storage access for entity lookups/updates.
    self.sm = storage_manager.get_storage_manager()
    self._deployment_id = deployment_id
    self._entity_type = entity_type
    self._node_id = node_id
    self._plan = plan
    # NOTE(review): self.blueprint is not assigned in this method -
    # presumably a property defined elsewhere on this class; confirm.
    self._raw_node = utils.get_raw_node(self.blueprint, self._node_id)
def __init__(self):
    # Storage access used by the validation callbacks.
    self.sm = storage_manager.get_storage_manager()
    # Dispatch table mapping a step action type to its validator.
    self._validation_mapper = {
        ACTION_TYPES.ADD: self._validate_add,
        ACTION_TYPES.MODIFY: self._validate_modify,
        ACTION_TYPES.REMOVE: self._validate_remove
    }
def get(self, _include=None, filters=None, pagination=None, **kwargs):
    """List node instances."""
    # Straight pass-through to the storage layer.
    return get_storage_manager().get_node_instances(
        include=_include, filters=filters, pagination=pagination)
def __init__(self):
    # Collaborators used while validating and applying a deployment
    # update.
    self.sm = storage_manager.get_storage_manager()
    self.workflow_client = wf_client.get_workflow_client()
    self._node_handler = DeploymentUpdateNodeHandler()
    self._node_instance_handler = DeploymentUpdateNodeInstanceHandler()
    self._deployment_handler = DeploymentUpdateDeploymentHandler()
    self._step_validator = StepValidator()
def get(self, _include=None, filters=None, **kwargs):
    """List nodes."""
    # Straight pass-through to the storage layer.
    return get_storage_manager().get_nodes(
        include=_include, filters=filters)
def get(self, _include=None, filters=None, pagination=None, **kwargs):
    """List deployment modifications."""
    # Straight pass-through to the storage layer.
    return get_storage_manager().deployment_modifications_list(
        include=_include, filters=filters, pagination=pagination)
def get(self, _include=None, **kwargs):
    """List nodes; a single node when both ids are supplied."""
    args = self._args_parser.parse_args()
    deployment_id = args.get("deployment_id")
    node_id = args.get("node_id")
    # Without both ids, fall back to a filtered listing.
    if not (deployment_id and node_id):
        deployment_id_filter = BlueprintsManager.create_filters_dict(
            deployment_id=deployment_id)
        return get_storage_manager().get_nodes(
            filters=deployment_id_filter, include=_include).items
    try:
        return [get_storage_manager().get_node(deployment_id, node_id)]
    except manager_exceptions.NotFoundError:
        return []
def __init__(self):
    # Storage access used by the validation callbacks.
    self.sm = storage_manager.get_storage_manager()
    # Dispatch table mapping a step action type to its validator.
    self._validation_mapper = {
        ACTION_TYPES.ADD: self._validate_add,
        ACTION_TYPES.MODIFY: self._validate_modify,
        ACTION_TYPES.REMOVE: self._validate_remove
    }
def get(self, _include=None, **kwargs):
    """List deployment modifications, filtered by deployment id."""
    parsed = self._args_parser.parse_args()
    deployment_id_filter = BlueprintsManager.create_filters_dict(
        deployment_id=parsed.get("deployment_id"))
    listing = get_storage_manager().deployment_modifications_list(
        filters=deployment_id_filter, include=_include)
    return listing.items
def get(self, _include=None, filters=None, pagination=None, **kwargs):
    """List uploaded plugins."""
    # Straight pass-through to the storage layer.
    return get_storage_manager().get_plugins(
        include=_include, filters=filters, pagination=pagination)
def get(self, _include=None):
    """List nodes; a single node when both ids are supplied."""
    args = self._args_parser.parse_args()
    deployment_id = args.get('deployment_id')
    node_id = args.get('node_id')
    storage = get_storage_manager()
    if deployment_id and node_id:
        try:
            matched = [storage.get_node(deployment_id, node_id)]
        except manager_exceptions.NotFoundError:
            # An unknown pair yields an empty listing, not a 404.
            matched = []
    else:
        matched = storage.get_nodes(deployment_id, include=_include)
    result = []
    for node in matched:
        result.append(responses.Node(**node.to_dict()))
    return result
def get(self, _include=None):
    """List node instances, optionally filtered by deployment id."""
    deployment_id = self._args_parser.parse_args().get('deployment_id')
    instances = get_storage_manager().get_node_instances(
        deployment_id, include=_include)
    return [responses.NodeInstance(**instance.to_dict())
            for instance in instances]
def get(self, _include=None):
    """List nodes; a single node when both ids are supplied."""
    args = self._args_parser.parse_args()
    deployment_id = args.get('deployment_id')
    node_id = args.get('node_id')
    if deployment_id and node_id:
        try:
            found = [
                get_storage_manager().get_node(deployment_id, node_id)
            ]
        except manager_exceptions.NotFoundError:
            # An unknown pair yields an empty listing, not a 404.
            found = []
    else:
        found = get_storage_manager().get_nodes(deployment_id,
                                                include=_include)
    return [responses.Node(**item.to_dict()) for item in found]
def get(self, _include=None):
    """List node instances, optionally filtered by deployment id."""
    args = self._args_parser.parse_args()
    instances = get_storage_manager().get_node_instances(
        args.get('deployment_id'), include=_include)
    response = []
    for instance in instances:
        response.append(responses.NodeInstance(**instance.to_dict()))
    return response
def cancel_execution(self, execution_id, force=False):
    """
    Cancel an execution by its id

    If force is False (default), this method will request the
    executed workflow to gracefully terminate. It is up to the workflow
    to follow up on that request.
    If force is used, this method will request the abrupt and immediate
    termination of the executed workflow. This is valid for all
    workflows, regardless of whether they provide support for graceful
    termination or not.

    Note that in either case, the execution is not yet cancelled upon
    returning from the method. Instead, it'll be in a 'cancelling' or
    'force_cancelling' status (as can be seen in models.Execution). Once
    the execution is truly stopped, it'll be in 'cancelled' status
    (unless force was not used and the executed workflow doesn't support
    graceful termination, in which case it might simply continue
    regardless and end up with a 'terminated' status)

    :param execution_id: The execution id
    :param force: A boolean describing whether to force cancellation
    :return: The updated execution object
    :rtype: models.Execution
    :raises manager_exceptions.IllegalActionError
    """
    execution = self.get_execution(execution_id)
    # Only PENDING/STARTED executions may be cancelled; a force-cancel
    # is additionally allowed while a graceful cancel is already in
    # progress (CANCELLING).
    if execution.status not in (models.Execution.PENDING,
                                models.Execution.STARTED) and \
            (not force or execution.status != models.Execution
                .CANCELLING):
        raise manager_exceptions.IllegalActionError(
            "Can't {0}cancel execution {1} because it's in status {2}"
            .format(
                'force-' if force else '',
                execution_id,
                execution.status))
    new_status = models.Execution.CANCELLING if not force \
        else models.Execution.FORCE_CANCELLING
    get_storage_manager().update_execution_status(
        execution_id, new_status, '')
    # Re-read so the caller sees the stored status.
    return self.get_execution(execution_id)
def get(self, _include=None, **kwargs):
    """List node instances filtered by deployment id and node name."""
    args = self._args_parser.parse_args()
    # The query argument is called "node_name" but maps to the storage
    # field node_id.
    params_filter = BlueprintsManager.create_filters_dict(
        deployment_id=args.get("deployment_id"),
        node_id=args.get("node_name"))
    listing = get_storage_manager().get_node_instances(
        filters=params_filter, include=_include)
    return listing.items
def cancel_execution(self, execution_id, force=False):
    """
    Cancel an execution by its id

    If force is False (default), this method will request the
    executed workflow to gracefully terminate. It is up to the workflow
    to follow up on that request.
    If force is used, this method will request the abrupt and immediate
    termination of the executed workflow. This is valid for all
    workflows, regardless of whether they provide support for graceful
    termination or not.

    Note that in either case, the execution is not yet cancelled upon
    returning from the method. Instead, it'll be in a 'cancelling' or
    'force_cancelling' status (as can be seen in models.Execution). Once
    the execution is truly stopped, it'll be in 'cancelled' status
    (unless force was not used and the executed workflow doesn't support
    graceful termination, in which case it might simply continue
    regardless and end up with a 'terminated' status)

    :param execution_id: The execution id
    :param force: A boolean describing whether to force cancellation
    :return: The updated execution object
    :rtype: models.Execution
    :raises manager_exceptions.IllegalActionError
    """
    execution = self.get_execution(execution_id)
    # Only PENDING/STARTED executions may be cancelled; a force-cancel
    # is additionally allowed while a graceful cancel is already in
    # progress (CANCELLING).
    if execution.status not in (models.Execution.PENDING,
                                models.Execution.STARTED) and \
            (not force or execution.status != models.Execution
                .CANCELLING):
        raise manager_exceptions.IllegalActionError(
            "Can't {0}cancel execution {1} because it's in status {2}".
            format('force-' if force else '',
                   execution_id,
                   execution.status))
    new_status = models.Execution.CANCELLING if not force \
        else models.Execution.FORCE_CANCELLING
    get_storage_manager().update_execution_status(execution_id,
                                                  new_status,
                                                  '')
    # Re-read so the caller sees the stored status.
    return self.get_execution(execution_id)
def _delete_deployment_environment(self, deployment_id):
    """
    Run the delete_deployment_environment system workflow for the
    deployment and block until it completes (5-minute timeout).

    :param deployment_id: id of the deployment whose environment is
        being torn down
    :raises RuntimeError: if the deletion execution did not end in
        TERMINATED status
    """
    deployment = get_storage_manager().get_deployment(deployment_id)
    deployment_env_deletion_task_id = str(uuid.uuid4())
    wf_id = 'delete_deployment_environment'
    deployment_env_deletion_task_name = \
        'cloudify_system_workflows.deployment_environment.delete'
    context = self._build_context_from_deployment(
        deployment, deployment_env_deletion_task_id, wf_id,
        deployment_env_deletion_task_name)
    kwargs = {'__cloudify_context': context}
    # Track the deletion as an execution so it is visible via the API.
    new_execution = models.Execution(
        id=deployment_env_deletion_task_id,
        status=models.Execution.PENDING,
        created_at=str(datetime.now()),
        blueprint_id=deployment.blueprint_id,
        workflow_id=wf_id,
        deployment_id=deployment_id,
        error='',
        parameters=self._get_only_user_execution_parameters(kwargs))
    get_storage_manager().put_execution(new_execution.id, new_execution)
    deployment_env_deletion_task_async_result = \
        celery_client().execute_task(
            deployment_env_deletion_task_name,
            'cloudify.management',
            deployment_env_deletion_task_id,
            kwargs=kwargs)
    # wait for deployment environment deletion to complete
    deployment_env_deletion_task_async_result.get(timeout=300,
                                                  propagate=True)
    # verify deployment environment deletion completed successfully
    execution = get_storage_manager().get_execution(
        deployment_env_deletion_task_id)
    if execution.status != models.Execution.TERMINATED:
        raise RuntimeError('Failed to delete environment for deployment '
                           '{0}'.format(deployment_id))
def _prepare_and_process_doc(self, data_id, file_server_root,
                             archive_target_path):
    """
    Build a plugin model from an uploaded archive and store it.

    :param data_id: id to assign to the new plugin
    :param file_server_root: file-server root (unused here, kept for
        interface compatibility)
    :param archive_target_path: path of the uploaded plugin archive
    :return: (new plugin model, its archive name)
    :raises manager_exceptions.ConflictError: if a plugin with the same
        package name already has an archive with the same name
    """
    new_plugin = self._create_plugin_from_archive(data_id,
                                                  archive_target_path)
    filter_by_name = {'package_name': new_plugin.package_name}
    plugins = get_storage_manager().get_plugins(filters=filter_by_name)
    # First verify there is no conflict with ANY existing plugin of the
    # same package name.
    for plugin in plugins:
        if plugin.archive_name == new_plugin.archive_name:
            raise manager_exceptions.ConflictError(
                'a plugin archive by the name of {archive_name} already '
                'exists for package with name {package_name} and version '
                '{version}'.format(archive_name=new_plugin.archive_name,
                                   package_name=new_plugin.package_name,
                                   version=new_plugin.package_version))
    # BUG FIX: put_plugin was previously in the loop's else branch, so
    # the plugin was stored once per non-conflicting existing plugin and
    # was never stored at all when no plugins with this package name
    # existed. Store it exactly once, after all conflict checks pass.
    get_storage_manager().put_plugin(new_plugin)
    return new_plugin, new_plugin.archive_name
def _delete_deployment_environment(self, deployment_id):
    """
    Run the delete deployment-environment system workflow for the
    deployment and block until it completes (5-minute timeout).

    :param deployment_id: id of the deployment whose environment is
        being torn down
    :raises RuntimeError: if the deletion execution did not end in
        TERMINATED status
    """
    deployment = get_storage_manager().get_deployment(deployment_id)
    deployment_env_deletion_task_id = str(uuid.uuid4())
    wf_id = 'delete_deployment_environment'
    deployment_env_deletion_task_name = \
        'system_workflows.deployment_environment.delete'
    context = self._build_context_from_deployment(
        deployment, deployment_env_deletion_task_id, wf_id,
        deployment_env_deletion_task_name)
    kwargs = {'__cloudify_context': context}
    # Track the deletion as an execution so it is visible via the API.
    new_execution = models.Execution(
        id=deployment_env_deletion_task_id,
        status=models.Execution.PENDING,
        created_at=str(datetime.now()),
        blueprint_id=deployment.blueprint_id,
        workflow_id=wf_id,
        deployment_id=deployment_id,
        error='',
        parameters=self._get_only_user_execution_parameters(kwargs))
    get_storage_manager().put_execution(new_execution.id, new_execution)
    deployment_env_deletion_task_async_result = \
        celery_client().execute_task(
            deployment_env_deletion_task_name,
            'cloudify.management',
            deployment_env_deletion_task_id,
            kwargs=kwargs)
    # wait for deployment environment deletion to complete
    deployment_env_deletion_task_async_result.get(timeout=300,
                                                  propagate=True)
    # verify deployment environment deletion completed successfully
    execution = get_storage_manager().get_execution(
        deployment_env_deletion_task_id)
    if execution.status != models.Execution.TERMINATED:
        raise RuntimeError('Failed to delete environment for deployment '
                           '{0}'.format(deployment_id))
def patch(self, execution_id, **kwargs):
    """Update an execution's status (and optional error) by id."""
    verify_json_content_type()
    body = request.json
    verify_parameter_in_request_body("status", body)
    get_blueprints_manager().update_execution_status(
        execution_id,
        body["status"],
        body.get("error", ""))
    # Return the freshly stored execution model.
    return get_storage_manager().get_execution(execution_id)
def _create_deployment_environment(self, deployment, deployment_plan, now):
    """
    Asynchronously start the create_deployment_environment system
    workflow for a newly created deployment (via celery).

    :param deployment: the deployment model
    :param deployment_plan: the parsed deployment plan dict
    :param now: creation timestamp for the tracking execution
    """
    deployment_env_creation_task_id = str(uuid.uuid4())
    wf_id = 'create_deployment_environment'
    deployment_env_creation_task_name = \
        'cloudify_system_workflows.deployment_environment.create'
    context = self._build_context_from_deployment(
        deployment, deployment_env_creation_task_id, wf_id,
        deployment_env_creation_task_name)
    # Pass the plugins to install and the policy configuration from the
    # plan to the environment-creation workflow.
    kwargs = {
        DEPLOYMENT_PLUGINS_TO_INSTALL: deployment_plan[
            DEPLOYMENT_PLUGINS_TO_INSTALL],
        'workflow_plugins_to_install': deployment_plan[
            'workflow_plugins_to_install'],
        'policy_configuration': {
            'policy_types': deployment_plan['policy_types'],
            'policy_triggers': deployment_plan['policy_triggers'],
            'groups': deployment_plan['groups'],
        },
        '__cloudify_context': context
    }
    # Track the creation as an execution so it is visible via the API.
    new_execution = models.Execution(
        id=deployment_env_creation_task_id,
        status=models.Execution.PENDING,
        created_at=now,
        blueprint_id=deployment.blueprint_id,
        workflow_id=wf_id,
        deployment_id=deployment.id,
        error='',
        parameters=self._get_only_user_execution_parameters(kwargs))
    get_storage_manager().put_execution(new_execution.id, new_execution)
    # Fire-and-forget; completion is verified later before workflows may
    # run on this deployment.
    celery_client().execute_task(
        deployment_env_creation_task_name,
        'cloudify.management',
        deployment_env_creation_task_id,
        kwargs=kwargs)
def _create_deployment_environment(self, deployment, deployment_plan, now):
    """
    Asynchronously start the deployment-environment creation system
    workflow for a newly created deployment (via celery).

    :param deployment: the deployment model
    :param deployment_plan: the parsed deployment plan dict
    :param now: creation timestamp for the tracking execution
    """
    deployment_env_creation_task_id = str(uuid.uuid4())
    wf_id = 'create_deployment_environment'
    deployment_env_creation_task_name = \
        'system_workflows.deployment_environment.create'
    context = self._build_context_from_deployment(
        deployment, deployment_env_creation_task_id, wf_id,
        deployment_env_creation_task_name)
    # Pass the plugins to install and the policy configuration from the
    # plan to the environment-creation workflow.
    kwargs = {
        'management_plugins_to_install':
            deployment_plan['management_plugins_to_install'],
        'workflow_plugins_to_install':
            deployment_plan['workflow_plugins_to_install'],
        'policy_configuration': {
            'policy_types': deployment_plan['policy_types'],
            'policy_triggers': deployment_plan['policy_triggers'],
            'groups': deployment_plan['groups'],
        },
        '__cloudify_context': context
    }
    # Track the creation as an execution so it is visible via the API.
    new_execution = models.Execution(
        id=deployment_env_creation_task_id,
        status=models.Execution.PENDING,
        created_at=now,
        blueprint_id=deployment.blueprint_id,
        workflow_id=wf_id,
        deployment_id=deployment.id,
        error='',
        parameters=self._get_only_user_execution_parameters(kwargs))
    get_storage_manager().put_execution(new_execution.id, new_execution)
    # Fire-and-forget; completion is verified later before workflows may
    # run on this deployment.
    celery_client().execute_task(deployment_env_creation_task_name,
                                 'cloudify.management',
                                 deployment_env_creation_task_id,
                                 kwargs=kwargs)
def patch(self, node_instance_id, **kwargs):
    """
    Update node instance by id

    Body must be a map with an int "version" field (optimistic locking)
    and may carry "runtime_properties" and/or "state".
    """
    verify_json_content_type()
    # Validate the body is a dict holding an int "version"; pick the
    # message matching the first failed check.
    if (
        request.json.__class__ is not dict
        or "version" not in request.json
        or request.json["version"].__class__ is not int
    ):
        if request.json.__class__ is not dict:
            # NOTE(review): message says "runtimeProperties" while the
            # code below reads "runtime_properties" - confirm which key
            # clients actually send.
            message = (
                "Request body is expected to be a map containing "
                'a "version" field and optionally '
                '"runtimeProperties" and/or "state" fields'
            )
        elif "version" not in request.json:
            message = "Request body must be a map containing a " '"version" field'
        else:
            message = "request body's 'version' field must be an int but" " is of type {0}".format(
                request.json["version"].__class__.__name__
            )
        raise manager_exceptions.BadParametersError(message)
    # Only the updatable fields are populated; the rest stay None for
    # the storage-layer update.
    node = models.DeploymentNodeInstance(
        id=node_instance_id,
        node_id=None,
        relationships=None,
        host_id=None,
        deployment_id=None,
        runtime_properties=request.json.get("runtime_properties"),
        state=request.json.get("state"),
        version=request.json["version"],
    )
    get_storage_manager().update_node_instance(node)
    # Re-read the instance so the response reflects the stored state.
    return get_storage_manager().get_node_instance(node_instance_id)
def patch(self, **kwargs):
    """
    modifies provider context configuration

    Sets the global parallel executions limit inside the provider
    context; only allowed while transient deployment workers mode is
    enabled.
    """
    verify_json_content_type()
    request_json = request.json
    verify_parameter_in_request_body('global_parallel_executions_limit',
                                     request_json)
    provider_ctx = get_storage_manager().get_provider_context()
    # The 'cloudify' section of the provider context holds the bootstrap
    # configuration this endpoint mutates.
    bootstrap_ctx = provider_ctx.context.get('cloudify', {})
    transient_dep_workers_mode_enabled = bootstrap_ctx.get(
        'transient_deployment_workers_mode', {}).get(
        'enabled', TRANSIENT_WORKERS_MODE_ENABLED_DEFAULT)
    if not transient_dep_workers_mode_enabled:
        raise manager_exceptions.BadParametersError(
            "can't modify global_parallel_executions_limit since transient"
            ' deployment workers mode is disabled')
    limit = request_json['global_parallel_executions_limit']
    if type(limit) is not int:
        raise manager_exceptions.BadParametersError(
            'global_parallel_executions_limit parameter should be of type'
            ' int, but is instead of type {0}'.format(
                type(limit).__name__))
    # Write the new limit back into the nested context structure and
    # persist the whole provider context.
    trans_dep_workers_mode = bootstrap_ctx.get(
        'transient_deployment_workers_mode', {})
    trans_dep_workers_mode['global_parallel_executions_limit'] = limit
    bootstrap_ctx['transient_deployment_workers_mode'] = \
        trans_dep_workers_mode
    provider_ctx.context['cloudify'] = bootstrap_ctx
    get_storage_manager().update_provider_context(provider_ctx)
    # Return the freshly stored context.
    return get_storage_manager().get_provider_context()
def patch(self, **kwargs):
    """
    modifies provider context configuration

    Sets the global parallel executions limit inside the provider
    context; only allowed while transient deployment workers mode is
    enabled.
    """
    verify_json_content_type()
    request_json = request.json
    verify_parameter_in_request_body('global_parallel_executions_limit',
                                     request_json)
    provider_ctx = get_storage_manager().get_provider_context()
    # The 'cloudify' section of the provider context holds the bootstrap
    # configuration this endpoint mutates.
    bootstrap_ctx = provider_ctx.context.get('cloudify', {})
    transient_dep_workers_mode_enabled = bootstrap_ctx.get(
        'transient_deployment_workers_mode', {}).get(
        'enabled', TRANSIENT_WORKERS_MODE_ENABLED_DEFAULT)
    if not transient_dep_workers_mode_enabled:
        raise manager_exceptions.BadParametersError(
            "can't modify global_parallel_executions_limit since transient"
            ' deployment workers mode is disabled')
    limit = request_json['global_parallel_executions_limit']
    if type(limit) is not int:
        raise manager_exceptions.BadParametersError(
            'global_parallel_executions_limit parameter should be of type'
            ' int, but is instead of type {0}'.format(
                type(limit).__name__))
    # Write the new limit back into the nested context structure and
    # persist the whole provider context.
    trans_dep_workers_mode = bootstrap_ctx.get(
        'transient_deployment_workers_mode', {})
    trans_dep_workers_mode['global_parallel_executions_limit'] = limit
    bootstrap_ctx['transient_deployment_workers_mode'] = \
        trans_dep_workers_mode
    provider_ctx.context['cloudify'] = bootstrap_ctx
    get_storage_manager().update_provider_context(provider_ctx)
    # Return the freshly stored context.
    return get_storage_manager().get_provider_context()
def patch(self, node_instance_id):
    """
    Update node instance by id

    Body must be a map with an int 'version' field (optimistic locking)
    and may carry 'runtime_properties' and/or 'state'.
    """
    verify_json_content_type()
    # Validate the body is a dict holding an int 'version'; pick the
    # message matching the first failed check.
    if request.json.__class__ is not dict or \
            'version' not in request.json or \
            request.json['version'].__class__ is not int:
        if request.json.__class__ is not dict:
            # NOTE(review): message says "runtimeProperties" while the
            # code below reads 'runtime_properties' - confirm which key
            # clients actually send.
            message = 'Request body is expected to be a map containing ' \
                      'a "version" field and optionally ' \
                      '"runtimeProperties" and/or "state" fields'
        elif 'version' not in request.json:
            message = 'Request body must be a map containing a ' \
                      '"version" field'
        else:
            message = \
                "request body's 'version' field must be an int but" \
                " is of type {0}".format(request.json['version']
                                         .__class__.__name__)
        raise manager_exceptions.BadParametersError(message)
    # Only the updatable fields are populated; the remaining fields are
    # deliberately left as None for the storage-layer update.
    node = models.DeploymentNodeInstance(
        id=node_instance_id,
        node_id=None,
        relationships=None,
        host_id=None,
        deployment_id=None,
        runtime_properties=request.json.get('runtime_properties'),
        state=request.json.get('state'),
        version=request.json['version'])
    get_storage_manager().update_node_instance(node)
    # Re-read the instance so the response reflects the stored state.
    return responses.NodeInstance(
        **get_storage_manager().get_node_instance(
            node_instance_id).to_dict())
def get(self, node_instance_id, _include=None):
    """Get a node instance by id."""
    instance = get_storage_manager().get_node_instance(
        node_instance_id, include=_include)
    # Build the response field-by-field from the stored instance.
    fields = dict(
        id=node_instance_id,
        node_id=instance.node_id,
        host_id=instance.host_id,
        relationships=instance.relationships,
        deployment_id=instance.deployment_id,
        state=instance.state,
        runtime_properties=instance.runtime_properties,
        version=instance.version)
    return responses.NodeInstance(**fields)
def get(self, node_instance_id, _include=None):
    """Get a node instance by id."""
    stored = get_storage_manager().get_node_instance(node_instance_id,
                                                     include=_include)
    return responses.NodeInstance(
        id=node_instance_id,
        node_id=stored.node_id,
        host_id=stored.host_id,
        relationships=stored.relationships,
        deployment_id=stored.deployment_id,
        state=stored.state,
        runtime_properties=stored.runtime_properties,
        version=stored.version)
def delete_deployment(self, deployment_id, ignore_live_nodes=False): storage = get_storage_manager() # Verify deployment exists. storage.get_deployment(deployment_id) # validate there are no running executions for this deployment executions = storage.executions_list(deployment_id=deployment_id) if any(execution.status not in models.Execution.END_STATES for execution in executions): raise manager_exceptions.DependentExistsError( "Can't delete deployment {0} - There are running " "executions for this deployment. Running executions ids: {1}". format( deployment_id, ','.join([ execution.id for execution in executions if execution.status not in models.Execution.END_STATES ]))) if not ignore_live_nodes: node_instances = storage.get_node_instances( deployment_id=deployment_id) # validate either all nodes for this deployment are still # uninitialized or have been deleted if any(node.state not in ('uninitialized', 'deleted') for node in node_instances): raise manager_exceptions.DependentExistsError( "Can't delete deployment {0} - There are live nodes for " "this deployment. Live nodes ids: {1}".format( deployment_id, ','.join([ node.id for node in node_instances if node.state not in ('uninitialized', 'deleted') ]))) self._delete_deployment_environment(deployment_id) return storage.delete_deployment(deployment_id)
def delete_deployment(self, deployment_id, ignore_live_nodes=False): storage = get_storage_manager() # Verify deployment exists. storage.get_deployment(deployment_id) # validate there are no running executions for this deployment executions = storage.executions_list(deployment_id=deployment_id) if any(execution.status not in models.Execution.END_STATES for execution in executions): raise manager_exceptions.DependentExistsError( "Can't delete deployment {0} - There are running " "executions for this deployment. Running executions ids: {1}" .format( deployment_id, ','.join([execution.id for execution in executions if execution.status not in models.Execution.END_STATES]))) if not ignore_live_nodes: node_instances = storage.get_node_instances( deployment_id=deployment_id) # validate either all nodes for this deployment are still # uninitialized or have been deleted if any(node.state not in ('uninitialized', 'deleted') for node in node_instances): raise manager_exceptions.DependentExistsError( "Can't delete deployment {0} - There are live nodes for " "this deployment. Live nodes ids: {1}" .format(deployment_id, ','.join([node.id for node in node_instances if node.state not in ('uninitialized', 'deleted')]))) self._delete_deployment_environment(deployment_id) return storage.delete_deployment(deployment_id)
def _verify_deployment_environment_created_successfully(self,
                                                        deployment_id,
                                                        is_retry=False):
    """Verify the 'create_deployment_environment' workflow finished OK.

    Finds that workflow's execution for *deployment_id* and returns
    silently only if it has TERMINATED; every other status raises.

    :param deployment_id: id of the deployment to check
    :param is_retry: internal flag - True on the single recursive retry
        performed when the execution status is still pending
    :raises RuntimeError: if the creation execution cannot be found,
        FAILED, was cancelled, or is still pending after one retry
    :raises manager_exceptions.DeploymentEnvironmentCreationInProgressError:
        if the creation execution is currently STARTED
    """
    deployment_env_creation_execution = next(
        (execution for execution in
         get_storage_manager().executions_list(
             deployment_id=deployment_id)
         if execution.workflow_id == 'create_deployment_environment'),
        None)
    if not deployment_env_creation_execution:
        raise RuntimeError('Failed to find "create_deployment_environment"'
                           ' execution for deployment {0}'.format(
                               deployment_id))
    # Because of ES eventual consistency, we need to get the execution by
    # its id in order to make sure the read status is correct.
    deployment_env_creation_execution = \
        get_storage_manager().get_execution(
            deployment_env_creation_execution.id)
    if deployment_env_creation_execution.status == \
            models.Execution.TERMINATED:
        # deployment environment creation is complete
        return
    elif deployment_env_creation_execution.status == \
            models.Execution.STARTED:
        # deployment environment creation is still in process
        raise manager_exceptions\
            .DeploymentEnvironmentCreationInProgressError(
                'Deployment environment creation is still in progress, '
                'try again in a minute')
    elif deployment_env_creation_execution.status == \
            models.Execution.FAILED:
        # deployment environment creation execution failed
        raise RuntimeError(
            "Can't launch executions since environment creation for "
            "deployment {0} has failed: {1}".format(
                deployment_id, deployment_env_creation_execution.error))
    elif deployment_env_creation_execution.status in (
            models.Execution.CANCELLED,
            models.Execution.CANCELLING,
            models.Execution.FORCE_CANCELLING):
        # deployment environment creation execution got cancelled
        raise RuntimeError(
            "Can't launch executions since the environment creation for "
            "deployment {0} has been cancelled [status={1}]".format(
                deployment_id, deployment_env_creation_execution.status))
    # Status is 'pending'. Wait a few seconds and retry the verification
    # (to avoid eventual consistency issues). If this is already a failed
    # retry, it might mean there was a problem with the Celery task.
    if not is_retry:
        time.sleep(5)
        self._verify_deployment_environment_created_successfully(
            deployment_id, True)
    else:
        # deployment environment creation failed but not on the workflow
        # level - retrieving the celery task's status for the error
        # message, and the error object from celery if one is available
        celery_task_status = celery_client().get_task_status(
            deployment_env_creation_execution.id)
        error_message = \
            "Can't launch executions since environment for deployment {" \
            "0} hasn't been created (Execution status is still '{1}'). " \
            "Celery task status is ".format(
                deployment_id,
                deployment_env_creation_execution.status)
        if celery_task_status != CELERY_TASK_STATE_FAILURE:
            raise RuntimeError(
                "{0} {1}".format(error_message, celery_task_status))
        else:
            celery_error = celery_client().get_failed_task_error(
                deployment_env_creation_execution.id)
            raise RuntimeError(
                "{0} {1}; Error is of type {2}; Error message: {3}"
                .format(error_message, celery_task_status,
                        celery_error.__class__.__name__, celery_error))
def get_parser_context(sm=None):
    """Return the app-cached parser context, building it on first access.

    :param sm: optional storage manager; a fresh one is obtained when
        not supplied
    :return: ``current_app.parser_context``
    """
    storage = sm or storage_manager.get_storage_manager()
    # Lazily initialize the context from the stored provider context.
    if not hasattr(current_app, 'parser_context'):
        update_parser_context(storage.get_provider_context().context)
    return current_app.parser_context
def get(self, _include=None):
    """Get provider context.

    :param _include: optional field projection forwarded to storage
    :return: a ``responses.ProviderContext`` built from the stored context
    """
    provider_context = get_storage_manager().get_provider_context(
        include=_include)
    return responses.ProviderContext(**provider_context.to_dict())
def _verify_deployment_environment_created_successfully(
        self, deployment_id, is_retry=False):
    """Check that deployment-environment creation completed for a deployment.

    Returns silently only when the 'create_deployment_environment'
    execution has TERMINATED; any other outcome raises.

    :param deployment_id: id of the deployment to verify
    :param is_retry: internal - True on the single recursive retry made
        while the execution is still pending
    :raises RuntimeError: missing/failed/cancelled creation execution,
        or still pending after the retry
    :raises manager_exceptions.DeploymentEnvironmentCreationInProgressError:
        creation execution is currently STARTED
    """
    creation = next(
        (e for e in get_storage_manager().executions_list(
            deployment_id=deployment_id)
         if e.workflow_id == 'create_deployment_environment'),
        None)
    if not creation:
        raise RuntimeError(
            'Failed to find "create_deployment_environment"'
            ' execution for deployment {0}'.format(deployment_id))

    # Re-read the execution by id: ES is eventually consistent, so the
    # status from the list query may be stale.
    creation = get_storage_manager().get_execution(creation.id)
    status = creation.status

    if status == models.Execution.TERMINATED:
        # Environment creation completed successfully.
        return
    if status == models.Execution.STARTED:
        # Still being created - caller should come back later.
        raise manager_exceptions\
            .DeploymentEnvironmentCreationInProgressError(
                'Deployment environment creation is still in progress, '
                'try again in a minute')
    if status == models.Execution.FAILED:
        raise RuntimeError(
            "Can't launch executions since environment creation for "
            "deployment {0} has failed: {1}".format(
                deployment_id, creation.error))
    if status in (models.Execution.CANCELLED,
                  models.Execution.CANCELLING,
                  models.Execution.FORCE_CANCELLING):
        raise RuntimeError(
            "Can't launch executions since the environment creation for "
            "deployment {0} has been cancelled [status={1}]".format(
                deployment_id, status))

    # Status is 'pending': wait briefly and verify once more, to dodge
    # eventual-consistency lag.
    if not is_retry:
        time.sleep(5)
        self._verify_deployment_environment_created_successfully(
            deployment_id, True)
        return

    # Second look still 'pending' - the workflow never ran; pull the
    # celery task status (and error object, if any) for the message.
    task_status = celery_client().get_task_status(creation.id)
    prefix = \
        "Can't launch executions since environment for deployment {" \
        "0} hasn't been created (Execution status is still '{1}'). " \
        "Celery task status is ".format(deployment_id, status)
    if task_status != CELERY_TASK_STATE_FAILURE:
        raise RuntimeError("{0} {1}".format(prefix, task_status))
    task_error = celery_client().get_failed_task_error(creation.id)
    raise RuntimeError(
        "{0} {1}; Error is of type {2}; Error message: {3}".format(
            prefix, task_status,
            task_error.__class__.__name__, task_error))
def get_node_instances():
    """Return all node instances for ``deployment_id``.

    NOTE(review): ``deployment_id`` is a free variable here - this looks
    like a closure nested in a function that binds it; confirm against
    the enclosing scope.
    """
    return get_storage_manager().get_node_instances(deployment_id)
def __init__(self):
    """Acquire the shared workflow client and storage manager."""
    # The two singletons are independent; order is immaterial.
    self.workflow_client = wf_client.get_workflow_client()
    self.sm = storage_manager.get_storage_manager()
def get(self, plugin_id, _include=None, **kwargs):
    """Returns plugin by ID

    :param plugin_id: id of the plugin to fetch
    :param _include: optional field projection forwarded to storage
    """
    storage = get_storage_manager()
    return storage.get_plugin(plugin_id, include=_include)
def sm(self):
    """Accessor for the shared storage manager."""
    storage = get_storage_manager()
    return storage
def sm(self):
    """Return the process-wide storage manager instance."""
    return get_storage_manager()
def __init__(self, plan, deployment_id, entity_type, top_level_entity_id):
    """Capture the modification context for later processing.

    :param plan: deployment plan to operate on
    :param deployment_id: id of the target deployment
    :param entity_type: type of the entity being modified
    :param top_level_entity_id: id of the top-level entity involved
    """
    self.sm = storage_manager.get_storage_manager()
    # Stash the inputs; assignments are independent of one another.
    self._plan = plan
    self._deployment_id = deployment_id
    self._entity_type = entity_type
    self._top_level_entity_id = top_level_entity_id
def execute_task(self, task_name, task_queue, task_id=None, kwargs=None):
    """Mock task execution: update the execution's status and return a stub.

    ``task_name``, ``task_queue`` and ``kwargs`` are accepted only for
    interface compatibility with the real client and are ignored here.

    :param task_id: id of the execution whose status is updated
    :return: a ``MockAsyncResult`` wrapping ``task_id``
    """
    storage = get_storage_manager()
    storage.update_execution_status(task_id, task_state(), '')
    return MockAsyncResult(task_id)
def __init__(self):
    """Bind the shared storage manager to this instance."""
    storage = storage_manager.get_storage_manager()
    self.sm = storage