def _prepare_and_process_doc(self, data_id, file_server_root,
                             archive_target_path, **kwargs):
    """Validate an uploaded plugin archive and create its DB record.

    Accepts either a wagon file or a zip containing a wagon; zips are
    extracted in place over the original path.

    :raises InvalidPluginError: if the upload is neither wagon nor zip,
        or the archive fails verification.
    :raises ConflictError: if an identical archive already exists or is
        currently being installed.
    :return: (new_plugin, dest_path) where dest_path is the archive name
        without the INSTALLING_PREFIX marker.
    """
    # support previous implementation
    wagon_target_path = archive_target_path

    # handle the archive_target_path, which may be zip or wagon
    if not self._is_wagon_file(archive_target_path):
        if not zipfile.is_zipfile(archive_target_path):
            raise manager_exceptions.InvalidPluginError(
                'input can be only a wagon or a zip file.')
        archive_name = unzip(archive_target_path,
                             logger=current_app.logger)
        os.remove(archive_target_path)
        shutil.move(archive_name, archive_target_path)
        try:
            wagon_target_path, _ = \
                self._verify_archive(archive_target_path)
        except RuntimeError as re:
            # BUG FIX: exceptions have no `.message` attribute on
            # Python 3 - the original raised AttributeError here
            # instead of reporting the real problem. Use str(re).
            raise manager_exceptions.InvalidPluginError(str(re))

    args = get_args_and_verify_arguments([
        Argument('title'),
        Argument('private_resource', type=boolean),
        Argument('visibility')])
    visibility = kwargs.get(_VISIBILITY, None)
    new_plugin = self._create_plugin_from_archive(data_id,
                                                  args.title,
                                                  wagon_target_path,
                                                  args.private_resource,
                                                  visibility)

    filter_by_name = {'package_name': new_plugin.package_name}
    sm = get_resource_manager().sm
    plugins = sm.list(Plugin, filters=filter_by_name)

    for plugin in plugins:
        # Same archive already registered for this package/version.
        if plugin.archive_name == new_plugin.archive_name:
            raise manager_exceptions.ConflictError(
                'a plugin archive by the name of {archive_name} already '
                'exists for package with name {package_name} and version '
                '{version}'.format(archive_name=new_plugin.archive_name,
                                   package_name=new_plugin.package_name,
                                   version=new_plugin.package_version))
        # A concurrent install of the same archive is in flight.
        if is_plugin_installing(new_plugin, plugin):
            raise manager_exceptions.ConflictError(
                'a plugin archive by the name of {archive_name} for '
                'package with name {package_name} and version {version} '
                'is currently being installed'.format(
                    archive_name=new_plugin.archive_name,
                    package_name=new_plugin.package_name,
                    version=new_plugin.package_version))
    dest_path = new_plugin.archive_name
    # Mark the record as mid-install; the prefix is presumably stripped
    # once installation completes (handled elsewhere).
    new_plugin.archive_name = '{0}{1}'.format(INSTALLING_PREFIX,
                                              new_plugin.archive_name)
    sm.put(new_plugin)
    return new_plugin, dest_path
def _prepare_and_process_doc(self, data_id, file_server_root,
                             archive_target_path, **kwargs):
    """Register a plugin built from the uploaded archive.

    :raises ConflictError: if this exact archive was already uploaded
        for the same package.
    :return: (new_plugin, archive_name)
    """
    args = self._get_args()
    new_plugin = self._create_plugin_from_archive(
        data_id, archive_target_path, args.private_resource)

    sm = get_resource_manager().sm
    same_package = sm.list(
        Plugin, filters={'package_name': new_plugin.package_name})

    # A clash on the archive name means this exact build already exists
    # for the package - refuse the upload.
    for existing in same_package:
        if existing.archive_name == new_plugin.archive_name:
            raise manager_exceptions.ConflictError(
                'a plugin archive by the name of {archive_name} already '
                'exists for package with name {package_name} and version '
                '{version}'.format(archive_name=new_plugin.archive_name,
                                   package_name=new_plugin.package_name,
                                   version=new_plugin.package_version))
    sm.put(new_plugin)
    return new_plugin, new_plugin.archive_name
def update_node_instance(self, node):
    """Persist changed fields of a node instance via Elasticsearch,
    using ES document versioning for optimistic concurrency control.

    :raises ConflictError: on a version conflict (ES status 409).
    """
    current = self.get_node_instance(node.id)

    # Copy over only the fields the caller actually supplied.
    for attr in ('state', 'runtime_properties', 'relationships'):
        value = getattr(node, attr)
        if value is not None:
            setattr(current, attr, value)

    updated = current.to_dict()
    # ES manages the version itself; it must not appear in the body.
    del updated['version']
    try:
        self._connection.index(index=STORAGE_INDEX_NAME,
                               doc_type=NODE_INSTANCE_TYPE,
                               id=node.id,
                               body=updated,
                               version=node.version,
                               **MUTATE_PARAMS)
    except elasticsearch.exceptions.TransportError as e:
        if e.status_code != 409:
            raise
        # 409 means our version was stale - surface a domain error.
        raise manager_exceptions.ConflictError(
            'Node instance update conflict [current_version={0}, '
            'updated_version={1}]'.format(
                current.version, node.version))
def _validate_unique_resource_id_per_tenant(self, instance):
    """Assert that only a single resource exists with a given id in a
    given tenant
    """
    # Uniqueness is only enforced for tenant-scoped resources that
    # declare their ids unique.
    if not (instance.is_resource and instance.is_id_unique):
        return

    matches = self._get_unique_resource_id_query(
        type(instance), instance.id).all()

    # Exactly one hit (the instance itself) is the legal state; anything
    # else is a duplicate on this tenant or a global-visibility clash.
    if len(matches) == 1:
        return

    # Undo the insert before reporting the conflict.
    db.session.delete(instance)
    self._safe_commit()
    raise manager_exceptions.ConflictError(
        '{0} already exists on {1} or with global visibility'.format(
            instance, self.current_tenant
        )
    )
def update_node_instance(self, node):
    """Update a stored node instance, rejecting stale versions.

    :raises ConflictError: when the supplied version does not match the
        stored one (version 0 bypasses the check for legacy callers).
    """
    current = self.get_node_instance(node.id)

    # Validate version - this is not 100% safe since elasticsearch
    # update doesn't accept the version field.
    if node.version != 0 and current.version != node.version:
        raise manager_exceptions.ConflictError(
            'Node instance update conflict [current_version={0}, updated_'
            'version={1}]'.format(current.version, node.version))

    # Apply only the fields the caller actually supplied.
    for attr in ('state', 'runtime_properties', 'relationships'):
        new_value = getattr(node, attr)
        if new_value is not None:
            setattr(current, attr, new_value)

    doc = current.to_dict()
    del doc['version']
    self._connection.index(index=STORAGE_INDEX_NAME,
                           doc_type=NODE_INSTANCE_TYPE,
                           id=node.id,
                           body=doc,
                           **MUTATE_PARAMS)
def put_blueprint(self, blueprint_id, blueprint):
    """Store a blueprint, failing if its id is already taken."""
    key = str(blueprint_id)
    data = self._load_data()
    if key in data[BLUEPRINTS]:
        raise manager_exceptions.ConflictError(
            'Blueprint {0} already exists'.format(blueprint_id))
    data[BLUEPRINTS][key] = blueprint
    self._dump_data(data)
def put_deployment(self, deployment_id, deployment):
    """Store a deployment, failing if its id is already taken."""
    key = str(deployment_id)
    data = self._load_data()
    if key in data[DEPLOYMENTS]:
        raise manager_exceptions.ConflictError(
            'Deployment {0} already exists'.format(deployment_id))
    data[DEPLOYMENTS][key] = deployment
    self._dump_data(data)
def put_execution(self, execution_id, execution):
    """Store an execution, failing if its id is already taken."""
    key = str(execution_id)
    data = self._load_data()
    if key in data[EXECUTIONS]:
        raise manager_exceptions.ConflictError(
            'Execution {0} already exists'.format(execution_id))
    data[EXECUTIONS][key] = execution
    self._dump_data(data)
def patch(self, node_instance_id, **kwargs):
    """Update node instance by id.

    Expects a JSON map with a ``version`` field and optionally
    ``runtime_properties`` and/or ``state``.

    :raises BadParametersError: if the request body is not a mapping.
    :raises ConflictError: if the stored version is newer than the one
        supplied (optimistic-lock failure).
    """
    # BUG FIX: collections.Mapping was removed in Python 3.10; the ABC
    # lives in collections.abc. The local import guarantees the
    # submodule is loaded regardless of the file-level import style.
    import collections.abc

    request_dict = get_json_and_verify_params({'version': {'type': int}})
    if not isinstance(request.json, collections.abc.Mapping):
        raise manager_exceptions.BadParametersError(
            'Request body is expected to be a map containing a "version" '
            'field and optionally "runtimeProperties" and/or "state" '
            'fields')

    # Added for backwards compatibility with older client versions that
    # had version=0 by default
    version = request_dict['version'] or 1

    instance = get_storage_manager().get(models.NodeInstance,
                                         node_instance_id,
                                         locking=True)
    if instance.version > version:
        raise manager_exceptions.ConflictError(
            'Node instance update conflict [current version={0}, '
            'update version={1}]'.format(instance.version, version))

    # Only update if new values were included in the request
    instance.runtime_properties = request_dict.get(
        'runtime_properties', instance.runtime_properties)
    instance.state = request_dict.get('state', instance.state)
    return get_storage_manager().update(instance)
def put_provider_context(self, provider_context):
    """Store the provider context; only one may ever be set."""
    data = self._load_data()
    already_set = PROVIDER_CONTEXT_ID in data[PROVIDER_CONTEXT]
    if already_set:
        raise manager_exceptions.ConflictError(
            'Provider context already set')
    data[PROVIDER_CONTEXT][PROVIDER_CONTEXT_ID] = provider_context
    self._dump_data(data)
def put_plugin(self, plugin):
    """Store a plugin, failing if its id is already taken."""
    key = str(plugin.id)
    data = self._load_data()
    if key in data[PLUGINS]:
        raise manager_exceptions.ConflictError(
            'Plugin {0} already exists'.format(plugin.id))
    data[PLUGINS][key] = plugin
    self._dump_data(data)
def _validate_unique_resource_id_per_tenant(self, instance):
    """Assert that only a single resource exists with a given id in a
    given tenant
    """
    # Uniqueness is only enforced for tenant-scoped resources that
    # declare their ids unique.
    if not (instance.is_resource and instance.is_id_unique):
        return

    matches = self._get_unique_resource_id_query(
        type(instance), instance.id, instance.tenant).all()

    # A flushed instance appears in its own query results, so one hit is
    # legal in that case; for an unflushed instance any hit is a clash.
    already_flushed = inspect(instance).persistent
    allowed_hits = 1 if already_flushed else 0
    if len(matches) <= allowed_hits:
        return

    # Remove the offending instance from the session before raising:
    # delete it if flushed, otherwise just drop it from the session.
    if already_flushed:
        db.session.delete(instance)
    else:
        db.session.expunge(instance)
    self._safe_commit()
    raise manager_exceptions.ConflictError(
        '{0} already exists on {1} or with global visibility'.format(
            instance, self.current_tenant
        )
    )
def assert_no_cyclic_dependencies(self,
                                  source_deployment,
                                  target_deployment):
    """Raise if adding the edge target -> source creates a dependency
    cycle.

    Runs an iterative DFS over a deep copy of ``self.graph`` (edges are
    consumed as traversed, destroying the copy).

    :raises ConflictError: when a cycle would be introduced.
    """
    graph = copy.deepcopy(self.graph)
    graph.setdefault(target_deployment, set()).add(source_deployment)
    # DFS to find cycles
    v = list(graph)[0]
    recursion_stack = [v]
    while graph:
        while v not in graph.keys():
            # Dead end - backtrack along the stack, or restart from an
            # arbitrary remaining vertex when the stack empties.
            recursion_stack.pop()
            if recursion_stack:
                # BUG FIX: the original evaluated recursion_stack[-1]
                # without assigning it, so `v` never actually
                # backtracked and the stack was drained to empty.
                v = recursion_stack[-1]
            else:
                v = list(graph)[0]
                recursion_stack = [v]
        # Consume one outgoing edge of v; drop v once exhausted.
        u = graph[v].pop()
        if not graph[v]:
            del graph[v]
        v = u
        # Reaching a vertex already on the stack closes a cycle.
        if v in recursion_stack:
            raise manager_exceptions.ConflictError(
                'Deployment creation results in cyclic inter-deployment '
                'dependencies.')
        recursion_stack.append(v)
def put_snapshot(self, snapshot_id, snapshot):
    """Store a snapshot, failing if its id is already taken."""
    key = str(snapshot_id)
    data = self._load_data()
    if key in data[SNAPSHOTS]:
        raise manager_exceptions.ConflictError(
            'Snapshot {0} already exists'.format(snapshot_id))
    data[SNAPSHOTS][key] = snapshot
    self._dump_data(data)
def validate_no_active_updates_per_deployment(self, deployment_id,
                                              force=False):
    """
    Validate there are no active updates for provided deployment.
    raises conflict error if there are any.

    :param deployment_id: deployment id
    :param force: force
    """
    existing_updates = \
        self.list_deployment_updates(filters={
            'deployment_id': deployment_id
        }).items
    # An update is "active" until it reaches a terminal state.
    active_updates = [
        u for u in existing_updates
        if u.state not in (STATES.SUCCESSFUL, STATES.FAILED)
    ]
    if active_updates:
        if not force:
            raise manager_exceptions.ConflictError(
                'there are deployment updates still active; '
                'update IDs: {0}'.format(', '.join(
                    [u.id for u in active_updates])))
        # real active updates are those with
        # an execution in a running status
        real_active_updates = \
            [u for u in active_updates if u.execution_id is not None
             and self.sm.get(models.Execution,
                             u.execution_id).status
             not in ExecutionState.END_STATES]
        if real_active_updates:
            # Even `force` cannot override an update whose execution is
            # genuinely still running.
            raise manager_exceptions.ConflictError(
                'there are deployment updates still active; the "force" '
                'flag was used yet these updates have actual executions '
                'running update IDs: {0}'.format(', '.join(
                    [u.id for u in real_active_updates])))
        else:
            # the active updates aren't really active - either their
            # executions were failed/cancelled, or the update failed at
            # the finalizing stage.
            # updating their states to failed and continuing.
            for dep_update in active_updates:
                dep_update.state = STATES.FAILED
                self.sm.update(dep_update)
def put_deployment_update(self, deployment_update):
    """Store a deployment update, failing if its id is already taken."""
    key = str(deployment_update.id)
    data = self._load_data()
    if key in data[DEPLOYMENT_UPDATES]:
        raise manager_exceptions.ConflictError(
            'Deployment Update {0} already exists'.format(
                deployment_update.id))
    data[DEPLOYMENT_UPDATES][key] = deployment_update
    self._dump_data(data)
def put_deployment_modification(self, modification_id, modification):
    """Store a deployment modification, failing on a duplicate id."""
    key = str(modification_id)
    data = self._load_data()
    if key in data[DEPLOYMENT_MODIFICATIONS]:
        raise manager_exceptions.ConflictError(
            'Deployment modification {0} already exists'
            .format(modification_id))
    data[DEPLOYMENT_MODIFICATIONS][key] = modification
    self._dump_data(data)
def put_node_instance(self, node):
    """Store a node instance, failing if its id is already taken.

    :return: 1, the initial version of the stored instance.
    """
    key = str(node.id)
    data = self._load_data()
    if key in data[NODE_INSTANCES]:
        raise manager_exceptions.ConflictError(
            'Node {0} already exists'.format(node.id))
    data[NODE_INSTANCES][key] = node
    self._dump_data(data)
    return 1
def _put_doc_if_not_exists(self, doc_type, doc_id, value):
    """Create an ES document, translating a duplicate-id conflict from
    Elasticsearch into the manager's own ConflictError."""
    try:
        # `create` (unlike `index`) fails with 409 if the id exists.
        self._connection.create(index=STORAGE_INDEX_NAME,
                                doc_type=doc_type,
                                id=doc_id,
                                body=value,
                                **MUTATE_PARAMS)
    except elasticsearch.exceptions.ConflictError:
        raise manager_exceptions.ConflictError(
            '{0} {1} already exists'.format(doc_type, doc_id))
def put_node(self, node):
    """Store a node keyed by '<deployment_id>_<node_id>'.

    :return: 1, the initial version of the stored node.
    """
    storage_key = '{0}_{1}'.format(node.deployment_id, node.id)
    data = self._load_data()
    if str(storage_key) in data[NODES]:
        raise manager_exceptions.ConflictError(
            'Node {0} already exists'.format(storage_key))
    data[NODES][str(storage_key)] = node
    self._dump_data(data)
    return 1
def validate_no_active_updates_per_deployment(self, deployment_id):
    """Raise ConflictError if the deployment has unfinished updates."""
    updates = self.list_deployment_updates(
        filters={'deployment_id': deployment_id}).items
    terminal_states = (STATES.SUCCESSFUL, STATES.FAILED)
    pending = [u for u in updates if u.state not in terminal_states]
    if pending:
        raise manager_exceptions.ConflictError(
            'there are deployment updates still active; update IDs: {0}'
            .format(', '.join([u.id for u in pending])))
def _validate_new_name(self, request_dict, storage_manager, current_name):
    """Check a proposed site rename: valid input, no existing site."""
    new_name = request_dict.get('new_name')
    # Absent or identical names are a no-op rename and always allowed.
    if not new_name or new_name == current_name:
        return
    validate_inputs({'new_name': new_name})
    if storage_manager.exists(models.Site, new_name):
        raise manager_exceptions.ConflictError(
            'Invalid new name `{0}`, it already exists on {1} or '
            'with global visibility'.format(new_name,
                                            utils.current_tenant))
def _safe_add(self, instance):
    """Add `instance` to the DB session, and attempt to commit

    :param instance: Instance to be added to the DB
    """
    db.session.add(instance)
    # Pre-build the error _safe_commit should raise on a duplicate-id
    # constraint violation.
    duplicate_error = manager_exceptions.ConflictError(
        '{0} with ID `{1}` already exists'.format(
            type(instance).__name__, instance.id))
    self._safe_commit(duplicate_error)
    return instance
def update_node_instance(self, node_instance):
    """Bump and persist a node instance under a table lock.

    :raises ConflictError: if the caller's version is stale.
    """
    with _lock_table(NodeInstance):
        stored = self.get_node_instance(node_instance.id)
        # Optimistic-lock check: the caller must hold the latest
        # version, otherwise another writer got there first.
        if stored.version != node_instance.version:
            raise manager_exceptions.ConflictError(
                'Node instance update conflict for node instance {0} '
                '[current_version={1}, updated_version={2}]'.format(
                    stored.id, stored.version, node_instance.version)
            )
        node_instance.version += 1
        return self._safe_add(node_instance)
def update_deployment_update(self, depup_modification):
    """Apply the non-None fields of `depup_modification` onto the
    stored deployment update with the same id.

    The `deployment_id` field is immutable and never copied.

    :raises ConflictError: if no such deployment update exists.
    """
    update_key = str(depup_modification.id)
    data = self._load_data()
    if update_key not in data[DEPLOYMENT_UPDATES]:
        # NOTE(review): a not-found error type might suit this better
        # than ConflictError, but the exception type is kept so existing
        # callers' except clauses still match.
        raise manager_exceptions.ConflictError(
            'Deployment Update {0} does not exist'.format(
                depup_modification.id))
    # BUG FIX: index with the same stringified key used by the
    # existence check above - the original indexed with the raw id,
    # which raises KeyError whenever the id is not already a string.
    deployment_update = data[DEPLOYMENT_UPDATES][update_key]
    for att in DeploymentUpdate.fields - {'deployment_id'}:
        new_value = getattr(depup_modification, att)
        if new_value is not None:
            setattr(deployment_update, att, new_value)
    self._dump_data(data)
def _safe_commit(self):
    """Try to commit changes in the session. Roll back if exception raised

    Excepts SQLAlchemy errors and rollbacks if they're caught
    """
    try:
        db.session.commit()
    except sql_errors as e:
        # Default to a generic storage error; it may be replaced by a
        # more specific ConflictError below.
        exception_to_raise = manager_exceptions.SQLStorageException(
            'SQL Storage error: {0}'.format(str(e)))
        db.session.rollback()
        if SQLStorageManager._is_unique_constraint_violation(e):
            # The id of the row that violated the constraint, as bound
            # in the failed statement's parameters.
            problematic_instance_id = e.params['id']
            # Session has been rolled back at this point
            self.refresh(self.current_tenant)
            exception_to_raise = manager_exceptions.ConflictError(
                'Instance with ID {0} cannot be added on {1} or with '
                'global visibility'
                ''.format(problematic_instance_id, self.current_tenant))
        raise exception_to_raise
def _prepare_and_process_doc(self, data_id, file_server_root,
                             archive_target_path):
    """Create a plugin record from an archive unless a duplicate exists.

    :raises ConflictError: if the same archive was already uploaded for
        this package.
    :return: (new_plugin, archive_name)
    """
    new_plugin = self._create_plugin_from_archive(data_id,
                                                  archive_target_path)

    same_package = get_storage_manager().get_plugins(
        filters={'package_name': new_plugin.package_name})
    for existing in same_package:
        if existing.archive_name == new_plugin.archive_name:
            raise manager_exceptions.ConflictError(
                'a plugin archive by the name of {archive_name} already '
                'exists for package with name {package_name} and version '
                '{version}'.format(archive_name=new_plugin.archive_name,
                                   package_name=new_plugin.package_name,
                                   version=new_plugin.package_version))
    get_storage_manager().put_plugin(new_plugin)
    return new_plugin, new_plugin.archive_name
def _validate_unique_resource_id_per_tenant(self, instance):
    """Assert that only a single resource exists with a given id in a
    given tenant
    """
    # Uniqueness is only enforced for tenant-scoped resources that
    # declare their ids unique.
    if not (instance.is_resource and instance.is_id_unique):
        return
    lookup = {'id': instance.id, '_tenant_id': self.current_tenant.id}
    # Exactly one hit (the instance itself) is the legal state.
    hits = self.list(instance.__class__, filters=lookup)
    if len(hits) != 1:
        # Undo the insert before reporting the conflict.
        db.session.delete(instance)
        self._safe_commit()
        raise manager_exceptions.ConflictError(
            '{0} already exists on {1}'.format(instance,
                                               self.current_tenant))
def assert_cyclic_dependencies_on_graph(self, graph):
    """Raise ConflictError if `graph` (an adjacency dict of vertex ->
    set of successors) contains a cycle.

    Iterative DFS; edges are consumed as traversed, so `graph` is
    destroyed by this call.
    """
    # DFS to find cycles
    v = list(graph)[0]
    recursion_stack = [v]
    while graph:
        while v not in graph.keys():
            # Dead end - backtrack along the stack, or restart from an
            # arbitrary remaining vertex when the stack empties.
            recursion_stack.pop()
            if recursion_stack:
                # BUG FIX: the original evaluated recursion_stack[-1]
                # without assigning it, so `v` never actually
                # backtracked and the stack was drained to empty.
                v = recursion_stack[-1]
            else:
                v = list(graph)[0]
                recursion_stack = [v]
        # Consume one outgoing edge of v; drop v once exhausted.
        u = graph[v].pop()
        if not graph[v]:
            del graph[v]
        v = u
        # Reaching a vertex already on the stack closes a cycle.
        if v in recursion_stack:
            raise manager_exceptions.ConflictError(
                self.cyclic_error_message
            )
        recursion_stack.append(v)
def _prepare_and_process_doc(self, data_id, file_server_root,
                             archive_target_path, **kwargs):
    """Accept a wagon or zip upload, verify it, and register the plugin.

    :raises InvalidPluginError: if the upload is neither wagon nor zip.
    :raises ConflictError: if the same archive already exists for this
        package.
    :return: (new_plugin, archive_name)
    """
    # support previous implementation
    wagon_target_path = archive_target_path
    # handle the archive_target_path, which may be zip or wagon
    if not self._is_wagon_file(archive_target_path):
        if not zipfile.is_zipfile(archive_target_path):
            raise manager_exceptions.InvalidPluginError(
                'input can be only a wagon or a zip file.')
        extracted_dir = unzip(archive_target_path,
                              logger=current_app.logger)
        os.remove(archive_target_path)
        shutil.move(extracted_dir, archive_target_path)
        wagon_target_path, _ = self._verify_archive(archive_target_path)

    args = self._get_args()
    visibility = kwargs.get('visibility', None)
    new_plugin = self._create_plugin_from_archive(
        data_id, wagon_target_path, args.private_resource, visibility)

    sm = get_resource_manager().sm
    same_package = sm.list(
        Plugin, filters={'package_name': new_plugin.package_name})
    for existing in same_package:
        if existing.archive_name == new_plugin.archive_name:
            raise manager_exceptions.ConflictError(
                'a plugin archive by the name of {archive_name} already '
                'exists for package with name {package_name} and version '
                '{version}'.format(archive_name=new_plugin.archive_name,
                                   package_name=new_plugin.package_name,
                                   version=new_plugin.package_version))
    sm.put(new_plugin)
    return new_plugin, new_plugin.archive_name