def validate(cls, data):
    d = cls.validate_json(data)
    if not isinstance(d, dict):
        raise errors.InvalidData(
            "Node data must be dict",
            log_message=True
        )
    if "mac" not in d:
        raise errors.InvalidData(
            "No mac address specified",
            log_message=True
        )
    else:
        q = db().query(Node)
        if q.filter(Node.mac == d["mac"]).first() or q.join(
                NodeNICInterface, Node.nic_interfaces).filter(
                NodeNICInterface.mac == d["mac"]).first():
            raise errors.AlreadyExists(
                "Node with mac {0} already "
                "exists - doing nothing".format(d["mac"]),
                log_level="info"
            )
        if cls.validate_existent_node_mac_create(d):
            raise errors.AlreadyExists(
                "Node with mac {0} already "
                "exists - doing nothing".format(d["mac"]),
                log_level="info"
            )
    if 'meta' in d:
        MetaValidator.validate_create(d['meta'])
    return d
def validate(cls, data):
    # TODO(enchantner): rewrite validators to use Node object
    data = cls.validate_json(data)

    if data.get("status", "") != "discover":
        raise errors.NotAllowed(
            "Only bootstrap nodes are allowed to be registered.")

    if 'mac' not in data:
        raise errors.InvalidData(
            "No mac address specified",
            log_message=True)

    if cls.does_node_exist_in_db(data):
        raise errors.AlreadyExists(
            "Node with mac {0} already "
            "exists - doing nothing".format(data["mac"]),
            log_level="info")

    if cls.validate_existent_node_mac_create(data):
        raise errors.AlreadyExists(
            "Node with mac {0} already "
            "exists - doing nothing".format(data["mac"]),
            log_level="info")

    if 'meta' in data:
        MetaValidator.validate_create(data['meta'])

    return data
def validate(cls, data):
    d = cls.validate_json(data)
    if "name" not in d:
        raise errors.InvalidData(
            "No release name specified",
            log_message=True)
    if "version" not in d:
        raise errors.InvalidData(
            "No release version specified",
            log_message=True)
    if db().query(Release).filter_by(
            name=d["name"],
            version=d["version"]).first():
        raise errors.AlreadyExists(
            "Release with the same name and version "
            "already exists",
            log_message=True)
    if "networks_metadata" in d:
        for network in d["networks_metadata"]:
            if "name" not in network or "access" not in network:
                raise errors.InvalidData(
                    "Invalid network data: %s" % str(network),
                    log_message=True)
            if network["access"] not in settings.NETWORK_POOLS:
                raise errors.InvalidData(
                    "Invalid access mode for network",
                    log_message=True)
    else:
        d["networks_metadata"] = []
    if "attributes_metadata" not in d:
        d["attributes_metadata"] = {}
    else:
        try:
            Attributes.validate_fixture(d["attributes_metadata"])
        except Exception:
            raise errors.InvalidData(
                "Invalid logical structure of attributes metadata",
                log_message=True)
    return d
def validate_cluster_name(cls, cluster_name):
    clusters = objects.ClusterCollection.filter_by(
        None, name=cluster_name)

    if clusters.first():
        raise errors.AlreadyExists(
            "Environment with this name '{0}' already exists.".format(
                cluster_name),
            log_message=True)
def get_plugins_node_roles(cls, cluster):
    result = {}
    core_roles = set(cluster.release.roles_metadata)

    for plugin_db in ClusterPlugins.get_enabled(cluster.id):
        plugin_roles = wrap_plugin(plugin_db).normalized_roles_metadata

        # We should check all possible cases of role intersection
        # with the core roles and those from other plugins,
        # and afterwards show them in the error message;
        # thus role names for which the following check fails
        # are accumulated in the err_roles variable.
        err_roles = set(
            r for r in plugin_roles if r in core_roles or r in result)
        if err_roles:
            raise errors.AlreadyExists(
                "Plugin (ID={0}) is unable to register the following "
                "node roles: {1}".format(plugin_db.id,
                                         ", ".join(sorted(err_roles))))

        # Update the info on processed roles in case
        # all intersection checks succeed.
        result.update(plugin_roles)

    return result
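# The role-conflict check above, isolated as a minimal standalone sketch:
# plain data structures stand in for the release metadata and the plugin
# adapters, and ValueError stands in for errors.AlreadyExists. The function
# name and the sample role names below are hypothetical.
def collect_plugin_roles(core_roles, plugins):
    result = {}
    for plugin_id, plugin_roles in plugins:
        # a role clashes if the release or an earlier plugin already owns it
        err_roles = set(
            r for r in plugin_roles if r in core_roles or r in result)
        if err_roles:
            raise ValueError(
                "Plugin (ID={0}) is unable to register the following "
                "node roles: {1}".format(plugin_id,
                                         ", ".join(sorted(err_roles))))
        result.update(plugin_roles)
    return result

# usage: the first call succeeds; the second re-registers "compute"
collect_plugin_roles({"controller", "compute"}, [(1, {"ceph-osd": {}})])
try:
    collect_plugin_roles({"controller"},
                         [(1, {"compute": {}}), (2, {"compute": {}})])
except ValueError as exc:
    print(exc)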
def validate(cls, data):
    d = cls.validate_json(data)
    if "name" not in d:
        raise errors.InvalidData(
            "No release name specified",
            log_message=True)
    if "version" not in d:
        raise errors.InvalidData(
            "No release version specified",
            log_message=True)
    if db().query(Release).filter_by(
            name=d["name"],
            version=d["version"]).first():
        raise errors.AlreadyExists(
            "Release with the same name and version "
            "already exists",
            log_message=True)
    if "networks_metadata" in d:
        # TODO(enchantner): additional validation
        for network in d["networks_metadata"]:
            if "name" not in network:
                raise errors.InvalidData(
                    "Invalid network data: %s" % str(network),
                    log_message=True)
    else:
        d["networks_metadata"] = []
    if "attributes_metadata" not in d:
        d["attributes_metadata"] = {}
    else:
        try:
            Attributes.validate_fixture(d["attributes_metadata"])
        except Exception:
            raise errors.InvalidData(
                "Invalid logical structure of attributes metadata",
                log_message=True)
    return d
def validate_update(cls, data, instance):
    d = cls.validate_json(data)
    cls._validate_common(d)

    if db().query(Release).filter_by(
            name=d.get("name", instance.name),
            version=d.get("version", instance.version)).filter(
            not_(Release.id == instance.id)).first():
        raise errors.AlreadyExists(
            "Release with the same name "
            "and version already exists",
            log_message=True)

    if "roles" in d:
        new_roles = set(d["roles"])
        assigned_roles_names = set([
            r.name for r in instance.role_list
            if r.nodes or r.pending_nodes
        ])
        if not assigned_roles_names <= new_roles:
            raise errors.InvalidData(
                "Cannot delete roles already "
                "assigned to nodes: {0}".format(
                    ", ".join(assigned_roles_names - new_roles)),
                log_message=True)

    return d
def validate_update(cls, data, instance):
    d = cls._validate_common(data, instance=instance)

    if "name" in d:
        query = objects.ClusterCollection.filter_by_not(
            None, id=instance.id)

        if objects.ClusterCollection.filter_by(
                query, name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True
            )

    for k in cls._blocked_for_update:
        if k in d and getattr(instance, k) != d[k]:
            raise errors.InvalidData(
                u"Changing '{0}' for environment is prohibited".format(k),
                log_message=True
            )

    cls._validate_mode(d, instance.release)

    if 'nodes' in d:
        # Here d['nodes'] is a list of node IDs
        # to be assigned to the cluster.
        cls._validate_nodes(d['nodes'], instance)

    return d
def validate(cls, data):
    d = cls.validate_json(data)
    if "name" not in d:
        raise errors.InvalidData(
            "No release name specified",
            log_message=True)
    if "version" not in d:
        raise errors.InvalidData(
            "No release version specified",
            log_message=True)
    if "operating_system" not in d:
        raise errors.InvalidData(
            "No release operating system specified",
            log_message=True)
    if "orchestrator_data" not in d:
        raise errors.InvalidData(
            'No orchestrator_data specified',
            log_message=True)
    if db().query(Release).filter_by(
            name=d["name"],
            version=d["version"]).first():
        raise errors.AlreadyExists(
            "Release with the same name and version "
            "already exists",
            log_message=True)

    cls._validate_common(d)

    if "networks_metadata" not in d:
        d["networks_metadata"] = {}
    if "attributes_metadata" not in d:
        d["attributes_metadata"] = {}

    return d
def get_components_metadata(cls, release):
    """Get components metadata for all plugins related to the release.

    :param release: A release instance
    :type release: Release model
    :return: list -- List of plugins components
    """
    components = []
    seen_components = \
        dict((c['name'], 'release') for c in release.components_metadata)

    for plugin_adapter in map(
            wrap_plugin, PluginCollection.get_by_release(release)):
        plugin_name = plugin_adapter.name

        for component in plugin_adapter.components_metadata:
            name = component['name']
            if seen_components.get(name, plugin_name) != plugin_name:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with {1} by introducing '
                    'the same component with name "{2}"'.format(
                        plugin_adapter.name, seen_components[name], name))

            if name not in seen_components:
                seen_components[name] = plugin_adapter.name
                components.append(component)

    return components
def check_unique_hostnames(cls, nodes, cluster_id):
    hostnames = [node.hostname for node in nodes]
    conflicting_hostnames = [
        x[0] for x in db.query(Node.hostname).filter(
            sa.and_(
                Node.hostname.in_(hostnames),
                Node.cluster_id == cluster_id,
            )).all()
    ]
    if conflicting_hostnames:
        raise errors.AlreadyExists(
            "Nodes with hostnames [{0}] already exist in cluster {1}."
            .format(",".join(conflicting_hostnames), cluster_id))
def validate(cls, data):
    d = cls.validate_json(data)
    if d.get("name"):
        if db().query(Cluster).filter_by(name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)
    if d.get("release"):
        release = db().query(Release).get(d.get("release"))
        if not release:
            raise errors.InvalidData(
                "Invalid release id",
                log_message=True)
    return d
def get_volumes_metadata(cls, cluster):
    """Get volumes metadata for the cluster from all enabled plugins.

    :param cluster: A cluster instance
    :type cluster: Cluster model
    :return: dict -- Object with merged volumes data from plugins
    """
    volumes_metadata = {
        'volumes': [],
        'volumes_roles_mapping': {}
    }
    release_volumes = cluster.release.volumes_metadata.get('volumes', [])
    release_volumes_ids = [v['id'] for v in release_volumes]
    processed_volumes = {}

    enabled_plugins = ClusterPlugins.get_enabled(cluster.id)
    for plugin_adapter in map(wrap_plugin, enabled_plugins):
        metadata = plugin_adapter.volumes_metadata

        for volume in metadata.get('volumes', []):
            volume_id = volume['id']
            if volume_id in release_volumes_ids:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with release '
                    'by introducing the same volume with id "{1}"'.format(
                        plugin_adapter.full_name, volume_id))
            elif volume_id in processed_volumes:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same volume with id "{2}"'.format(
                        plugin_adapter.full_name,
                        processed_volumes[volume_id],
                        volume_id))

            processed_volumes[volume_id] = plugin_adapter.full_name

        volumes_metadata.get('volumes_roles_mapping', {}).update(
            metadata.get('volumes_roles_mapping', {}))
        volumes_metadata.get('volumes', []).extend(
            metadata.get('volumes', []))

    return volumes_metadata
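# A minimal standalone sketch of the merge-with-overlap-detection performed
# above, using plain dicts in place of the plugin adapters and the Release
# model, and ValueError in place of errors.AlreadyExists. The function name
# and the sample ids below are hypothetical.
def merge_plugin_volumes(release_volume_ids, plugins):
    merged = {'volumes': [], 'volumes_roles_mapping': {}}
    processed = {}
    for plugin_name, metadata in plugins:
        for volume in metadata.get('volumes', []):
            volume_id = volume['id']
            if volume_id in release_volume_ids:
                raise ValueError(
                    'Plugin {0} is overlapping with release by introducing '
                    'the same volume with id "{1}"'.format(
                        plugin_name, volume_id))
            if volume_id in processed:
                raise ValueError(
                    'Plugin {0} is overlapping with plugin {1} by '
                    'introducing the same volume with id "{2}"'.format(
                        plugin_name, processed[volume_id], volume_id))
            processed[volume_id] = plugin_name
        merged['volumes_roles_mapping'].update(
            metadata.get('volumes_roles_mapping', {}))
        merged['volumes'].extend(metadata.get('volumes', []))
    return merged

# usage: merges cleanly; a second plugin reusing the id 'vdo' (or any id
# already present in the release) would raise
merge_plugin_volumes(
    {'os', 'vm'},
    [('lvm_plugin-1.0', {'volumes': [{'id': 'vdo'}],
                         'volumes_roles_mapping': {'compute': []}})])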
def _validate_common(cls, data):
    d = cls.validate_json(data)
    if d.get("name"):
        if ClusterCollection.filter_by(None, name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    release_id = d.get("release", d.get("release_id", None))
    if release_id:
        release = Release.get_by_uid(release_id)
        if not release:
            raise errors.InvalidData(
                "Invalid release ID",
                log_message=True)

    return d
def validate(cls, data):
    d = cls.validate_json(data)

    node_group = objects.NodeGroup.get_by_uid(d.get('group_id'))
    if not node_group:
        raise errors.InvalidData(
            "Node group with ID {0} does not exist".format(
                d.get('group_id')))

    if objects.NetworkGroup.get_from_node_group_by_name(
            node_group.id, d.get('name')):
        raise errors.AlreadyExists(
            "Network with name {0} already exists "
            "in node group {1}".format(d['name'], node_group.name))

    return d
def validate_update(cls, data, instance):
    d = cls._validate_common(data, instance=instance)

    if "name" in d:
        query = ClusterCollection.filter_by_not(None, id=instance.id)
        if ClusterCollection.filter_by(query, name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    for k in ("net_provider", ):
        if k in d and getattr(instance, k) != d[k]:
            raise errors.InvalidData(
                u"Changing '{0}' for environment is prohibited".format(k),
                log_message=True)

    return d
def validate(cls, data):
    d = cls._validate_common(data)

    # TODO(ikalnitsky): move it to _validate_common when
    # the PATCH method is implemented
    release_id = d.get("release", d.get("release_id", None))
    if not release_id:
        raise errors.InvalidData(
            u"Release ID is required",
            log_message=True)

    if "name" in d:
        if ClusterCollection.filter_by(None, name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    return d
def _validate_nodes(cls, new_node_ids, instance):
    set_new_node_ids = set(new_node_ids)
    set_old_node_ids = set(objects.Cluster.get_nodes_ids(instance))
    nodes_to_add = set_new_node_ids - set_old_node_ids
    nodes_to_remove = set_old_node_ids - set_new_node_ids

    hostnames_to_add = [
        x[0] for x in db.query(Node.hostname).filter(
            Node.id.in_(nodes_to_add)).all()
    ]

    duplicated = [
        x[0] for x in db.query(Node.hostname).filter(
            sa.and_(
                Node.hostname.in_(hostnames_to_add),
                Node.cluster_id == instance.id,
                Node.id.notin_(nodes_to_remove)
            )).all()
    ]
    if duplicated:
        raise errors.AlreadyExists(
            "Nodes with hostnames [{0}] already exist in cluster {1}."
            .format(",".join(duplicated), instance.id))
def get_plugins_deployment_tasks(cls, cluster):
    deployment_tasks = []
    processed_tasks = {}

    enabled_plugins = ClusterPlugins.get_enabled(cluster.id)
    for plugin_adapter in map(wrap_plugin, enabled_plugins):
        depl_tasks = plugin_adapter.deployment_tasks

        for t in depl_tasks:
            t_id = t['id']
            if t_id in processed_tasks:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same deployment task with '
                    'id {2}'.format(plugin_adapter.full_name,
                                    processed_tasks[t_id],
                                    t_id))
            processed_tasks[t_id] = plugin_adapter.full_name

        deployment_tasks.extend(depl_tasks)

    return deployment_tasks
def validate_hostname(cls, hostname, instance):
    if hostname == instance.hostname:
        return

    if instance.status != consts.NODE_STATUSES.discover:
        raise errors.NotAllowed(
            "Node hostname may be changed only before provisioning.")

    if instance.cluster:
        cluster = instance.cluster
        public_ssl_endpoint = cluster.attributes.editable.get(
            'public_ssl', {}).get('hostname', {}).get('value', "")

        if public_ssl_endpoint in (
                hostname, objects.Node.generate_fqdn_by_hostname(hostname)):
            raise errors.InvalidData(
                "New hostname '{0}' conflicts with public TLS endpoint"
                .format(hostname))

    if objects.Node.get_by_hostname(hostname, instance.cluster_id):
        raise errors.AlreadyExists(
            "Duplicate hostname '{0}'.".format(hostname))
def validate(cls, data, **kwargs):
    d = cls.validate_json(data)
    cluster_id = kwargs.get("cluster_id") or d.get("id")

    if d.get("name"):
        if db().query(Cluster).filter_by(name=d["name"]).first():
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    if d.get("release"):
        release = db().query(Release).get(d.get("release"))
        if not release:
            raise errors.InvalidData(
                "Invalid release id",
                log_message=True)

    if cluster_id:
        cluster = db().query(Cluster).get(cluster_id)
        if cluster:
            for k in ("net_provider", "net_segment_type"):
                if k in d and getattr(cluster, k) != d[k]:
                    raise errors.InvalidData(
                        "Change of '%s' is prohibited" % k,
                        log_message=True)

    return d
def validate_update(cls, data, instance):
    d = cls.validate_json(data)
    cls._validate_common(d)

    if db().query(models.Release).filter_by(
            name=d.get("name", instance.name),
            version=d.get("version", instance.version)).filter(
            sa.not_(models.Release.id == instance.id)).first():
        raise errors.AlreadyExists(
            "Release with the same name "
            "and version already exists",
            log_message=True)

    if 'roles_metadata' in d:
        new_roles = set(d['roles_metadata'])
        clusters = [cluster.id for cluster in instance.clusters]

        new_roles_array = sa.cast(
            psql.array(new_roles),
            psql.ARRAY(sa.String(consts.ROLE_NAME_MAX_SIZE)))

        node = db().query(models.Node).filter(
            models.Node.cluster_id.in_(clusters)
        ).filter(sa.not_(sa.and_(
            models.Node.roles.contained_by(new_roles_array),
            models.Node.pending_roles.contained_by(new_roles_array)
        ))).first()

        if node:
            used_role = set(node.roles + node.pending_roles)
            used_role -= new_roles

            raise errors.CannotDelete(
                "Cannot delete roles already assigned "
                "to nodes: {0}".format(','.join(used_role)))

    return d
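# The contained_by query above searches for any node whose roles (or pending
# roles) are not a subset of the updated roles_metadata, i.e. a node that
# still uses a role the update would delete. The same predicate in plain
# Python, with a hypothetical helper name and sample data:
def roles_still_in_use(node_roles, node_pending_roles, new_roles):
    # mirrors sa.not_(sa.and_(roles.contained_by(new_roles_array),
    #                         pending_roles.contained_by(new_roles_array)))
    return not (set(node_roles) <= set(new_roles)
                and set(node_pending_roles) <= set(new_roles))

roles_still_in_use(['controller'], [], {'controller', 'compute'})  # False
roles_still_in_use(['mongo'], [], {'controller', 'compute'})       # True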
def _check_duplicate_network_name(cls, node_group, network_name):
    if objects.NetworkGroup.get_from_node_group_by_name(
            node_group.id, network_name):
        raise errors.AlreadyExists(
            "Network with name {0} already exists "
            "in node group {1}".format(network_name, node_group.name))