def validate(cls, data):
    """Validate raw JSON data for registration of a new node.

    :param data: JSON string with node attributes
    :returns: parsed and validated data dict
    :raises errors.NotAllowed: if the node status is not "discover"
    :raises errors.InvalidData: if no MAC address is given
    :raises errors.AlreadyExists: if a node with this MAC is already known
    """
    # TODO(enchantner): rewrite validators to use Node object
    data = cls.validate_json(data)
    if data.get("status", "") != "discover":
        raise errors.NotAllowed(
            "Only bootstrap nodes are allowed to be registered.")
    if 'mac' not in data:
        raise errors.InvalidData("No mac address specified",
                                 log_message=True)
    # Both checks signal the same condition (the node is already
    # registered), so they share a single raise instead of the two
    # previously duplicated, byte-identical blocks.
    if (cls.does_node_exist_in_db(data) or
            cls.validate_existent_node_mac_create(data)):
        raise errors.AlreadyExists("Node with mac {0} already "
                                   "exists - doing nothing".format(
                                       data["mac"]),
                                   log_level="info")
    if 'meta' in data:
        MetaValidator.validate_create(data['meta'])
    return data
def get_volumes_metadata(cls, cluster):
    """Get volumes metadata for all plugins enabled for the cluster

    :param cluster: A cluster instance
    :type cluster: Cluster model
    :return: dict -- Object with merged volumes data from plugins
    :raises errors.AlreadyExists: when a plugin volume id collides with
        the cluster, the release, or another enabled plugin
    """
    def _volume_ids(instance):
        return [
            v['id'] for v in instance.volumes_metadata.get('volumes', [])
        ]

    merged = {
        'volumes': [],
        'volumes_roles_mapping': {},
        'rule_to_pick_boot_disk': [],
    }
    # Volume ids already claimed by the cluster and its release,
    # keyed by the owner name used in the error message.
    owned_ids = (
        ('cluster', _volume_ids(cluster)),
        ('release', _volume_ids(cluster.release)),
    )
    # volume id -> full name of the plugin that introduced it
    plugin_volume_owner = {}

    for adapter in map(wrap_plugin, ClusterPlugin.get_enabled(cluster.id)):
        metadata = adapter.volumes_metadata
        for volume in metadata.get('volumes', []):
            vid = volume['id']
            for owner, ids in owned_ids:
                if vid in ids:
                    raise errors.AlreadyExists(
                        'Plugin {0} is overlapping with {1} '
                        'by introducing the same volume with '
                        'id "{2}"'.format(adapter.full_name,
                                          owner,
                                          vid))
                elif vid in plugin_volume_owner:
                    raise errors.AlreadyExists(
                        'Plugin {0} is overlapping with plugin {1} '
                        'by introducing the same volume with '
                        'id "{2}"'.format(adapter.full_name,
                                          plugin_volume_owner[vid],
                                          vid))
            plugin_volume_owner[vid] = adapter.full_name

        # Keys are guaranteed present in the dict built above, so the
        # merge can address them directly.
        merged['volumes_roles_mapping'].update(
            metadata.get('volumes_roles_mapping', {}))
        merged['volumes'].extend(metadata.get('volumes', []))
        merged['rule_to_pick_boot_disk'].extend(
            metadata.get('rule_to_pick_boot_disk', []))

    return merged
def validate_hostname(cls, hostname, instance):
    """Check that a new hostname may be assigned to the node.

    :param hostname: proposed hostname
    :param instance: Node model instance
    :raises errors.NotAllowed: if the node is already provisioned
    :raises errors.InvalidData: if the hostname clashes with the
        cluster public TLS endpoint
    :raises errors.AlreadyExists: if the hostname is taken in the cluster
    """
    # No-op when the hostname is unchanged.
    if hostname == instance.hostname:
        return

    if instance.status != consts.NODE_STATUSES.discover:
        raise errors.NotAllowed(
            "Node hostname may be changed only before provisioning."
        )

    cluster = instance.cluster
    if cluster:
        tls_endpoint = cluster.attributes.editable.get(
            'public_ssl', {}).get('hostname', {}).get('value', "")
        # Both the short hostname and its FQDN form must avoid the
        # public TLS endpoint.
        candidates = (
            hostname,
            objects.Node.generate_fqdn_by_hostname(hostname),
        )
        if tls_endpoint in candidates:
            raise errors.InvalidData(
                "New hostname '{0}' conflicts with public TLS endpoint"
                .format(hostname))

    if objects.Node.get_by_hostname(
            hostname,
            instance.cluster_id):
        raise errors.AlreadyExists(
            "Duplicate hostname '{0}'.".format(hostname)
        )
def validate_hostname(cls, hostname, instance):
    """Validate hostname syntax and ensure it can be assigned.

    :param hostname: proposed hostname
    :param instance: Node model instance
    :raises errors.InvalidData: on bad syntax or TLS-endpoint conflict
    :raises errors.NotAllowed: if the node is already provisioned
    :raises errors.AlreadyExists: if the hostname is taken in the cluster
    """
    # Syntax check first: hostname must match the FQDN pattern.
    if not cls.HostnameRegex.match(hostname):
        raise errors.InvalidData(
            'Hostname must consist of english characters, '
            'digits, minus signs and periods. '
            '(The following pattern must apply {})'.format(
                base_types.FQDN['pattern']))

    # Unchanged hostname requires no further checks.
    if hostname == instance.hostname:
        return

    if instance.status != consts.NODE_STATUSES.discover:
        raise errors.NotAllowed(
            "Node hostname may be changed only before provisioning.")

    cluster = instance.cluster
    if cluster:
        tls_endpoint = cluster.attributes.editable.get(
            'public_ssl', {}).get('hostname', {}).get('value', "")
        candidates = (
            hostname,
            objects.Node.generate_fqdn_by_hostname(hostname),
        )
        if tls_endpoint in candidates:
            raise errors.InvalidData(
                "New hostname '{0}' conflicts with public TLS endpoint".
                format(hostname))

    if objects.Node.get_by_hostname(hostname, instance.cluster_id):
        raise errors.AlreadyExists(
            "Duplicate hostname '{0}'.".format(hostname))
def validate_update(cls, data, instance):
    """Validate data for updating an existing environment.

    :param data: JSON string with cluster attributes
    :param instance: Cluster model instance being updated
    :returns: validated data dict
    :raises errors.AlreadyExists: if the new name is taken
    :raises errors.InvalidData: if a read-only field is changed
    """
    parsed = cls._validate_common(data, instance=instance)

    if "name" in parsed:
        # Look for another cluster (not this one) with the same name.
        others = objects.ClusterCollection.filter_by_not(
            None, id=instance.id)
        duplicate = objects.ClusterCollection.filter_by(
            others, name=parsed["name"]).first()
        if duplicate:
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True
            )

    for field in cls._blocked_for_update:
        if field in parsed and getattr(instance, field) != parsed[field]:
            raise errors.InvalidData(
                u"Changing '{0}' for environment is prohibited".format(
                    field),
                log_message=True
            )

    cls._validate_mode(parsed, instance.release)

    if 'nodes' in parsed:
        # parsed['nodes'] is the list of node IDs
        # to be assigned to the cluster.
        cls._validate_nodes(parsed['nodes'], instance)

    return parsed
def get_plugins_node_roles(cls, cluster):
    """Collect node roles registered by the cluster's enabled plugins.

    :param cluster: Cluster model instance
    :returns: dict of role name -> role metadata
    :raises errors.AlreadyExists: when a plugin role collides with a
        core role or a role from another enabled plugin
    """
    aggregated = {}
    core_roles = set(cluster.release.roles_metadata)

    for plugin_db in ClusterPlugin.get_enabled(cluster.id):
        plugin_roles = wrap_plugin(plugin_db).normalized_roles_metadata

        # Roles must not intersect with core ones nor with roles of
        # already-processed plugins; all offending names are gathered
        # so the error message lists every conflict at once.
        conflicts = {
            role for role in plugin_roles
            if role in core_roles or role in aggregated
        }
        if conflicts:
            raise errors.AlreadyExists(
                "Plugin (ID={0}) is unable to register the following "
                "node roles: {1}".format(plugin_db.id,
                                         ", ".join(sorted(conflicts))))

        # All checks passed; record the plugin's roles.
        aggregated.update(plugin_roles)

    return aggregated
def get_components_metadata(cls, release):
    """Get components metadata for all plugins which related to release.

    :param release: A release instance
    :type release: Release model
    :return: list -- List of plugins components
    :raises errors.AlreadyExists: when a plugin introduces a component
        whose name is already claimed by the release or another plugin
    """
    components = []
    # Maps component name -> origin ('release' or a plugin name).
    seen_components = \
        dict((c['name'], 'release') for c in release.components_metadata)

    for plugin_adapter in map(
            wrap_plugin, PluginCollection.get_by_release(release)):
        plugin_name = plugin_adapter.name
        for component in plugin_adapter.components_metadata:
            name = component['name']
            # Default of plugin_name means an unseen name — or one this
            # same plugin already registered — passes the check; only a
            # different origin triggers the conflict.
            if seen_components.get(name, plugin_name) != plugin_name:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with {1} by introducing '
                    'the same component with name "{2}"'.format(
                        plugin_adapter.name, seen_components[name], name))

            # NOTE(review): a duplicate name within the same plugin is
            # silently skipped here rather than reported — presumably
            # intentional de-duplication; confirm.
            if name not in seen_components:
                seen_components[name] = plugin_adapter.name
                components.append(component)

    return components
def validate_cluster_name(cls, cluster_name):
    """Ensure no existing environment already uses the given name.

    :param cluster_name: proposed environment name
    :raises errors.AlreadyExists: if the name is taken
    """
    existing = objects.ClusterCollection.filter_by(
        None, name=cluster_name).first()
    if existing:
        raise errors.AlreadyExists(
            "Environment with this name '{0}' already exists.".format(
                cluster_name),
            log_message=True)
def validate(cls, data):
    """Validate data for creating a new release.

    :param data: JSON string with release attributes
    :returns: validated data dict with metadata defaults filled in
    :raises errors.InvalidData: if a required field is missing
    :raises errors.AlreadyExists: if name+version duplicate a release
    """
    parsed = cls.validate_json(data)

    # Required fields, paired with their error messages.
    required = (
        ("name", "No release name specified"),
        ("version", "No release version specified"),
        ("operating_system", "No release operating system specified"),
    )
    for field, message in required:
        if field not in parsed:
            raise errors.InvalidData(message, log_message=True)

    duplicate = db().query(models.Release).filter_by(
        name=parsed["name"],
        version=parsed["version"]).first()
    if duplicate:
        raise errors.AlreadyExists(
            "Release with the same name and version "
            "already exists",
            log_message=True)

    cls._validate_common(parsed)

    # Default the metadata containers when absent.
    parsed.setdefault("networks_metadata", {})
    parsed.setdefault("attributes_metadata", {})

    return parsed
def get_volumes_metadata(cls, cluster):
    """Get volumes metadata for cluster from all plugins which enabled it.

    :param cluster: A cluster instance
    :type cluster: Cluster model
    :return: dict -- Object with merged volumes data from plugins
    :raises errors.AlreadyExists: when a plugin volume id collides with
        the release or another enabled plugin
    """
    merged = {
        'volumes': [],
        'volumes_roles_mapping': {},
        'rule_to_pick_boot_disk': [],
    }
    release_volume_ids = [
        v['id']
        for v in cluster.release.volumes_metadata.get('volumes', [])
    ]
    # volume id -> full name of the plugin that introduced it
    plugin_volume_owner = {}

    for adapter in map(wrap_plugin, ClusterPlugin.get_enabled(cluster.id)):
        metadata = adapter.volumes_metadata
        for volume in metadata.get('volumes', []):
            vid = volume['id']
            if vid in release_volume_ids:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with release '
                    'by introducing the same volume with id "{1}"'.format(
                        adapter.full_name, vid))
            elif vid in plugin_volume_owner:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same volume with id "{2}"'.format(
                        adapter.full_name,
                        plugin_volume_owner[vid],
                        vid))
            plugin_volume_owner[vid] = adapter.full_name

        # Keys are guaranteed present in the dict built above.
        merged['volumes_roles_mapping'].update(
            metadata.get('volumes_roles_mapping', {}))
        merged['volumes'].extend(metadata.get('volumes', []))
        merged['rule_to_pick_boot_disk'].extend(
            metadata.get('rule_to_pick_boot_disk', []))

    return merged
def validate_create(cls, data, instance_cls, instance):
    """Validate creation of a tag on the given owner object.

    :param data: raw tag data
    :param instance_cls: owner class (e.g. Release or Cluster)
    :param instance: owner instance
    :returns: validated data dict
    :raises errors.AlreadyExists: if the owner already has the tag
    """
    parsed = cls.validate(data, instance=instance)
    tag_name = parsed['name']
    existing_tags = instance_cls.get_own_tags(instance)
    if tag_name in existing_tags:
        raise errors.AlreadyExists(
            "Tag with name '{}' already "
            "exists for {} {}".format(
                tag_name, instance_cls.__name__.lower(), instance.id))
    return parsed
def validate_create(cls, data, instance):
    """Validate creation of a role on the given release.

    :param data: raw role data
    :param instance: Release model instance
    :returns: validated data dict
    :raises errors.AlreadyExists: if the release already has the role
    """
    parsed = cls.validate_update(data, instance)
    new_role = parsed['name']
    if new_role in instance.roles_metadata:
        raise errors.AlreadyExists(
            "Role with name {name} already "
            "exists for release {release}".format(name=new_role,
                                                  release=instance.id))
    return parsed
def validate(cls, data, **kwargs):
    """Validate a cluster plugin link payload.

    :param data: raw link data
    :param kwargs: must contain 'cluster_id' of the owning cluster
    :returns: validated data dict
    :raises errors.AlreadyExists: if the cluster already has a link
        with the same URL
    """
    parsed = super(ClusterPluginLinkValidator, cls).validate(data)
    cls.validate_schema(parsed, plugin_link.PLUGIN_LINK_SCHEMA)
    cluster_id = kwargs['cluster_id']
    duplicate = objects.ClusterPluginLinkCollection.filter_by(
        None,
        url=parsed['url'],
        cluster_id=cluster_id).first()
    if duplicate:
        raise errors.AlreadyExists(
            "Cluster plugin link with URL {0} and cluster ID={1} already "
            "exists".format(parsed['url'], cluster_id),
            log_message=True)
    return parsed
def validate_create(cls, data, instance_cls, instance):
    """Validate creation of a role on the given owner object.

    :param data: raw role data
    :param instance_cls: owner class (e.g. Release or Cluster)
    :param instance: owner instance
    :returns: validated data dict
    :raises errors.AlreadyExists: if the owner already has the role
    """
    parsed = cls.validate_update(data, instance_cls, instance)
    new_role = parsed['name']
    existing_roles = instance_cls.get_own_roles(instance)
    if new_role in existing_roles:
        raise errors.AlreadyExists("Role with name {} already "
                                   "exists for {} {}".format(
                                       new_role,
                                       instance_cls.__name__.lower(),
                                       instance.id))
    return parsed
def check_unique_hostnames(cls, nodes, cluster_id):
    """Ensure none of the nodes' hostnames exist in the target cluster.

    :param nodes: iterable of Node instances to be checked
    :param cluster_id: id of the cluster being joined
    :raises errors.AlreadyExists: listing every conflicting hostname
    """
    wanted = [node.hostname for node in nodes]
    # Hostnames already present in the cluster among those requested.
    taken = [
        row[0] for row in db.query(Node.hostname).filter(
            sa.and_(
                Node.hostname.in_(wanted),
                Node.cluster_id == cluster_id,
            )).all()
    ]
    if taken:
        raise errors.AlreadyExists(
            "Nodes with hostnames [{0}] already exist in cluster {1}.".
            format(",".join(taken), cluster_id))
def validate_update(cls, data, instance):
    """Validate data for updating an existing plugin link.

    :param data: raw link data (may be a partial update without 'url')
    :param instance: PluginLink model instance being updated
    :returns: validated data dict
    :raises errors.AlreadyExists: if another link uses the same URL
    """
    parsed = super(PluginLinkValidator, cls).validate(data)
    cls.validate_schema(parsed, plugin_link.PLUGIN_LINK_UPDATE_SCHEMA)
    # 'url' may be absent in a partial update; fall back to the current
    # URL for both the duplicate lookup and the error message.
    # (Previously the message read parsed['url'] unconditionally and
    # raised KeyError instead of AlreadyExists in that case.)
    url = parsed.get('url', instance.url)
    duplicate = objects.PluginLinkCollection.filter_by_not(
        objects.PluginLinkCollection.filter_by(None, url=url),
        id=instance.id
    ).first()
    if duplicate:
        raise errors.AlreadyExists(
            "Plugin link with URL {0} already exists".format(url),
            log_message=True)
    return parsed
def validate(cls, data):
    """Validate data for creating a deployment sequence.

    :param data: JSON string with sequence attributes, including the
        release uid under the 'release' key
    :returns: validated data dict with 'release' replaced by 'release_id'
    :raises errors.AlreadyExists: if the release already has a sequence
        with the same name
    """
    parsed = cls.validate_json(data)
    cls.validate_schema(
        parsed, cls.single_schema
    )
    # Resolve the release uid and store the numeric id instead.
    release = objects.Release.get_by_uid(
        parsed.pop('release'), fail_if_not_found=True
    )
    parsed['release_id'] = release.id
    existing = objects.DeploymentSequence.get_by_name_for_release(
        release, parsed['name'])
    if existing:
        raise errors.AlreadyExists(
            'Sequence with name "{0}" already exist for release {1}.'
            .format(parsed['name'], release.id)
        )
    return parsed
def _check_vip_addr_intersection(cls, ip_instance, addr):
    """Check intersection with ip addresses of existing clusters

    If ip address is being updated for a VIP manually its intersection
    with ips of all existing clusters must be checked

    :param ip_instance: the IPAddr instance of the VIP being updated
    :param addr: new ip address for VIP
    :raises errors.AlreadyExists: if addr is allocated to another IPAddr
    """
    intersecting_ip = objects.IPAddrCollection.get_all_by_addr(addr)\
        .first()
    # NOTE(review): `is not ip_instance` compares object identity —
    # presumably relies on the SQLAlchemy session identity map returning
    # the same object for the same row; confirm.
    if intersecting_ip is not None and intersecting_ip is not ip_instance:
        err_msg = ("IP address {0} is already allocated within "
                   "{1} network with CIDR {2}".format(
                       addr,
                       intersecting_ip.network_data.name,
                       intersecting_ip.network_data.cidr))
        raise errors.AlreadyExists(err_msg)
def _validate_nodes(cls, new_node_ids, instance):
    """Check that assigning new_node_ids keeps hostnames unique.

    :param new_node_ids: iterable of node IDs to be assigned
    :param instance: Cluster model instance
    :raises errors.AlreadyExists: if a node being added has the same
        hostname as a node that remains in the cluster
    """
    set_new_node_ids = set(new_node_ids)
    set_old_node_ids = set(objects.Cluster.get_nodes_ids(instance))
    nodes_to_add = set_new_node_ids - set_old_node_ids
    nodes_to_remove = set_old_node_ids - set_new_node_ids
    # Hostnames of the nodes that would join the cluster.
    hostnames_to_add = [
        x[0] for x in db.query(Node.hostname).filter(
            Node.id.in_(nodes_to_add)).all()
    ]
    # A hostname only conflicts with nodes that stay in the cluster,
    # so nodes scheduled for removal are excluded from the check.
    duplicated = [
        x[0] for x in db.query(Node.hostname).filter(
            sa.and_(Node.hostname.in_(hostnames_to_add),
                    Node.cluster_id == instance.id,
                    Node.id.notin_(nodes_to_remove))).all()
    ]
    if duplicated:
        raise errors.AlreadyExists(
            "Nodes with hostnames [{0}] already exist in cluster {1}.".
            format(",".join(duplicated), instance.id))
def get_plugins_deployment_tasks(cls, cluster, graph_type=None):
    """Collect deployment tasks from all plugins enabled for the cluster.

    :param cluster: Cluster model instance
    :param graph_type: optional deployment graph type
    :returns: list of deployment task dicts
    :raises errors.AlreadyExists: when two plugins define a task with
        the same id
    """
    deployment_tasks = []
    # task id -> full name of the plugin that introduced it
    task_owner = {}

    for adapter in map(wrap_plugin, ClusterPlugin.get_enabled(cluster.id)):
        plugin_tasks = adapter.get_deployment_tasks(graph_type)
        for task in plugin_tasks:
            task_id = task['id']
            if task_id in task_owner:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same deployment task with '
                    'id {2}'.format(adapter.full_name,
                                    task_owner[task_id],
                                    task_id))
            task_owner[task_id] = adapter.full_name
        deployment_tasks.extend(plugin_tasks)

    return deployment_tasks
def validate(cls, data):
    """Validate data for creating a new environment.

    :param data: raw cluster data
    :returns: validated data dict
    :raises errors.InvalidData: if no release id is given
    :raises errors.AlreadyExists: if the name is already taken
    """
    parsed = cls._validate_common(data)

    # TODO(ikalnitsky): move it to _validate_common when
    # PATCH method will be implemented
    release_id = parsed.get("release", parsed.get("release_id", None))
    if not release_id:
        raise errors.InvalidData(u"Release ID is required",
                                 log_message=True)

    if "name" in parsed:
        same_name = objects.ClusterCollection.filter_by(
            None, name=parsed["name"]).first()
        if same_name:
            raise errors.AlreadyExists(
                "Environment with this name already exists",
                log_message=True)

    if "components" in parsed:
        cls._validate_components(release_id, parsed['components'])

    return parsed
def validate_update(cls, data, instance):
    """Validate data for updating a release.

    :param data: JSON string with release attributes
    :param instance: Release model instance being updated
    :returns: validated data dict
    :raises errors.AlreadyExists: if name+version duplicate a release
    :raises errors.CannotDelete: if a role removed from roles_metadata
        is still assigned (or pending) on a node of any of the
        release's clusters
    """
    d = cls.validate_json(data)
    cls._validate_common(d, instance)
    # Name/version uniqueness check, excluding this instance itself.
    if db().query(models.Release).filter_by(
            name=d.get("name", instance.name),
            version=d.get("version", instance.version)).filter(
                sa.not_(models.Release.id == instance.id)).first():
        raise errors.AlreadyExists(
            "Release with the same name "
            "and version already exists",
            log_message=True)
    if 'roles_metadata' in d:
        # Roles present on the instance but missing from the update.
        deleted_roles = (set(instance.roles_metadata) -
                         set(d['roles_metadata']))
        clusters_ids = (cluster.id for cluster in instance.clusters)
        deleted_roles_array = sa.cast(
            psql.array(deleted_roles),
            psql.ARRAY(sa.String(consts.ROLE_NAME_MAX_SIZE)))
        # First node in any of the release's clusters whose assigned or
        # pending roles overlap the deleted set.
        node = db().query(models.Node).filter(
            models.Node.cluster_id.in_(clusters_ids)).filter(
                sa.or_(
                    models.Node.roles.overlap(deleted_roles_array),
                    models.Node.pending_roles.overlap(
                        deleted_roles_array))).first()
        if node:
            used_role = set(node.roles + node.pending_roles)
            used_role = used_role.intersection(deleted_roles)
            raise errors.CannotDelete(
                "The following roles: {0} cannot be deleted "
                "since they are already assigned "
                "to nodes.".format(','.join(used_role)))
    return d
def validate_update(cls, data, instance):
    """Validate data for updating a release.

    :param data: JSON string with release attributes
    :param instance: Release model instance being updated
    :returns: validated data dict
    :raises errors.AlreadyExists: if name+version duplicate a release
    :raises errors.CannotDelete: if a node still uses a role that is
        absent from the new roles_metadata
    """
    d = cls.validate_json(data)
    cls._validate_common(d)
    # Name/version uniqueness check, excluding this instance itself.
    if db().query(models.Release).filter_by(
            name=d.get("name", instance.name),
            version=d.get("version", instance.version)).filter(
                sa.not_(models.Release.id == instance.id)).first():
        raise errors.AlreadyExists(
            "Release with the same name "
            "and version already exists",
            log_message=True)
    if 'roles_metadata' in d:
        new_roles = set(d['roles_metadata'])
        clusters = [cluster.id for cluster in instance.clusters]
        new_roles_array = sa.cast(
            psql.array(new_roles),
            psql.ARRAY(sa.String(consts.ROLE_NAME_MAX_SIZE)))
        # First node whose assigned or pending roles are NOT fully
        # contained in the new role set, i.e. a node still using a
        # role that the update would remove.
        node = db().query(models.Node).filter(
            models.Node.cluster_id.in_(clusters)).filter(
                sa.not_(
                    sa.and_(
                        models.Node.roles.contained_by(new_roles_array),
                        models.Node.pending_roles.contained_by(
                            new_roles_array)))).first()
        if node:
            used_role = set(node.roles + node.pending_roles)
            used_role -= new_roles
            raise errors.CannotDelete(
                "Cannot delete roles already assigned "
                "to nodes: {0}".format(','.join(used_role)))
    return d
def create_for_model(cls, data, instance, graph_type=None):
    """Create graph attached to model instance with given type.

    This method is recommended to create or update graphs.

    :param data: graph data
    :type data: dict
    :param instance: external model
    :type instance: models.Cluster|models.Plugin|models.Release
    :param graph_type: graph type, default is 'default'
    :type graph_type: basestring
    :return: models.DeploymentGraph
    :raises errors.AlreadyExists: if a graph of this type is attached
    """
    if graph_type is None:
        graph_type = consts.DEFAULT_DEPLOYMENT_GRAPH_TYPE

    # Guard clause: refuse to overwrite an existing graph of this type.
    if cls.get_for_model(instance, graph_type=graph_type):
        raise errors.AlreadyExists(
            'Graph of given type already exists for this model.')

    graph = cls.create(data)
    cls.attach_to_model(graph, instance, graph_type)
    return graph
def _check_duplicate_network_name(cls, node_group, network_name):
    """Ensure the node group has no network with the given name.

    :param node_group: NodeGroup model instance
    :param network_name: name of the network being added
    :raises errors.AlreadyExists: if the name is taken in the group
    """
    existing = objects.NetworkGroup.get_from_node_group_by_name(
        node_group.id, network_name)
    if existing:
        raise errors.AlreadyExists("Network with name {0} already exists "
                                   "in node group {1}".format(
                                       network_name, node_group.name))