Example #1
0
    def validate(cls, data, instance=None):
        """Validate a network template payload.

        Parses the raw data via the parent validator, checks it against
        the network template schema and then verifies that every template
        referenced in ``templates_for_node_role`` is actually defined in
        the node group's ``network_scheme``.

        :param data: raw request payload
        :param instance: unused, kept for interface compatibility
        :returns: parsed template dict
        :raises: errors.InvalidData
        """
        parsed = super(NetworkTemplateValidator, cls).validate(data)
        cls.validate_schema(parsed, network_template.NETWORK_TEMPLATE)

        if not parsed['adv_net_template']:
            raise errors.InvalidData("No node groups are defined")

        for ng_name, node_group in six.iteritems(parsed['adv_net_template']):
            defined_templates = set(six.iterkeys(node_group['network_scheme']))

            # Every template referenced by any role must exist in
            # network_scheme.
            missing = set(
                template
                for templates in six.itervalues(
                    node_group['templates_for_node_role'])
                for template in templates
                if template not in defined_templates
            )

            if missing:
                raise errors.InvalidData(
                    "Requested templates {0} were not found for node "
                    "group {1}".format(', '.join(missing), ng_name))

            if not defined_templates:
                raise errors.InvalidData(
                    "No templates are defined for node group {0}".format(
                        ng_name))

        return parsed
Example #2
0
 def check_nova_compute_duplicate_and_empty_values(cls, attributes):
     """Check 'nova_computes' attributes for empty and duplicate values."""
     # Track values already seen per attribute to detect duplicates.
     seen_values_by_attr = {
         'vsphere_cluster': set(),
         'service_name': set(),
         'target_node': set()
     }
     for nova_compute in attributes:
         for attr_name, seen_values in six.iteritems(seen_values_by_attr):
             if attr_name == 'target_node':
                 value = cls._get_target_node_id(nova_compute)
                 # 'controllers' is a shared pseudo-target, so several
                 # nova computes may legitimately use it.
                 if value == 'controllers':
                     continue
             else:
                 value = nova_compute.get(attr_name)
             if not value:
                 raise errors.InvalidData(
                     "Empty value for attribute '{0}' is not allowed".
                     format(attr_name),
                     log_message=True
                 )
             if value in seen_values:
                 raise errors.InvalidData(
                     "Duplicate value '{0}' for attribute '{1}' is "
                     "not allowed".format(value, attr_name),
                     log_message=True
                 )
             seen_values.add(value)
Example #3
0
    def validate(cls, data, cluster=None, force=False):
        """Validate a cluster attributes update request.

        Rejects updates to generated attributes and malformed 'editable'
        sections, then validates the (possibly merged) editable
        attributes against the cluster's restriction models.

        :param data: JSON string with attribute updates
        :param cluster: optional cluster the attributes belong to
        :param force: passed through to restriction checks
        :returns: the parsed update dict
        :raises: errors.InvalidData
        """
        parsed = cls.validate_json(data)
        if "generated" in parsed:
            raise errors.InvalidData(
                "It is not allowed to update generated attributes",
                log_message=True
            )
        if "editable" in parsed and not isinstance(parsed["editable"], dict):
            raise errors.InvalidData(
                "Editable attributes should be a dictionary",
                log_message=True
            )

        attrs, models = parsed, None

        if cluster is not None:
            # Merge the update into the cluster's current editable
            # attributes before running provision/restriction checks.
            attrs = objects.Cluster.get_updated_editable_attributes(
                cluster, parsed)
            cls.validate_provision(cluster, attrs)
            cls.validate_allowed_attributes(cluster, parsed, force)

            models = objects.Cluster.get_restrictions_models(
                cluster, attrs=attrs.get('editable', {}))

        cls.validate_attributes(attrs.get('editable', {}), models, force=force)

        return parsed
    def check(self):
        """Validate the deployment graph.

        Ensures the graph is acyclic and that every node referenced in
        task relations actually exists, raising InvalidData otherwise.
        """
        if not self.graph.is_acyclic():
            err = "Graph cannot be processed because it contains cycles in it:"
            # FIXME(mattymo): GraphSolver cannot be used to call this method
            err += ', '.join(
                six.moves.map(str, nx.simple_cycles(nx.DiGraph(self.graph))))
            err += '\n'
            raise errors.InvalidData(err)

        missing_tasks = []
        affected_tasks = []

        for node_key, node_attrs in six.iteritems(self.graph.node):
            if node_attrs.get('id'):
                continue
            # A node without an 'id' only exists because some task
            # referenced it; its neighbors are the tasks carrying the
            # bad reference.
            missing_tasks.append(node_key)
            affected_tasks.extend(
                self.graph.successors(node_key) +
                self.graph.predecessors(node_key))

        if missing_tasks:
            raise errors.InvalidData(
                "Tasks '{non_existing_tasks}' can't be in requires"
                "|required_for|groups|tasks for [{invalid_tasks}]"
                " because they don't exist in the graph".format(
                    non_existing_tasks=', '.join(
                        str(x) for x in sorted(missing_tasks)),
                    invalid_tasks=', '.join(
                        str(x) for x in sorted(set(affected_tasks)))))
    def _validate_env_before_execute(cls, filters):
        """Validate environment before execute configuration update"""

        # We can not pass cluster object here from handler because cluster_id
        # is passed in request data
        force = filters.get('force', False)
        cluster = objects.Cluster.get_by_uid(filters['cluster_id'],
                                             fail_if_not_found=True)
        if not force and cluster.status != consts.CLUSTER_STATUSES.operational:
            raise errors.InvalidData("Cluster should be in the status "
                                     "'operational'")

        target_nodes = objects.Cluster.get_nodes_to_update_config(
            cluster, filters.get('node_ids'), filters.get('node_role'),
            only_ready_nodes=False)

        # Partition target nodes into ready / not-ready in a single pass.
        ready_uids = set()
        not_ready_uids = set()
        for node in target_nodes:
            if node.status == consts.NODE_STATUSES.ready:
                ready_uids.add(node.uid)
            else:
                not_ready_uids.add(node.uid)

        if not ready_uids:
            raise errors.InvalidData("No nodes in status 'ready'")

        if not_ready_uids and not force:
            raise errors.InvalidData(
                "Nodes '{0}' are not in status 'ready' and can not be updated "
                "directly."
                "".format(', '.join(not_ready_uids)))
    def _convert_query_fields(cls, data):
        """Converts parameters from URL query to appropriate types

        Parameters in URL query don't care any information about data types.
        Schema validation doesn't perform any type conversion, so
        it is required to convert them before schema validation.
        """
        for field in ['cluster_id', 'node_id']:
            value = data.get(field, None)
            if value is not None:
                try:
                    data[field] = int(value)
                except ValueError:
                    raise errors.InvalidData("Invalid '{0}' value: '{1}'"
                                             .format(field, value))

        node_ids = data.get('node_ids', None)
        if node_ids is not None:
            try:
                data['node_ids'] = [int(n) for n in node_ids.split(',')]
            except ValueError:
                raise errors.InvalidData("Invalid 'node_ids' value: '{0}'"
                                         .format(node_ids))

        if 'is_active' in data:
            try:
                data['is_active'] = utils.parse_bool(data['is_active'])
            except ValueError:
                raise errors.InvalidData("Invalid 'is_active' value: '{0}'"
                                         .format(data['is_active']))
Example #7
0
    def validate(cls, data):
        """Validate a release create request.

        Requires name, version and operating_system, rejects duplicates
        by (name, version) and fills in default empty metadata sections.

        :param data: JSON string with release data
        :returns: parsed release dict
        :raises: errors.InvalidData, errors.AlreadyExists
        """
        d = cls.validate_json(data)

        required = (
            ("name", "No release name specified"),
            ("version", "No release version specified"),
            ("operating_system", "No release operating system specified"),
        )
        for field, message in required:
            if field not in d:
                raise errors.InvalidData(message, log_message=True)

        duplicate = db().query(models.Release).filter_by(
            name=d["name"], version=d["version"]).first()
        if duplicate:
            raise errors.AlreadyExists(
                "Release with the same name and version "
                "already exists",
                log_message=True)

        cls._validate_common(d)

        # Optional metadata sections default to empty dicts.
        d.setdefault("networks_metadata", {})
        d.setdefault("attributes_metadata", {})

        return d
Example #8
0
    def validate_hostname(cls, hostname, instance):
        """Validate a new hostname for a node.

        Checks the hostname syntax, forbids renaming after provisioning,
        rejects conflicts with the cluster's public TLS endpoint and
        duplicate hostnames within the cluster.

        :param hostname: requested hostname
        :param instance: node DB object
        :raises: errors.InvalidData, errors.NotAllowed, errors.AlreadyExists
        """
        if not cls.HostnameRegex.match(hostname):
            raise errors.InvalidData(
                'Hostname must consist of english characters, '
                'digits, minus signs and periods. '
                '(The following pattern must apply {})'.format(
                    base_types.FQDN['pattern']))

        # Keeping the current hostname is always allowed.
        if hostname == instance.hostname:
            return

        if instance.status != consts.NODE_STATUSES.discover:
            raise errors.NotAllowed(
                "Node hostname may be changed only before provisioning.")

        cluster = instance.cluster
        if cluster:
            public_ssl_endpoint = cluster.attributes.editable.get(
                'public_ssl', {}).get('hostname', {}).get('value', "")

            # Both the short name and the derived FQDN must stay clear of
            # the public TLS endpoint.
            conflicting_names = (
                hostname,
                objects.Node.generate_fqdn_by_hostname(hostname))
            if public_ssl_endpoint in conflicting_names:
                raise errors.InvalidData(
                    "New hostname '{0}' conflicts with public TLS endpoint".
                    format(hostname))
        if objects.Node.get_by_hostname(hostname, instance.cluster_id):
            raise errors.AlreadyExists(
                "Duplicate hostname '{0}'.".format(hostname))
Example #9
0
    def validate_roles(cls, data, node, roles):
        """Validate a roles assignment for a node.

        Ensures the node belongs to a cluster, the requested roles list
        has no duplicates and every role is available in the cluster.

        :param data: request payload (may override cluster_id)
        :param node: node DB object
        :param roles: list of requested role names
        :raises: errors.InvalidData
        """
        cluster_id = data.get('cluster_id', node.cluster_id)
        if not cluster_id:
            raise errors.InvalidData(
                "Cannot assign pending_roles to node {0}. "
                "Node doesn't belong to any cluster."
                .format(node.id), log_message=True)

        unique_roles = set(roles)
        if len(unique_roles) != len(roles):
            raise errors.InvalidData(
                "pending_roles list for node {0} contains "
                "duplicates.".format(node.id), log_message=True)

        cluster = objects.Cluster.get_by_uid(cluster_id)
        unknown_roles = unique_roles.difference(
            objects.Cluster.get_roles(cluster))

        if unknown_roles:
            raise errors.InvalidData(
                u"Roles {0} are not valid for node {1} in environment {2}"
                .format(u", ".join(sorted(unknown_roles)),
                        node.id, cluster.id),
                log_message=True
            )
Example #10
0
 def _validate_common(cls, d, instance=None):
     if not instance:
         instance = {}
     if "networks_metadata" in d:
         # TODO(enchantner): additional validation
         meta = d["networks_metadata"]["nova_network"]
         for network in meta["networks"]:
             if "name" not in network:
                 raise errors.InvalidData(
                     "Invalid network data: {0}".format(network),
                     log_message=True)
     if 'roles_metadata' in d:
         roles_meta = d['roles_metadata']
         tags_meta = d.get('tags_metadata',
                           instance.get('tags_metadata', {}))
         available_tags = set(tags_meta)
         for role_name, meta in six.iteritems(roles_meta):
             role_tags = set(meta.get('tags', []))
             missing_tags = role_tags - available_tags
             if missing_tags:
                 raise errors.InvalidData(
                     "Tags {} are present for role {}, but, absent in "
                     "release tags metadata".format(missing_tags,
                                                    role_name),
                     log_message=True)
Example #11
0
    def validate(cls, data, cluster=None, force=False):
        """Validate a cluster attributes update request.

        Rejects updates to generated attributes and malformed 'editable'
        sections, then validates the (possibly merged) editable
        attributes; restriction expressions are evaluated against an
        inline context of settings/cluster/version/networking models.

        :param data: JSON string with attribute updates
        :param cluster: optional cluster the attributes belong to
        :param force: passed through to restriction checks
        :returns: the parsed update dict
        :raises: errors.InvalidData
        """
        parsed = cls.validate_json(data)
        if "generated" in parsed:
            raise errors.InvalidData(
                "It is not allowed to update generated attributes",
                log_message=True)
        if "editable" in parsed and not isinstance(parsed["editable"], dict):
            raise errors.InvalidData(
                "Editable attributes should be a dictionary", log_message=True)

        attrs, models = parsed, None

        if cluster is not None:
            attrs = objects.Cluster.get_updated_editable_attributes(
                cluster, parsed)
            cls.validate_provision(cluster, attrs)
            cls.validate_allowed_attributes(cluster, parsed, force)

            # Context objects available to restriction expressions.
            models = {
                'settings': attrs.get('editable', {}),
                'cluster': cluster,
                'version': settings.VERSION,
                'networking_parameters': cluster.network_config,
            }

        cls.validate_attributes(attrs.get('editable', {}), models, force=force)

        return parsed
Example #12
0
    def _validate_hugepages(cls, node, attrs):
        """Validate huge page configuration for a node.

        Verifies that all requested page sizes are supported by the
        node's NUMA topology, that DPDK hugepages are consistent with
        the DPDK enablement state, and that the pages can actually be
        distributed across NUMA nodes.

        :param node: node DB object
        :param attrs: node attributes dict
        :raises: errors.InvalidData
        """
        if not objects.NodeAttributes.is_hugepages_enabled(node,
                                                           attributes=attrs):
            return

        requested_sizes = set(
            objects.NodeAttributes.total_hugepages(node, attributes=attrs))
        supported_sizes = set(
            str(size)
            for size in node.meta['numa_topology']['supported_hugepages'])

        unsupported_sizes = requested_sizes - supported_sizes
        if unsupported_sizes:
            raise errors.InvalidData(
                "Node {0} doesn't support {1} Huge Page(s), supported Huge"
                " Page(s): {2}.".format(
                    node.id, ", ".join(unsupported_sizes),
                    ", ".join(supported_sizes)))

        # NOTE(review): assumes the 'value' is present and numeric when
        # DPDK is enabled — confirm upstream schema validation guarantees
        # this.
        dpdk_pages = utils.get_in(attrs, 'hugepages', 'dpdk', 'value')
        if objects.Node.dpdk_enabled(node):
            min_dpdk_hugepages = consts.MIN_DPDK_HUGEPAGES_MEMORY
            if dpdk_pages < min_dpdk_hugepages:
                raise errors.InvalidData(
                    "Node {0} does not have enough hugepages for dpdk. "
                    "Need to allocate at least {1} MB.".format(
                        node.id, min_dpdk_hugepages))
        elif dpdk_pages != 0:
            raise errors.InvalidData("Hugepages for dpdk should be equal to 0 "
                                     "if dpdk is disabled.")

        try:
            objects.NodeAttributes.distribute_hugepages(node, attrs)
        except ValueError as exc:
            raise errors.InvalidData(exc.args[0])
Example #13
0
    def validate_collection_update(cls, data):
        """Validate a bulk notification status update payload.

        :param data: JSON string with a list of {'id', 'status'} dicts
        :returns: list of validated dicts containing only 'id' and 'status'
        :raises: errors.InvalidData if the payload is not a list, an entry
            misses 'id' or 'status', or an id is unknown
        """
        d = cls.validate_json(data)
        if not isinstance(d, list):
            raise errors.InvalidData(
                "Invalid json list",
                log_message=True
            )

        valid_d = []
        for nd in d:
            if "id" not in nd:
                raise errors.InvalidData(
                    "ID is not set correctly",
                    log_message=True
                )

            if "status" not in nd:
                # Bug fix: this branch previously reported "ID is not set
                # correctly" (copy-paste from the check above), which
                # misattributed the error to the wrong field.
                raise errors.InvalidData(
                    "Status is not set correctly",
                    log_message=True
                )

            if not objects.Notification.get_by_uid(nd["id"]):
                raise errors.InvalidData(
                    "Invalid ID specified",
                    log_message=True
                )

            valid_d.append({"id": nd["id"], "status": nd["status"]})
        return valid_d
Example #14
0
    def _validate_components(cls, release_id, components_list):
        """Validate the components selected for a release.

        Checks that every requested component belongs to the release,
        that no selected components are mutually incompatible, that each
        component's requirements are satisfied, and that all mandatory
        component types (hypervisor, network, storage) are covered.

        :param release_id: id of the release to look components up in
        :param components_list: list of selected component names
        :raises: errors.InvalidData
        """
        release = objects.Release.get_by_uid(release_id)
        release_components = objects.Release.get_all_components(release)
        components_set = set(components_list)
        found_release_components = [
            c for c in release_components if c['name'] in components_set
        ]
        found_release_components_names_set = set(
            c['name'] for c in found_release_components)

        # Any requested name that is not part of the release is invalid.
        if found_release_components_names_set != components_set:
            raise errors.InvalidData(
                u'{0} components are not related to release "{1}".'.format(
                    sorted(components_set -
                           found_release_components_names_set), release.name),
                log_message=True)

        mandatory_component_types = set(['hypervisor', 'network', 'storage'])
        for component in found_release_components:
            component_name = component['name']
            for incompatible in component.get('incompatible', []):
                # Incompatibility entries may be name patterns; resolve
                # them against the selected components to find conflicts.
                incompatible_component_names = list(
                    cls._resolve_names_for_dependency(components_set,
                                                      incompatible))
                if incompatible_component_names:
                    raise errors.InvalidData(
                        u"Incompatible components were found: "
                        u"'{0}' incompatible with {1}.".format(
                            component_name, incompatible_component_names),
                        log_message=True)

            # A component's type is the prefix before the first ':'
            # (e.g. 'hypervisor' for 'hypervisor:kvm').
            component_type = lambda x: x['name'].split(':', 1)[0]
            # groupby requires input sorted by the same key; within each
            # type group at least one requirement must be satisfied.
            for c_type, group in groupby(
                    sorted(component.get('requires', []), key=component_type),
                    component_type):
                group_components = list(group)
                for require in group_components:
                    component_exist = any(
                        cls._resolve_names_for_dependency(
                            components_set, require))
                    if component_exist:
                        break
                else:
                    # for/else: no requirement in this type group matched.
                    raise errors.InvalidData(
                        u"Requires {0} for '{1}' components were not "
                        u"satisfied.".format(
                            [c['name'] for c in group_components],
                            component_name),
                        log_message=True)
            if component_type(component) in mandatory_component_types:
                mandatory_component_types.remove(component_type(component))

        if mandatory_component_types:
            raise errors.InvalidData(
                "Components with {0} types required but wasn't found in data".
                format(sorted(mandatory_component_types)),
                log_message=True)
Example #15
0
    def validate(cls, data, cluster, relation):
        """Check that an upgrade relation exists and the cluster is its seed.

        :param data: request payload, returned unchanged on success
        :param cluster: cluster DB object
        :param relation: upgrade relation DB object or None
        :returns: data, unmodified
        :raises: errors.InvalidData
        """
        if relation is None:
            raise errors.InvalidData(
                "Relation for given cluster does not exist")

        if relation.seed_cluster_id != cluster.id:
            raise errors.InvalidData("Given cluster is not seed cluster")

        return data
Example #16
0
    def check_operational_controllers_settings(cls, input_nova_computes,
                                               db_nova_computes):
        """Check deployed nova computes settings with target = controllers.

        Raise InvalidData exception if any deployed nova computes clusters with
        target 'controllers' were added, removed or modified.

        :param input_nova_computes: new nova_compute settings
        :type input_nova_computes: list of dicts
        :param db_nova_computes: nova_computes settings stored in db
        :type db_nova_computes: list of dicts
        """
        # Index the incoming 'controllers'-targeted nova computes by
        # vSphere cluster name for O(1) lookups below.
        input_computes_by_vsphere_name = dict(
            (nc['vsphere_cluster'], nc) for nc in input_nova_computes if
            cls._get_target_node_id(nc) == 'controllers'
        )
        db_clusters_names = set()
        for db_nova_compute in db_nova_computes:
            target_name = cls._get_target_node_id(db_nova_compute)
            if target_name == 'controllers':
                vsphere_name = db_nova_compute['vsphere_cluster']
                input_nova_compute = \
                    input_computes_by_vsphere_name.get(vsphere_name)
                # A stored record missing from the request means the user
                # tried to delete it, which is forbidden.
                if not input_nova_compute:
                    raise errors.InvalidData(
                        "Nova compute instance with target 'controllers' and "
                        "vSphere cluster {0} couldn't be deleted from "
                        "operational environment.".format(vsphere_name),
                        log_message=True
                    )
                # Every stored attribute must match the incoming one;
                # 'target_node' is normalized via _get_target_node_id
                # on both sides before comparison.
                for attr, db_value in six.iteritems(db_nova_compute):
                    input_value = input_nova_compute.get(attr)
                    if attr == 'target_node':
                        db_value = cls._get_target_node_id(db_nova_compute)
                        input_value = cls._get_target_node_id(
                            input_nova_compute)
                    if db_value != input_value:
                        raise errors.InvalidData(
                            "Parameter '{0}' of nova compute instance with "
                            "vSphere cluster name '{1}' couldn't be changed".
                            format(attr, vsphere_name),
                            log_message=True
                        )
                db_clusters_names.add(vsphere_name)

        # Anything in the request but not in the db is an attempt to add
        # a new 'controllers'-targeted instance, also forbidden here.
        input_clusters_names = set(input_computes_by_vsphere_name)
        if input_clusters_names - db_clusters_names:
            raise errors.InvalidData(
                "Nova compute instances with target 'controllers' couldn't be "
                "added to operational environment. Check nova compute "
                "instances with the following vSphere cluster names: {0}".
                format(', '.join(
                    sorted(input_clusters_names - db_clusters_names))),
                log_message=True
            )
Example #17
0
    def _check_attribute(cls, metadata, attributes, new_attributes):
        """Check new_attributes is equal with attributes except editable fields

        Recursively walks the attribute structure described by ``metadata``
        and rejects any change outside fields explicitly marked as
        ``editable_for_deployed``.

        :param metadata: dict describes structure and properties of attributes
        :param attributes: attributes which is the basis for comparison
        :param new_attributes: attributes with modifications to check
        :raises: errors.InvalidData
        """
        if type(attributes) != type(new_attributes):
            raise errors.InvalidData(
                "Value type of '{0}' attribute couldn't be changed.".
                format(metadata.get('label') or metadata.get('name')),
                log_message=True
            )
        # if metadata field contains editable_for_deployed = True, attribute
        # and all its childs may be changed too. No need to check it.
        if metadata.get('editable_for_deployed'):
            return

        # no 'fields' in metadata means that attribute has no any childs(leaf)
        if 'fields' not in metadata:
            if attributes != new_attributes:
                raise errors.InvalidData(
                    "Value of '{0}' attribute couldn't be changed.".
                    format(metadata.get('label') or metadata.get('name')),
                    log_message=True
                )
            return

        # Lists are compared pairwise after sorting both sides with a
        # field-specific key, so pure reordering does not count as a change.
        fields_sort_functions = {
            'availability_zones': lambda x: x['az_name'],
            'nova_computes': lambda x: x['vsphere_cluster']
        }
        field_name = metadata['name']
        if isinstance(attributes, (list, tuple)):
            if len(attributes) != len(new_attributes):
                raise errors.InvalidData(
                    "Value of '{0}' attribute couldn't be changed.".
                    format(metadata.get('label') or metadata.get('name')),
                    log_message=True
                )
            attributes = sorted(
                attributes, key=fields_sort_functions.get(field_name))
            new_attributes = sorted(
                new_attributes, key=fields_sort_functions.get(field_name))
            for item, new_item in six.moves.zip(attributes, new_attributes):
                for field_metadata in metadata['fields']:
                    cls._check_attribute(field_metadata,
                                         item.get(field_metadata['name']),
                                         new_item.get(field_metadata['name']))
        elif isinstance(attributes, dict):
            # NOTE(review): this branch recurses using the PARENT's
            # 'field_name' as the lookup key, while the list branch above
            # uses field_metadata['name'] — looks like a possible
            # copy-paste slip; confirm against callers before changing.
            for field_metadata in metadata['fields']:
                cls._check_attribute(field_metadata,
                                     attributes.get(field_name),
                                     new_attributes.get(field_name))
Example #18
0
 def validate_release_upgrade(cls, orig_release, new_release):
     """Check that the cluster may be upgraded to the given release.

     The target release must be deployable and strictly newer than the
     original cluster's release.

     :param orig_release: release of the original cluster
     :param new_release: release to upgrade to
     :raises: errors.InvalidData
     """
     if not objects.Release.is_deployable(new_release):
         message = (
             "Upgrade to the given release ({0}) is not possible because "
             "this release is deprecated and cannot be installed.").format(
                 new_release.id)
         raise errors.InvalidData(message, log_message=True)
     # Release objects support ordering; same-or-older targets are
     # rejected.
     if orig_release >= new_release:
         message = (
             "Upgrade to the given release ({0}) is not possible because "
             "this release is equal or lower than the release of the "
             "original cluster.").format(new_release.id)
         raise errors.InvalidData(message, log_message=True)
Example #19
0
    def check_networks_are_acceptable_for_node_to_assign(
            cls, interfaces, node_db):
        """Check that assigned networks are valid for the node.

        Verifies that every network assigned on the node's interfaces
        belongs to its node group and that no available network (other
        than a legitimately skipped public one) is left unassigned.

        :param interfaces: interface data with 'assigned_networks' lists
        :param node_db: node DB object
        :raises: errors.InvalidData
        """
        # get list of available networks for the node via nodegroup
        node_group_db = node_db.nodegroup
        net_group_ids = set(n.id for n in node_group_db.networks)

        # NOTE(aroma): fuelweb_admin network is shared between
        # default nodegroups of clusters hence holds no value
        # in 'group_id' field yet still must be included into list
        # of networks available for assignment
        if node_group_db.is_default:
            fuelweb_admin_net = \
                objects.NetworkGroup.get_default_admin_network()
            net_group_ids.add(fuelweb_admin_net.id)

        # Collect every network id assigned across all interfaces.
        net_ids = set()
        for iface in interfaces:
            net_ids.update(set(net['id']
                               for net in iface['assigned_networks']))

        if net_ids:
            if not net_ids.issubset(net_group_ids):
                # Some assigned network does not belong to the node group.
                invalid_ids = net_ids - net_group_ids
                raise errors.InvalidData(
                    "Node '{0}': networks with IDs '{1}' cannot be used "
                    "because they are not in node group '{2}'".format(
                        node_db.id,
                        ', '.join(six.text_type(n) for n in invalid_ids),
                        node_group_db.name),
                    log_message=True)
            else:
                # Nodes that should not have a public network must not be
                # assigned one; such a legitimately skipped public network
                # is excluded from the "unassigned" check below.
                if not objects.Node.should_have_public(node_db):
                    public_id = next((n.id for n in node_group_db.networks
                                      if n.name == consts.NETWORKS.public),
                                     None)
                    if public_id in net_ids:
                        raise errors.InvalidData(
                            "Trying to assign public network to Node '{0}' "
                            "which should not have public network".format(
                                node_db.id),
                            log_message=True)
                    if public_id is not None:
                        net_group_ids.discard(public_id)
                unassigned_net_ids = net_group_ids - net_ids
                if unassigned_net_ids:
                    raise errors.InvalidData(
                        "Node '{0}': {1} network(s) are left unassigned".
                        format(
                            node_db.id, ",".join(
                                six.text_type(n) for n in unassigned_net_ids)),
                        log_message=True)
Example #20
0
 def validate_query(cls, statuses, transaction_types):
     """Validate task-listing query filters.

     :param statuses: set of requested task statuses (or falsy)
     :param transaction_types: set of requested task names (or falsy)
     :raises: errors.InvalidData if any value is outside the known sets
     """
     if statuses and not statuses.issubset(consts.TASK_STATUSES):
         raise errors.InvalidData(
             "Statuses parameter could only be one of: {}".format(
                 ", ".join(consts.TASK_STATUSES))
         )
     if transaction_types and not transaction_types.issubset(
             consts.TASK_NAMES):
         raise errors.InvalidData(
             "Transaction types parameter could "
             "only be one of: {}".format(
                 ", ".join(consts.TASK_NAMES))
         )
Example #21
0
    def _verify_node_dpdk_properties(cls, db_node):
        """Check DPDK prerequisites for a node.

        Requires DPDK and Nova hugepages to be configured and the
        cluster's hypervisor to be KVM.

        :param db_node: node DB object
        :raises: errors.InvalidData
        """
        node_attributes = objects.NodeAttributes
        if not node_attributes.is_dpdk_hugepages_enabled(db_node):
            raise errors.InvalidData("Hugepages for DPDK are not configured"
                                     " for node '{}'".format(db_node.id))

        if not node_attributes.is_nova_hugepages_enabled(db_node):
            raise errors.InvalidData("Hugepages for Nova are not configured"
                                     " for node '{}'".format(db_node.id))

        # check hypervisor type
        hypervisor = objects.Cluster.get_editable_attributes(
            db_node.cluster)['common']['libvirt_type']['value']
        if hypervisor != consts.HYPERVISORS.kvm:
            raise errors.InvalidData('Only KVM hypervisor works with DPDK.')
Example #22
0
    def _check_component_requires(cls, component, components_names,
                                  available_components_names):
        """Check if all component's requires is satisfied.

        :param component: target component for checking
        :type component: dict
        :param components_names: list of components names for checking
        :type components_names: list
        :param available_components_names: names of all available components
        :type available_components_names: list
        :raises: errors.InvalidData
        """
        component_requires = component.get('requires', [])
        # Two formats are supported: a plain list of names and a
        # predicate-based format; mixing both in one component is invalid.
        requires_without_predicates = [
            r.get('name') for r in component_requires]
        if all(requires_without_predicates):
            # Name-list format: group requirements by component type;
            # at least one requirement per type group must be satisfied.
            for c_type, group in groupby(sorted(component_requires,
                                                key=cls._get_component_type),
                                         cls._get_component_type):
                group_components = list(group)
                for require in group_components:
                    if cls._resolve_names_for_dependency(
                            components_names, require['name']):
                        break
                else:
                    # for/else: nothing in this type group matched.
                    raise errors.InvalidData(
                        "Component '{0}' requires any of components from {1} "
                        "set.".format(
                            component['name'],
                            sorted([c['name'] for c in group_components])),
                        log_message=True
                    )
        elif any(requires_without_predicates):
            # Some entries have 'name' and some don't: mixed format.
            raise errors.InvalidData("Component '{0}' has mixed format of "
                                     "requires.".format(component['name']))
        else:
            # Predicate format: delegate evaluation to _check_predicates.
            check_result = cls._check_predicates(component_requires,
                                                 components_names,
                                                 available_components_names)
            if check_result:
                raise errors.InvalidData(
                    "Requirements was not satisfied for component '{0}': "
                    "{1}({2})".format(
                        component['name'],
                        check_result['failed_predicate'],
                        sorted(item for item in check_result['items'])),
                    log_message=True
                )
Example #23
0
    def _validate_data(cls, data, schema):
        """Common part of validation for creating and updating configuration

        Validation fails if there are running deployment tasks in cluster.
        """

        data = super(OpenstackConfigValidator, cls).validate(data)
        cls.validate_schema(data, schema)
        cls._check_exclusive_fields(data)

        # node_id is supported for backward compatibility
        legacy_node_id = data.pop('node_id', None)
        if legacy_node_id is not None:
            data['node_ids'] = [legacy_node_id]

        cluster = objects.Cluster.get_by_uid(
            data['cluster_id'], fail_if_not_found=True)

        requested_ids = data.get('node_ids')
        if requested_ids:
            found_nodes = objects.NodeCollection.get_by_ids(
                requested_ids, cluster.id)
            missing_ids = set(requested_ids) - set(
                node.id for node in found_nodes)

            if missing_ids:
                raise errors.InvalidData(
                    "Nodes '{0}' are not assigned to cluster '{1}'".format(
                        ', '.join(str(n) for n in sorted(missing_ids)),
                        cluster.id))

        cls._check_no_running_deploy_tasks(cluster)
        return data
Exemple #24
0
    def update(cls, instance, data):
        """Update plugin *instance* from *data*, syncing deployment graphs.

        The plugin name is immutable: the sync operation locates plugin
        data on the file system by name, so renaming is rejected.
        """
        new_name = data.get('name')
        if new_name is not None and instance.name != new_name:
            raise errors.InvalidData(
                "Plugin can't be renamed. Trying to change name "
                "of the plugin {0} to {1}".format(instance.name, new_name))

        # Pull graph definitions out of the payload, keyed by graph type.
        graphs_by_type = {}
        for graph_data in data.pop("graphs", []):
            graphs_by_type[graph_data.pop('type')] = graph_data

        data.pop("deployment_tasks", [])  # could not be updated
        # We must save tags info in the roles_metadata on the update
        data = cls._process_tags(data)
        super(Plugin, cls).update(instance, data)

        # Update graphs that already exist for the plugin; create the rest.
        for graph_type, graph_data in six.iteritems(graphs_by_type):
            current = DeploymentGraph.get_for_model(
                instance, graph_type=graph_type)
            if current:
                DeploymentGraph.update(current, graph_data)
            else:
                DeploymentGraph.create_for_model(
                    graph_data, instance, graph_type)
Exemple #25
0
    def validate_update(cls, data, instance):
        """Validate an environment update payload against *instance*.

        Rejects duplicate environment names, changes to attributes listed
        in ``_blocked_for_update``, invalid modes, and invalid node
        assignments.
        """
        parsed = cls._validate_common(data, instance=instance)

        if "name" in parsed:
            # Look for another cluster (excluding this one) with same name.
            others = objects.ClusterCollection.filter_by_not(
                None, id=instance.id)
            name_taken = objects.ClusterCollection.filter_by(
                others, name=parsed["name"]).first()
            if name_taken:
                raise errors.AlreadyExists(
                    "Environment with this name already exists",
                    log_message=True
                )

        for attr in cls._blocked_for_update:
            if attr in parsed and getattr(instance, attr) != parsed[attr]:
                raise errors.InvalidData(
                    u"Changing '{0}' for environment is prohibited".format(
                        attr),
                    log_message=True
                )

        cls._validate_mode(parsed, instance.release)
        if 'nodes' in parsed:
            # parsed['nodes'] is the list of node IDs to be assigned
            # to the cluster.
            cls._validate_nodes(parsed['nodes'], instance)

        return parsed
Exemple #26
0
    def _expand_restriction(restriction):
        """Normalize restrictions into one canonical format

        :param restriction: restriction object
        :type restriction: string|dict
        :returns: dict -- restriction object in canonical format:
                    {
                        'action': 'enable|disable|hide|none'
                        'condition': 'value1 == value2',
                        'message': 'value1 shouldn't equal value2'
                    }
        :raises errors.InvalidData: if *restriction* is neither a string
            nor a non-empty dict
        """
        # 'disable' is the implicit action when none is given.
        result = {
            'action': 'disable'
        }

        if isinstance(restriction, six.string_types):
            # A bare string is the condition itself.
            result['condition'] = restriction
        elif isinstance(restriction, dict) and restriction:
            if 'condition' in restriction:
                result.update(restriction)
            else:
                # Short form: a single {condition: message} pair.
                condition, message = next(iter(restriction.items()))
                result['condition'] = condition
                result['message'] = message
        else:
            # Also rejects empty dicts, which previously crashed with
            # IndexError in list(restriction)[0] instead of reporting
            # a malformed restriction.
            raise errors.InvalidData('Invalid restriction format')

        return result
Exemple #27
0
 def validate_provision(cls, cluster, attrs):
     """Reject classic provisioning for image-based-only releases.

     # NOTE(agordeev): disable classic provisioning for 7.0 or higher
     """
     env_version = version.StrictVersion(
         cluster.release.environment_version)
     image_only_since = version.StrictVersion(consts.FUEL_IMAGE_BASED_ONLY)
     if env_version < image_only_since:
         # Releases below the threshold may still use classic provisioning.
         return

     provision_data = attrs['editable'].get('provision')
     if not provision_data:
         raise errors.InvalidData(
             u"Provisioning method is not set. Unable to continue",
             log_message=True)
     if provision_data['method']['value'] != \
             consts.PROVISION_METHODS.image:
         raise errors.InvalidData(
             u"Cannot use classic provisioning for adding "
             u"nodes to environment",
             log_message=True)
Exemple #28
0
 def validate(cls, data):
     """Parse *data* as JSON and require a known notification topic."""
     parsed = cls.validate_json(data)
     topic = parsed.get('topic')
     if topic not in consts.NOTIFICATION_TOPICS:
         raise errors.InvalidData(
             "Notification topic is not found or invalid"
         )
     return parsed
Exemple #29
0
    def validate_attributes(cls, data, models=None, force=False):
        """Validate attributes.

        :param data: attributes
        :type data: dict
        :param models: models which are used in
                       restrictions conditions
        :type models: dict
        :param force: don't check restrictions
        :type force: bool
        """
        # Validate each attribute in every dict-valued section; other
        # value types are skipped.
        for section in six.itervalues(data):
            if isinstance(section, dict):
                for name, attribute in six.iteritems(section):
                    cls.validate_attribute(name, attribute)

        # If settings are present restrictions can be checked
        if models and not force:
            restrict_err = restrictions.AttributesRestriction.check_data(
                models, data)
            if restrict_err:
                raise errors.InvalidData(
                    "Some restrictions didn't pass verification: {}".format(
                        restrict_err))
        return data
Exemple #30
0
    def sum_of_volumes_not_greater_than_disk_size(cls, data):
        """Ensure the volumes placed on each disk fit within its capacity.

        :param data: iterable of disk dicts, each carrying a 'size' value
                     and a 'volumes' list of dicts with a 'size' key
                     -- assumed from the access pattern; TODO confirm
        :raises errors.InvalidData: if the summed volume sizes on any
            disk exceed that disk's size
        """
        for disk in data:
            # Total space requested by all volumes on this disk.
            volumes_size = sum([volume['size'] for volume in disk['volumes']])

            if volumes_size > disk['size']:
                raise errors.InvalidData(u'Not enough free space on disk: %s' %
                                         disk)