Example #1
def check_cluster_scaling(data, cluster_id, **kwargs):
    ctx = context.current()
    cluster = api.get_cluster(id=cluster_id)

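    # Fail fast if the requested cluster does not exist.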
    if cluster is None:
        raise ex.NotFoundException(
            {'id': cluster_id}, _('Object with %s not found'))

    b.check_plugin_labels(
        cluster.plugin_name, cluster.hadoop_version)

    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)

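    # Engine recorded when the cluster was created; clusters from
    # before the Juno release carry no such record.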
    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    engine_type_and_version = service_api.OPS.get_engine_type_and_version()
    if (not cluster_engine and
            not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

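    # A cluster may only be scaled by the same engine that created it.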
    if (cluster.sahara_info and
            cluster_engine != engine_type_and_version):
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {"old_engine": cluster.sahara_info.get('infrastructure_engine'),
             "new_engine": engine_type_and_version})

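    # Scaling requires a plugin that implements both scale_cluster
    # and decommission_nodes.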
    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and (
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes'))):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

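    # Only clusters currently in 'Active' status may be scaled.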
    if cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

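    # Finally, validate the requested node group changes themselves.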
    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_network_config(data['add_node_groups'],
                               cluster.has_proxy_gateway())
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
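
The scaling validators above follow a validate-then-act pattern: every check runs and raises before any state changes. A minimal, self-contained sketch of that pattern (all names here are hypothetical, not Sahara's API):

class InvalidReferenceException(Exception):
    """Raised when a request references an object in an unusable state."""


def check_scaling_allowed(cluster):
    # Reduced to a single condition for brevity; the validator above
    # performs many more checks before the operation proceeds.
    if cluster["status"] != "Active":
        raise InvalidReferenceException(
            "Cluster cannot be scaled in %r status" % cluster["status"])


def scale_cluster(data, cluster):
    # All validation happens first; state changes only if it passes.
    check_scaling_allowed(cluster)
    cluster["node_count"] += sum(
        ng.get("count", 0) for ng in data.get("add_node_groups", []))
    return cluster


cluster = {"status": "Active", "node_count": 3}
scale_cluster({"add_node_groups": [{"count": 2}]}, cluster)
print(cluster["node_count"])  # 5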
Example #2
def check_cluster_scaling(data, cluster_id, **kwargs):
    ctx = context.current()
    cluster = api.get_cluster(id=cluster_id)

    if cluster is None:
        raise ex.NotFoundException({'id': cluster_id},
                                   _('Object with %s not found'))

    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)

    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    engine_type_and_version = service_api.OPS.get_engine_type_and_version()
    if (not cluster_engine
            and not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

    if (cluster.sahara_info and cluster_engine != engine_type_and_version):
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {
                "old_engine": cluster.sahara_info.get('infrastructure_engine'),
                "new_engine": engine_type_and_version
            })

    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and
            (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                      'decommission_nodes'))):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_network_config(data['add_node_groups'],
                               cluster.has_proxy_gateway())
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
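
Both scaling examples guard on the same engine-compatibility rule. Distilled into a standalone predicate (a sketch; the function name and engine strings below are made up):

def engine_compatible(cluster_engine, current_engine):
    # Pre-Juno clusters recorded no engine; they can only be scaled
    # by a 'direct' engine.
    if not cluster_engine:
        return current_engine.startswith('direct')
    # Otherwise the recorded engine must match the running one exactly.
    return cluster_engine == current_engine


print(engine_compatible(None, 'direct 1.0'))      # True
print(engine_compatible(None, 'heat 1.1'))        # False
print(engine_compatible('heat 1.1', 'heat 1.1'))  # True
print(engine_compatible('heat 1.0', 'heat 1.1'))  # False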
Example #3
def check_cluster_create(data, **kwargs):
    b.check_cluster_unique_name(data['name'])
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])
    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['name'],
                                                     data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'], default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        proxy_gateway_used = len([
            ng
            for ng in data['node_groups'] if ng.get('is_proxy_gateway', False)
        ]) > 0
        b.check_network_config(data['node_groups'], proxy_gateway_used)
        b.check_cluster_hostnames_lengths(data['name'], data['node_groups'])

    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidReferenceException(
                _("'neutron_management_network' field can't be used "
                  "with 'use_neutron=False'"))
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException('neutron_management_network',
                                       message=_("'%s' field is not found"))
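
The create validators rely on a _get_cluster_field helper not shown in these excerpts. Judging from the call sites, it plausibly reads a field from the request body and falls back to the referenced cluster template; a hedged sketch of that behavior (the template lookup is stubbed with a dict, since the real storage API is not shown):

TEMPLATES = {'tmpl-1': {'default_image_id': 'image-42'}}


def _get_cluster_field(data, field):
    # Prefer a value given directly in the request body...
    if data.get(field):
        return data[field]
    # ...otherwise fall back to the referenced cluster template, if any.
    template = TEMPLATES.get(data.get('cluster_template_id'), {})
    return template.get(field)


print(_get_cluster_field({'cluster_template_id': 'tmpl-1'},
                         'default_image_id'))  # image-42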
Example #4
File: clusters.py Project: snowind/sahara
def check_cluster_create(data, **kwargs):
    b.check_cluster_unique_name(data['name'])
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])
    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['name'],
                                                     data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        proxy_gateway_used = len([ng for ng in data['node_groups'] if
                                  ng.get('is_proxy_gateway', False)]) > 0
        b.check_network_config(data['node_groups'], proxy_gateway_used)
        b.check_cluster_hostnames_lengths(data['name'], data['node_groups'])

    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidReferenceException(
                _("'neutron_management_network' field can't be used "
                  "with 'use_neutron=False'"))
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException('neutron_management_network',
                                       _("'%s' field is not found"))
Example #5
def check_cluster_scaling(data, cluster_id, **kwargs):
    cluster = api.get_cluster(id=cluster_id)

    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    engine_type_and_version = api.OPS.get_engine_type_and_version()
    if (not cluster_engine
            and not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

    if (cluster.sahara_info and cluster_engine != engine_type_and_version):
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {
                "old_engine": cluster.sahara_info.get('infrastructure_engine'),
                "new_engine": engine_type_and_version
            })

    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and
            (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                      'decommission_nodes'))):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != 'Active':
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_network_config(data['add_node_groups'],
                               cluster.has_proxy_gateway())
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
Example #6
def check_cluster_scaling(data, cluster_id, **kwargs):
    cluster = api.get_cluster(id=cluster_id)

    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    engine_type_and_version = api.OPS.get_engine_type_and_version()
    if (not cluster_engine and
            not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

    if (cluster.sahara_info and
            cluster_engine != engine_type_and_version):
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {"old_engine": cluster.sahara_info.get('infrastructure_engine'),
             "new_engine": engine_type_and_version})

    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and (
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes'))):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != 'Active':
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_network_config(data['add_node_groups'],
                               cluster.has_proxy_gateway())
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
Example #7
def _check_cluster_create(data):
    b.check_plugin_name_exists(data["plugin_name"])
    b.check_plugin_supports_version(data["plugin_name"], data["hadoop_version"])
    b.check_plugin_labels(data["plugin_name"], data["hadoop_version"])

    if data.get("cluster_template_id"):
        ct_id = data["cluster_template_id"]
        b.check_cluster_template_exists(ct_id)
        if not data.get("node_groups"):
            b.check_node_groups_in_cluster_templates(data["name"], data["plugin_name"], data["hadoop_version"], ct_id)

    if data.get("user_keypair_id"):
        b.check_keypair_exists(data["user_keypair_id"])

    default_image_id = _get_cluster_field(data, "default_image_id")
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data["plugin_name"], data["hadoop_version"], default_image_id)
    else:
        raise ex.NotFoundException("default_image_id", _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get("anti_affinity"):
        b.check_node_processes(data["plugin_name"], data["hadoop_version"], data["anti_affinity"])

    if data.get("node_groups"):
        proxy_gateway_used = len([ng for ng in data["node_groups"] if ng.get("is_proxy_gateway", False)]) > 0
        b.check_network_config(data["node_groups"], proxy_gateway_used)
        b.check_cluster_hostnames_lengths(data["name"], data["node_groups"])

    neutron_net_id = _get_cluster_field(data, "neutron_management_network")
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidReferenceException(
                _("'neutron_management_network' field can't be used " "with 'use_neutron=False'")
            )
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException("neutron_management_network", _("'%s' field is not found"))
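
As a side note, the len([...]) > 0 proxy-gateway test seen in examples #3, #4 and #7 is more idiomatically written with any(), which also stops at the first match:

node_groups = [
    {"name": "master", "is_proxy_gateway": True},
    {"name": "worker"},
]

# Equivalent to: len([ng for ng in node_groups if ...]) > 0
proxy_gateway_used = any(
    ng.get("is_proxy_gateway", False) for ng in node_groups)
print(proxy_gateway_used)  # True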
Example #8
def check_cluster_scaling(data, cluster_id, **kwargs):
    cluster = api.get_cluster(id=cluster_id)
    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and (
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes'))):
        raise ex.InvalidException(
            "Requested plugin '%s' doesn't support cluster scaling feature"
            % cluster.plugin_name)

    if cluster.status != 'Active':
        raise ex.InvalidException("Cluster cannot be scaled not in 'Active' "
                                  "status. Cluster status: " + cluster.status)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])

        b.check_network_config(data['add_node_groups'])
Example #9
def check_cluster_scaling(data, cluster_id, **kwargs):
    cluster = api.get_cluster(id=cluster_id)
    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and
            (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                      'decommission_nodes'))):
        raise ex.InvalidException(
            "Requested plugin '%s' doesn't support cluster scaling feature" %
            cluster.plugin_name)

    if cluster.status != 'Active':
        raise ex.InvalidException("Cluster cannot be scaled not in 'Active' "
                                  "status. Cluster status: " + cluster.status)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])

        b.check_network_config(data['add_node_groups'])
Example #10
def check_cluster_create(data, **kwargs):
    b.check_cluster_unique_name(data['name'])
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])
    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   "'%s' field is not found")

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        b.check_network_config(data['node_groups'])

    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        b.check_network_exists(neutron_net_id)