Example #1
def check_job_execution(data, job_id):
    ctx = context.ctx()
    job_execution_info = data.get('job_execution_info', {})

    # The target cluster must exist before anything else is validated.
    cluster = conductor.cluster_get(ctx, data['cluster_id'])
    if not cluster:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%s' doesn't exist") % data['cluster_id'])

    # Make sure the cluster's plugin/version labels still allow its use.
    val_base.check_plugin_labels(cluster.plugin_name, cluster.hadoop_version)
    job = conductor.job_get(ctx, job_id)

    # The cluster's plugin must provide an EDP engine for this job type.
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    edp_engine = plugin.get_edp_engine(cluster, job.type)
    if not edp_engine:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%(cluster_id)s' doesn't support job type "
              "'%(job_type)s'") % {
                  "cluster_id": cluster.id,
                  "job_type": job.type
              })

    # Check the supplied arguments against the job's interface, then let
    # the engine run its own job-specific validation.
    j_i.check_execution_interface(data, job)
    edp_engine.validate_job_execution(cluster, job, data)

    # Scheduled executions carry extra schedule info that needs checking.
    if 'job_execution_type' in job_execution_info:
        j_type = job_execution_info.get('job_execution_type', 'workflow')
        if j_type == 'scheduled':
            check_scheduled_job_execution_info(job_execution_info)
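Note that the membership test in the final block makes the 'workflow'
default in the .get() call unreachable. Since only 'scheduled' executions
need the extra check, an equivalent and slightly tighter form would be:

def check_scheduled_type(job_execution_info):
    # Equivalent to the last block above: absent key and non-'scheduled'
    # values both fall through without extra validation.
    if job_execution_info.get('job_execution_type') == 'scheduled':
        check_scheduled_job_execution_info(job_execution_info)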
Example #2
def check_cluster_scaling(data, cluster_id, **kwargs):
    ctx = context.current()
    cluster = api.get_cluster(id=cluster_id)

    if cluster is None:
        raise ex.NotFoundException({'id': cluster_id},
                                   _('Object with %s not found'))

    b.check_plugin_labels(cluster.plugin_name, cluster.hadoop_version)

    # The caller must be allowed to update this cluster at all.
    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)

    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    # A cluster must be scaled by the same infrastructure engine that
    # created it; pre-Juno clusters carry no engine info at all.
    engine_type_and_version = service_api.OPS.get_engine_type_and_version()
    if (not cluster_engine
            and not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

    if cluster.sahara_info and cluster_engine != engine_type_and_version:
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {
                "old_engine": cluster.sahara_info.get('infrastructure_engine'),
                "new_engine": engine_type_and_version
            })

    # Scaling requires the plugin to support both growing and shrinking.
    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes')):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled when not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    # Referenced resources must still exist before the scale is attempted.
    if cluster.user_keypair_id:
        b.check_keypair_exists(cluster.user_keypair_id)

    if cluster.default_image_id:
        b.check_image_registered(cluster.default_image_id)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
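For orientation, a scaling request body exercising the last two checks might
look like the sketch below. The top-level field names come from the checks
above; the per-entry keys and values are illustrative assumptions, not taken
from this page:

data = {
    # Grow or shrink existing node groups to a target instance count.
    'resize_node_groups': [{'name': 'worker', 'count': 5}],
    # Add brand-new node groups to the running cluster.
    'add_node_groups': [{'name': 'new-worker', 'count': 2,
                         'node_group_template_id': '<template-uuid>'}],
}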
Example #3
def _check_cluster_create(data):
    # The requested plugin must exist, support the requested version and
    # carry labels that allow its use.
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])
    b.check_plugin_labels(
        data['plugin_name'], data['hadoop_version'])

    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        # Without explicit node groups, the template must provide them.
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['name'],
                                                     data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    # A default image is mandatory, either given in the request or
    # inherited from the cluster template.
    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        b.check_cluster_hostnames_lengths(data['name'], data['node_groups'])

    # With Neutron enabled a management network is required; without it
    # the field must not be supplied at all.
    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidReferenceException(
                _("'neutron_management_network' field can't be used "
                  "with 'use_neutron=False'"))
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException('neutron_management_network',
                                       _("'%s' field is not found"))
Example #4
def _check_cluster_create(data):
    # Accept the newer 'plugin_version' key, falling back to the legacy
    # 'hadoop_version' key when it is absent.
    plugin_version = 'hadoop_version'
    if data.get('plugin_version'):
        plugin_version = 'plugin_version'
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'], data[plugin_version])
    b.check_plugin_labels(data['plugin_name'], data[plugin_version])

    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        # Without explicit node groups, the template must provide them.
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['name'],
                                                     data['plugin_name'],
                                                     data[plugin_version],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'], data[plugin_version],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data[plugin_version],
                               data['anti_affinity'])

    if data.get('node_groups'):
        b.check_cluster_hostnames_lengths(data['name'], data['node_groups'])

    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidReferenceException(
                _("'neutron_management_network' field can't be used "
                  "with 'use_neutron=False'"))
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException('neutron_management_network',
                                       _("'%s' field is not found"))
Example #5
def _check_cluster_create(data):
    b.check_plugin_name_exists(data["plugin_name"])
    b.check_plugin_supports_version(data["plugin_name"],
                                    data["hadoop_version"])
    b.check_plugin_labels(data["plugin_name"], data["hadoop_version"])

    if data.get("cluster_template_id"):
        ct_id = data["cluster_template_id"]
        b.check_cluster_template_exists(ct_id)
        # Without explicit node groups, the template must provide them.
        if not data.get("node_groups"):
            b.check_node_groups_in_cluster_templates(
                data["name"], data["plugin_name"],
                data["hadoop_version"], ct_id)

    if data.get("user_keypair_id"):
        b.check_keypair_exists(data["user_keypair_id"])

    default_image_id = _get_cluster_field(data, "default_image_id")
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data["plugin_name"],
                                    data["hadoop_version"],
                                    default_image_id)
    else:
        raise ex.NotFoundException("default_image_id",
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get("anti_affinity"):
        b.check_node_processes(data["plugin_name"], data["hadoop_version"],
                               data["anti_affinity"])

    if data.get("node_groups"):
        # This variant additionally validates the network configuration
        # when any node group acts as a proxy gateway.
        proxy_gateway_used = len(
            [ng for ng in data["node_groups"]
             if ng.get("is_proxy_gateway", False)]) > 0
        b.check_network_config(data["node_groups"], proxy_gateway_used)
        b.check_cluster_hostnames_lengths(data["name"], data["node_groups"])

    neutron_net_id = _get_cluster_field(data, "neutron_management_network")
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidReferenceException(
                _("'neutron_management_network' field can't be used "
                  "with 'use_neutron=False'"))
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException("neutron_management_network",
                                       _("'%s' field is not found"))