def _check_swift_data_source_create(data):
    if len(data['url']) == 0:
        raise ex.InvalidException(_("Swift url must not be empty"))
    url = urlparse.urlparse(data['url'])
    if url.scheme != "swift":
        raise ex.InvalidException(_("URL scheme must be 'swift'"))

    # The swift url suffix does not have to be included in the netloc.
    # However, if the swift suffix indicator is part of the netloc then
    # we require the right suffix.
    # Additionally, the path must be more than '/'
    if (su.SWIFT_URL_SUFFIX_START in url.netloc and not url.netloc.endswith(
            su.SWIFT_URL_SUFFIX)) or len(url.path) <= 1:
        raise ex.InvalidException(
            _("URL must be of the form swift://container%s/object")
            % su.SWIFT_URL_SUFFIX)

    if not CONF.use_domain_for_proxy_users and "credentials" not in data:
        raise ex.InvalidCredentials(_("No credentials provided for Swift"))
    if not CONF.use_domain_for_proxy_users and (
            "user" not in data["credentials"]):
        raise ex.InvalidCredentials(
            _("User is not provided in credentials for Swift"))
    if not CONF.use_domain_for_proxy_users and (
            "password" not in data["credentials"]):
        raise ex.InvalidCredentials(
            _("Password is not provided in credentials for Swift"))

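# A minimal standalone sketch of the suffix rule checked above, assuming
# su.SWIFT_URL_SUFFIX_START == '.' and su.SWIFT_URL_SUFFIX == '.sahara'
# (illustrative values; the real ones live in sahara.swift.utils).
from urllib.parse import urlparse as _urlparse

SWIFT_URL_SUFFIX_START = '.'   # assumed value
SWIFT_URL_SUFFIX = '.sahara'   # assumed value


def _swift_url_ok(raw_url):
    url = _urlparse(raw_url)
    # a suffix indicator in the netloc demands the full, correct suffix
    bad_suffix = (SWIFT_URL_SUFFIX_START in url.netloc and
                  not url.netloc.endswith(SWIFT_URL_SUFFIX))
    return url.scheme == 'swift' and not bad_suffix and len(url.path) > 1


assert _swift_url_ok('swift://container.sahara/object')    # suffixed netloc
assert _swift_url_ok('swift://container/object')           # suffix omitted
assert not _swift_url_ok('swift://container.wrong/object')
assert not _swift_url_ok('swift://container.sahara/')      # path is only '/'
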
def _check_maprfs_data_source_create(data):
    if len(data['url']) == 0:
        raise ex.InvalidException(_("MapR FS url must not be empty"))
    url = urlparse.urlparse(data['url'])
    if url.scheme:
        if url.scheme != "maprfs":
            raise ex.InvalidException(_("URL scheme must be 'maprfs'"))

def _check_hdfs_data_source_create(data):
    if len(data['url']) == 0:
        raise ex.InvalidException("HDFS url must not be empty")
    url = urlparse.urlparse(data['url'])
    if url.scheme != "hdfs":
        raise ex.InvalidException("URL scheme must be 'hdfs'")
    if not url.hostname:
        raise ex.InvalidException("HDFS url is incorrect, "
                                  "cannot determine a hostname")

def check_node_processes(plugin_name, version, node_processes):
    if len(set(node_processes)) != len(node_processes):
        raise ex.InvalidException("Duplicates in node processes "
                                  "have been detected")
    plugin_processes = []
    for process in plugin_base.PLUGINS.get_plugin(
            plugin_name).get_node_processes(version).values():
        plugin_processes += process

    if not set(node_processes).issubset(set(plugin_processes)):
        raise ex.InvalidException("Plugin supports the following "
                                  "node processes: %s" % plugin_processes)

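# The duplicate check above relies on set() collapsing repeats; the process
# names below are just illustrative Hadoop examples:
node_processes = ['namenode', 'datanode', 'datanode']
assert len(set(node_processes)) != len(node_processes)  # duplicates detected
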
def check_node_group_template_usage(node_group_template_id, **kwargs):
    cluster_users = []
    template_users = []

    for cluster in api.get_clusters():
        if (node_group_template_id in
                [node_group.node_group_template_id
                 for node_group in cluster.node_groups]):
            cluster_users += [cluster.name]

    for cluster_template in api.get_cluster_templates():
        if (node_group_template_id in
                [node_group.node_group_template_id
                 for node_group in cluster_template.node_groups]):
            template_users += [cluster_template.name]

    if cluster_users or template_users:
        raise ex.InvalidException(
            _("Node group template %(template)s is in use by "
              "cluster templates: %(users)s; and clusters: %(clusters)s") %
            {'template': node_group_template_id,
             'users': template_users and ', '.join(template_users) or 'N/A',
             'clusters': cluster_users and ', '.join(cluster_users) or 'N/A'})

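# The "x and ', '.join(x) or 'N/A'" expressions above are the pre-ternary
# and/or idiom, equivalent here to "', '.join(x) if x else 'N/A'" because
# the joined string is non-empty whenever the list is. A quick illustration
# with made-up template names:
template_users = []
assert (template_users and ', '.join(template_users) or 'N/A') == 'N/A'
template_users = ['tmpl-a', 'tmpl-b']
assert (template_users and ', '.join(template_users)
        or 'N/A') == 'tmpl-a, tmpl-b'
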
def check_node_group_configs(plugin_name, hadoop_version, ng_configs,
                             plugin_configs=None):
    # TODO(aignatov): Should have scope and config type validations
    pl_confs = plugin_configs or _get_plugin_configs(plugin_name,
                                                     hadoop_version)
    for app_target, configs in ng_configs.items():
        if app_target not in pl_confs:
            raise ex.InvalidException("Plugin doesn't contain applicable "
                                      "target '%s'" % app_target)
        for name, values in configs.items():
            if name not in pl_confs[app_target]:
                raise ex.InvalidException("Plugin's applicable target '%s' "
                                          "doesn't contain config with name "
                                          "'%s'" % (app_target, name))

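# Hypothetical shape of the ng_configs argument walked by the loop above:
# applicable target -> {config name: value}. The target and config names
# below are made up for illustration.
ng_configs = {
    'HDFS': {'dfs.replication': 2},
    'MapReduce': {'mapred.map.tasks': 4},
}
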
def check_cinder_exists():
    services = [service.name for service in
                keystone.client_for_admin().services.list()]
    if 'cinder' not in services:
        raise ex.InvalidException(_("Cinder is not supported"))

def check_plugin_supports_version(p_name, version):
    if version not in plugin_base.PLUGINS.get_plugin(p_name).get_versions():
        raise ex.InvalidException(
            _("Requested plugin '%(name)s' doesn't support version "
              "'%(version)s'") % {'name': p_name, 'version': version})

def check_security_groups_exist(security_groups):
    security_group_list = nova.client().security_groups.list()
    allowed_groups = set(reduce(
        operator.add, [[six.text_type(sg.id), sg.name]
                       for sg in security_group_list], []))
    for sg in security_groups:
        if sg not in allowed_groups:
            raise ex.InvalidException(_("Security group '%s' not found") % sg)

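# A minimal sketch of the flattening idiom above: reduce(operator.add, ...)
# concatenates the per-group [id, name] pairs into one flat list, so a
# security group may be referenced by either its id or its name. The ids
# and names below are made up.
import operator
from functools import reduce

pairs = [['7f3a', 'default'], ['9c1b', 'hadoop-sg']]
allowed = set(reduce(operator.add, pairs, []))
assert allowed == {'7f3a', 'default', '9c1b', 'hadoop-sg'}
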
def check_resize(cluster, r_node_groups):
    cluster_ng_names = [ng.name for ng in cluster.node_groups]

    check_duplicates_node_groups_names(r_node_groups)

    for ng in r_node_groups:
        if ng['name'] not in cluster_ng_names:
            raise ex.InvalidException("Cluster doesn't contain node group "
                                      "with name '%s'" % ng['name'])

def check_required_image_tags(plugin_name, hadoop_version, image_id):
    image = api.get_image(id=image_id)
    plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
    req_tags = set(plugin.get_required_image_tags(hadoop_version))
    if not req_tags.issubset(set(image.tags)):
        raise ex.InvalidException("Tags of requested image '%s' don't "
                                  "contain required tags "
                                  "['%s', '%s']" %
                                  (image_id, plugin_name, hadoop_version))

def check_cluster_scaling(data, cluster_id, **kwargs):
    cluster = api.get_cluster(id=cluster_id)

    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    if (not cluster_engine and
            not ops.get_engine_type_and_version().startswith('direct')):
        raise ex.InvalidException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": ops.get_engine_type_and_version()})

    if (cluster.sahara_info and
            cluster_engine != ops.get_engine_type_and_version()):
        raise ex.InvalidException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {"old_engine": cluster.sahara_info.get('infrastructure_engine'),
             "new_engine": ops.get_engine_type_and_version()})

    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and (
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes'))):
        raise ex.InvalidException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != 'Active':
        raise ex.InvalidException(
            _("Cluster cannot be scaled when it is not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_network_config(data['add_node_groups'])
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])

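# A condensed, standalone restatement of the two engine checks above:
# clusters without sahara_info predate Juno and may only be scaled by a
# 'direct' engine; otherwise the recorded engine must match the running
# one. The engine strings below are made up.
def can_scale(cluster_engine, current_engine):
    # cluster_engine is None for pre-Juno clusters
    if cluster_engine is None:
        return current_engine.startswith('direct')
    return cluster_engine == current_engine


assert can_scale(None, 'direct.1.0')
assert not can_scale(None, 'heat.1.0')
assert can_scale('heat.1.0', 'heat.1.0')
assert not can_scale('direct.1.0', 'heat.1.0')
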
def check_required_image_tags(plugin_name, hadoop_version, image_id):
    image = api.get_image(id=image_id)
    plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
    req_tags = set(plugin.get_required_image_tags(hadoop_version))
    if not req_tags.issubset(set(image.tags)):
        raise ex.InvalidException(
            _("Tags of requested image '%(image)s' don't contain required"
              " tags ['%(name)s', '%(version)s']") %
            {'image': image_id, 'name': plugin_name,
             'version': hadoop_version})

def _check_swift_data_source_create(data):
    if len(data['url']) == 0:
        raise ex.InvalidException("Swift url must not be empty")

    url = urlparse.urlparse(data['url'])
    if url.scheme != "swift":
        raise ex.InvalidException("URL scheme must be 'swift'")

    # We must have the suffix, and the path must be more than '/'
    if not url.netloc.endswith(su.SWIFT_URL_SUFFIX) or len(url.path) <= 1:
        raise ex.InvalidException(
            "URL must be of the form swift://container%s/object"
            % su.SWIFT_URL_SUFFIX)

    if "credentials" not in data:
        raise ex.InvalidCredentials("No credentials provided for Swift")
    if "user" not in data["credentials"]:
        raise ex.InvalidCredentials(
            "User is not provided in credentials for Swift")
    if "password" not in data["credentials"]:
        raise ex.InvalidCredentials(
            "Password is not provided in credentials for Swift")

def check_cluster_scaling(data, cluster_id, **kwargs):
    cluster = api.get_cluster(id=cluster_id)
    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and (
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes'))):
        raise ex.InvalidException(
            "Requested plugin '%s' doesn't support cluster scaling feature"
            % cluster.plugin_name)

    if cluster.status != 'Active':
        raise ex.InvalidException("Cluster cannot be scaled when it is not "
                                  "in 'Active' status. Cluster status: "
                                  + cluster.status)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_network_config(data['add_node_groups'])

def check_cluster_template_usage(cluster_template_id, **kwargs):
    users = []
    for cluster in api.get_clusters():
        if cluster_template_id == cluster.cluster_template_id:
            users.append(cluster.name)

    if users:
        raise ex.InvalidException(
            _("Cluster template %(id)s is in use by %(clusters)s") %
            {'id': cluster_template_id,
             'clusters': ', '.join(users)})

def check_job_execution(data, job_id):
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, data['cluster_id'])
    if not cluster:
        raise ex.InvalidException(
            _("Cluster with id '%s' doesn't exist") % data['cluster_id'])

    job = conductor.job_get(ctx, job_id)

    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    edp_engine = plugin.get_edp_engine(cluster, job.type)
    if not edp_engine:
        raise ex.InvalidException(
            _("Cluster with id '%(cluster_id)s' doesn't support job type "
              "'%(job_type)s'") % {"cluster_id": cluster.id,
                                   "job_type": job.type})

    edp_engine.validate_job_execution(cluster, job, data)

def check_cluster_hostnames_lengths(cluster_name, node_groups):
    for ng in node_groups:
        longest_hostname = g.generate_instance_name(cluster_name,
                                                    ng['name'], ng['count'])
        longest_hostname += '.'
        longest_hostname += CONF.node_domain
        if len(longest_hostname) > MAX_HOSTNAME_LENGTH:
            raise ex.InvalidException(
                _("Composite hostname %(host)s in provisioned cluster exceeds"
                  " maximum limit of %(limit)s characters") %
                {'host': longest_hostname,
                 'limit': MAX_HOSTNAME_LENGTH})

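# A hypothetical walk-through of the composite-hostname computation above,
# assuming g.generate_instance_name() produces names like
# "<cluster>-<node-group>-<index>"; the exact format belongs to
# sahara.utils.general and may differ.
MAX_HOSTNAME_LENGTH = 64        # assumed limit
node_domain = 'example.org'     # stands in for CONF.node_domain


def longest_hostname(cluster_name, ng_name, count):
    # the instance with the highest index yields the longest name
    return '%s-%s-%03d.%s' % (cluster_name, ng_name, count, node_domain)


name = longest_hostname('analytics', 'workers', 100)
assert name == 'analytics-workers-100.example.org'
assert len(name) <= MAX_HOSTNAME_LENGTH    # 33 characters, within the limit
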
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.InvalidException("Floating IP pool %s for node group "
                                  "'%s' not found" % (pool_id, ng_name))

def check_add_node_groups(cluster, add_node_groups):
    cluster_ng_names = [ng.name for ng in cluster.node_groups]

    check_duplicates_node_groups_names(add_node_groups)

    pl_confs = _get_plugin_configs(cluster.plugin_name,
                                   cluster.hadoop_version)

    for ng in add_node_groups:
        if ng['name'] in cluster_ng_names:
            raise ex.InvalidException("Can't add new nodegroup. Cluster "
                                      "already has nodegroup with name '%s'"
                                      % ng['name'])

        check_node_group_basic_fields(cluster.plugin_name,
                                      cluster.hadoop_version, ng, pl_confs)

def check_cluster_create(data, **kwargs):
    b.check_cluster_unique_name(data['name'])
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])

    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['name'],
                                                     data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   _("'%s' field is not found"))

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        b.check_network_config(data['node_groups'])
        b.check_cluster_hostnames_lengths(data['name'], data['node_groups'])

    neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
    if neutron_net_id:
        if not CONF.use_neutron:
            raise ex.InvalidException(
                _("'neutron_management_network' field can't be used "
                  "with 'use_neutron=False'"))
        b.check_network_exists(neutron_net_id)
    else:
        if CONF.use_neutron:
            raise ex.NotFoundException('neutron_management_network',
                                       message=_("'%s' field is not found"))

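# A hypothetical request body that would exercise the checks above; the
# field names come from the validator itself, while all values are invented
# and assume the named plugin, image, and keypair actually exist.
cluster_create_data = {
    'name': 'demo-cluster',
    'plugin_name': 'vanilla',
    'hadoop_version': '2.7.1',
    'default_image_id': '11111111-2222-3333-4444-555555555555',
    'user_keypair_id': 'demo-keypair',
    'node_groups': [
        {'name': 'master', 'count': 1},
        {'name': 'workers', 'count': 3},
    ],
}
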
def check_floatingip_pool_exists(ng_name, pool_id):
    network = None
    if CONF.use_neutron:
        network = nova.get_network(id=pool_id)
    else:
        for net in nova.client().floating_ip_pools.list():
            if net.name == pool_id:
                network = net.name
                break

    if not network:
        raise ex.InvalidException(
            _("Floating IP pool %(pool)s for node group '%(group)s' "
              "not found") % {'pool': pool_id, 'group': ng_name})

def decommission_cluster_instances(self, cluster, clusterspec, instances,
                                   ambari_info):
    raise exc.InvalidException(_('The HDP plugin does not support '
                                 'the decommissioning of nodes '
                                 'for HDP version 1.3.2'))

def check_job_binary_internal_exists(jbi_id):
    if not api.get_job_binary_internal(jbi_id):
        raise ex.InvalidException("JobBinaryInternal with id '%s'"
                                  " doesn't exist" % jbi_id)

def check_data_source_exists(data_source_id):
    if not api.get_data_source(data_source_id):
        raise ex.InvalidException("DataSource with id '%s'"
                                  " doesn't exist" % data_source_id)

def check_image_registered(image_id):
    if image_id not in [i.id for i in nova.client().images.list_registered()]:
        raise ex.InvalidException("Requested image '%s' is not registered"
                                  % image_id)

def check_plugin_supports_version(p_name, version):
    if version not in plugin_base.PLUGINS.get_plugin(p_name).get_versions():
        raise ex.InvalidException("Requested plugin '%s' doesn't support"
                                  " version '%s'" % (p_name, version))

def check_plugin_name_exists(name):
    if name not in [p.name for p in api.get_plugins()]:
        raise ex.InvalidException("Sahara doesn't contain plugin with name "
                                  "'%s'" % name)

def check_node_group_template_exists(ng_tmpl_id):
    if not api.get_node_group_template(id=ng_tmpl_id):
        raise ex.InvalidException("NodeGroup template with id '%s'"
                                  " doesn't exist" % ng_tmpl_id)

def check_cluster_template_exists(cluster_template_id):
    if not api.get_cluster_template(id=cluster_template_id):
        raise ex.InvalidException("Cluster template with id '%s'"
                                  " doesn't exist" % cluster_template_id)