def check_cluster_create(data, **kwargs):
    """Validate a cluster-creation request body.

    Runs the chain of base validation helpers (``b.*``) against *data*;
    every helper raises on failure, so returning normally means the
    payload passed all checks.

    NOTE(review): an identically named ``check_cluster_create`` is
    defined again later in this module and will shadow this one at
    import time — confirm which copy is intended to survive.
    """
    b.check_cluster_unique_name(data['name'])
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])

    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        # Node groups may come from the template when the request
        # itself does not carry any.
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   "'%s' field is not found")

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        b.check_network_config(data['node_groups'])
def check_cluster_create(data, **kwargs): b.check_cluster_unique_name(data["name"]) b.check_plugin_name_exists(data["plugin_name"]) b.check_plugin_supports_version(data["plugin_name"], data["hadoop_version"]) if data.get("cluster_template_id"): ct_id = data["cluster_template_id"] b.check_cluster_template_exists(ct_id) if not data.get("node_groups"): b.check_node_groups_in_cluster_templates(data["plugin_name"], data["hadoop_version"], ct_id) if data.get("user_keypair_id"): b.check_keypair_exists(data["user_keypair_id"]) default_image_id = _get_cluster_field(data, "default_image_id") if default_image_id: b.check_image_registered(default_image_id) b.check_required_image_tags(data["plugin_name"], data["hadoop_version"], default_image_id) else: raise ex.NotFoundException("default_image_id", "'%s' field is not found") b.check_all_configurations(data) if data.get("anti_affinity"): b.check_node_processes(data["plugin_name"], data["hadoop_version"], data["anti_affinity"]) if data.get("node_groups"): b.check_network_config(data["node_groups"])
def check_cluster_scaling(data, cluster_id, **kwargs):
    """Validate a cluster-scaling request.

    Verifies that the cluster's plugin implements both scaling
    operations, that the cluster is in 'Active' status, and that any
    requested resize/add node-group specs pass the base checks.
    Raises ex.InvalidException when a precondition fails.
    """
    cluster = api.get_cluster(id=cluster_id)

    can_scale = plugin_base.PLUGINS.is_plugin_implements(
        cluster.plugin_name, 'scale_cluster')
    can_decommission = plugin_base.PLUGINS.is_plugin_implements(
        cluster.plugin_name, 'decommission_nodes')
    # Both operations must be implemented before any scaling is allowed.
    if not (can_scale and can_decommission):
        raise ex.InvalidException(
            "Requested plugin '%s' doesn't support cluster scaling feature"
            % cluster.plugin_name)

    if cluster.status != 'Active':
        raise ex.InvalidException("Cluster cannot be scaled not in 'Active' "
                                  "status. Cluster status: " + cluster.status)

    resize_groups = data.get("resize_node_groups")
    if resize_groups:
        b.check_resize(cluster, resize_groups)

    add_groups = data.get("add_node_groups")
    if add_groups:
        b.check_add_node_groups(cluster, add_groups)
        b.check_network_config(add_groups)