def test_create(self):
    """Exercise the node group template create workflow end-to-end.

    Records the mox expectations for every API call the create
    workflow makes (flavors, plugin details, floating IP pools,
    security groups, config parsing, and the final create call),
    then POSTs the create form and asserts a clean redirect with
    one success message.
    """
    flavor = self.flavors.first()
    ngt = self.nodegroup_templates.first()
    configs = self.plugins_configs.first()
    new_name = ngt.name + '-new'
    # Stub config parsing so the workflow sees no plugin configs.
    self.mox.StubOutWithMock(
        workflow_helpers, 'parse_configs_from_context')
    api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn([flavor])
    # Plugin details are fetched repeatedly while building the form.
    api.sahara.plugin_get_version_details(IsA(http.HttpRequest),
                                          ngt.plugin_name,
                                          ngt.hadoop_version) \
        .MultipleTimes().AndReturn(configs)
    api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    workflow_helpers.parse_configs_from_context(
        IgnoreArg(), IgnoreArg()).AndReturn({})
    # The expected kwargs mirror the form values posted below:
    # ephemeral storage means all volume fields are None.
    api.sahara.nodegroup_template_create(
        IsA(http.HttpRequest),
        **{'name': new_name,
           'plugin_name': ngt.plugin_name,
           'hadoop_version': ngt.hadoop_version,
           'description': ngt.description,
           'flavor_id': flavor.id,
           'volumes_per_node': None,
           'volumes_size': None,
           'volumes_availability_zone': None,
           'node_processes': ['namenode'],
           'node_configs': {},
           'floating_ip_pool': None,
           'security_groups': [],
           'auto_security_group': True,
           'availability_zone': None}) \
        .AndReturn(True)
    self.mox.ReplayAll()
    res = self.client.post(
        CREATE_URL,
        {'nodegroup_name': new_name,
         'plugin_name': ngt.plugin_name,
         ngt.plugin_name + '_version': '1.2.1',
         'hadoop_version': ngt.hadoop_version,
         'description': ngt.description,
         'flavor': flavor.id,
         'availability_zone': None,
         'storage': 'ephemeral_drive',
         'volumes_per_node': 0,
         'volumes_size': 0,
         'volumes_availability_zone': None,
         'floating_ip_pool': None,
         'security_autogroup': True,
         'processes': 'HDFS:namenode'})
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
    self.assertMessageCount(success=1)
def test_create(self):
    """Exercise the node group template create workflow end-to-end.

    Records the mox expectations for every API call the create
    workflow makes, then POSTs the create form and asserts a clean
    redirect with one success message.  This variant does not
    involve a volumes availability zone.
    """
    flavor = self.flavors.first()
    ngt = self.nodegroup_templates.first()
    configs = self.plugins_configs.first()
    new_name = ngt.name + '-new'
    # Stub config parsing so the workflow sees no plugin configs.
    self.mox.StubOutWithMock(workflow_helpers,
                             'parse_configs_from_context')
    api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn([flavor])
    # Plugin details are fetched repeatedly while building the form.
    api.sahara.plugin_get_version_details(IsA(http.HttpRequest),
                                          ngt.plugin_name,
                                          ngt.hadoop_version) \
        .MultipleTimes().AndReturn(configs)
    api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    workflow_helpers.parse_configs_from_context(
        IgnoreArg(), IgnoreArg()).AndReturn({})
    # The expected kwargs mirror the form values posted below:
    # ephemeral storage means the volume fields are None.
    api.sahara.nodegroup_template_create(
        IsA(http.HttpRequest),
        **{'name': new_name,
           'plugin_name': ngt.plugin_name,
           'hadoop_version': ngt.hadoop_version,
           'description': ngt.description,
           'flavor_id': flavor.id,
           'volumes_per_node': None,
           'volumes_size': None,
           'node_processes': ['namenode'],
           'node_configs': {},
           'floating_ip_pool': None,
           'security_groups': [],
           'auto_security_group': True,
           'availability_zone': None}) \
        .AndReturn(True)
    self.mox.ReplayAll()
    res = self.client.post(
        CREATE_URL,
        {'nodegroup_name': new_name,
         'plugin_name': ngt.plugin_name,
         ngt.plugin_name + '_version': '1.2.1',
         'hadoop_version': ngt.hadoop_version,
         'description': ngt.description,
         'flavor': flavor.id,
         'availability_zone': None,
         'storage': 'ephemeral_drive',
         'volumes_per_node': 0,
         'volumes_size': 0,
         'floating_ip_pool': None,
         'security_autogroup': True,
         'processes': 'HDFS:namenode'})
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
    self.assertMessageCount(success=1)
def handle(self, request, context):
    """Create a cluster template from the workflow context.

    Collects one node group dict per sub-form id listed in
    ``ng_forms_ids`` and submits them with the general template
    fields to Sahara.

    Returns True on success.  On a Sahara API error the message is
    stored in ``self.error_description`` and False is returned; any
    other failure is reported via ``exceptions.handle`` and False is
    returned.
    """
    try:
        node_groups = []
        configs_dict = whelpers.parse_configs_from_context(context,
                                                           self.defaults)
        ids = json.loads(context["ng_forms_ids"])
        # One dynamic sub-form per node group; each field is keyed
        # by the form id.  (Loop var renamed from `id` to avoid
        # shadowing the builtin.)
        for form_id in ids:
            name = context["ng_group_name_" + str(form_id)]
            template_id = context["ng_template_id_" + str(form_id)]
            count = context["ng_count_" + str(form_id)]
            ng = {"name": name,
                  "node_group_template_id": template_id,
                  "count": count}
            node_groups.append(ng)
        plugin, hadoop_version = whelpers.get_plugin_and_hadoop_version(
            request)
        # TODO(nkonovalov): Fix client to support default_image_id
        saharaclient.cluster_template_create(
            request,
            context["general_cluster_template_name"],
            plugin,
            hadoop_version,
            context["general_description"],
            configs_dict,
            node_groups,
            context["anti_affinity_info"],
        )
        return True
    except api_base.APIException as e:
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request,
                          _("Cluster template creation failed"))
        return False
def handle(self, request, context):
    """Create a node group template from the workflow context.

    Translates the form fields into a
    ``saharaclient.nodegroup_template_create`` call and, when the
    workflow was started from the guided wizard, stores the new
    template in the session and redirects back to the cluster guide.

    Returns True on success; False after recording or reporting the
    error otherwise.
    """
    try:
        processes = []
        # Processes arrive as "<service>:<process>"; Sahara only
        # needs the bare process name.
        for service_process in context["general_processes"]:
            processes.append(str(service_process).split(":")[1])
        configs_dict = (
            workflow_helpers.parse_configs_from_context(
                context, self.defaults))
        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))
        volumes_per_node = None
        volumes_size = None
        volumes_availability_zone = None
        # Volume settings only apply when Cinder storage was chosen.
        if context["general_storage"] == "cinder_volume":
            volumes_per_node = context["general_volumes_per_node"]
            volumes_size = context["general_volumes_size"]
            volumes_availability_zone = \
                context["general_volumes_availability_zone"]
        ngt = saharaclient.nodegroup_template_create(
            request,
            name=context["general_nodegroup_name"],
            plugin_name=plugin,
            hadoop_version=hadoop_version,
            description=context["general_description"],
            flavor_id=context["general_flavor"],
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volumes_availability_zone=volumes_availability_zone,
            node_processes=processes,
            node_configs=configs_dict,
            floating_ip_pool=context.get("general_floating_ip_pool"),
            security_groups=context["security_groups"],
            auto_security_group=context["security_autogroup"],
            is_proxy_gateway=context["general_proxygateway"],
            availability_zone=context["general_availability_zone"])
        hlps = helpers.Helpers(request)
        # When launched from the guided wizard, remember the created
        # template and send the user back to the cluster guide.
        if hlps.is_from_guide():
            guide_type = context["general_guide_template_type"]
            request.session[guide_type + "_name"] = (
                context["general_nodegroup_name"])
            request.session[guide_type + "_id"] = ngt.id
            self.success_url = (
                "horizon:project:data_processing.wizard:cluster_guide")
        return True
    except api_base.APIException as e:
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request)
        # Previously fell off the end returning None; return False
        # explicitly like the sibling handlers.
        return False
def handle(self, request, context):
    """Create a cluster template from the workflow context.

    Rebuilds each node group either from its base64-encoded JSON
    serialization (when present) or from the individual form fields,
    then submits the template to Sahara.  When started from the
    guided wizard, stores the template name in the session and
    redirects back to the cluster guide.

    Returns True on success; False after recording or reporting the
    error otherwise.
    """
    try:
        node_groups = []
        configs_dict = whelpers.parse_configs_from_context(context,
                                                           self.defaults)
        ids = json.loads(context['ng_forms_ids'])
        # One dynamic sub-form per node group; fields are keyed by
        # the form id.  (Loop var renamed from `id` to avoid
        # shadowing the builtin.)
        for form_id in ids:
            name = context['ng_group_name_' + str(form_id)]
            template_id = context['ng_template_id_' + str(form_id)]
            count = context['ng_count_' + str(form_id)]
            raw_ng = context.get("ng_serialized_" + str(form_id))
            # A serialized node group (urlsafe-base64 JSON) carries
            # the full definition; 'null' means nothing was stored.
            if raw_ng and raw_ng != 'null':
                ng = json.loads(base64.urlsafe_b64decode(str(raw_ng)))
            else:
                ng = dict()
            ng["name"] = name
            ng["count"] = count
            # The string 'None' means the group is not backed by a
            # node group template.
            if template_id and template_id != u'None':
                ng["node_group_template_id"] = template_id
            node_groups.append(ng)
        plugin, hadoop_version = whelpers.\
            get_plugin_and_hadoop_version(request)
        # TODO(nkonovalov): Fix client to support default_image_id
        saharaclient.cluster_template_create(
            request,
            context["general_cluster_template_name"],
            plugin,
            hadoop_version,
            context["general_description"],
            configs_dict,
            node_groups,
            context["anti_affinity_info"],
        )
        hlps = helpers.Helpers(request)
        # When launched from the guided wizard, remember the template
        # name and send the user back to the cluster guide.
        if hlps.is_from_guide():
            request.session["guide_cluster_template_name"] = (
                context["general_cluster_template_name"])
            self.success_url = (
                "horizon:project:data_processing.wizard:cluster_guide")
        return True
    except api_base.APIException as e:
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request,
                          _("Cluster template creation failed"))
        return False
def handle(self, request, context):
    """Update an existing node group template from the workflow context.

    Translates the form fields into a
    ``saharaclient.nodegroup_template_update`` call for
    ``self.template_id``.

    Returns True on success; False after recording or reporting the
    error otherwise.
    """
    try:
        processes = []
        # Processes arrive as "<service>:<process>"; Sahara only
        # needs the bare process name.
        for service_process in context["general_processes"]:
            processes.append(str(service_process).split(":")[1])
        configs_dict = (
            workflow_helpers.parse_configs_from_context(
                context, self.defaults))
        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))
        volumes_per_node = None
        volumes_size = None
        volumes_availability_zone = None
        # Volume settings only apply when Cinder storage was chosen.
        if context["general_storage"] == "cinder_volume":
            volumes_per_node = context["general_volumes_per_node"]
            volumes_size = context["general_volumes_size"]
            volumes_availability_zone = \
                context["general_volumes_availability_zone"]
        saharaclient.nodegroup_template_update(
            request=request,
            ngt_id=self.template_id,
            name=context["general_nodegroup_name"],
            plugin_name=plugin,
            hadoop_version=hadoop_version,
            flavor_id=context["general_flavor"],
            description=context["general_description"],
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volumes_availability_zone=volumes_availability_zone,
            node_processes=processes,
            node_configs=configs_dict,
            floating_ip_pool=context.get("general_floating_ip_pool"),
            security_groups=context["security_groups"],
            auto_security_group=context["security_autogroup"],
            availability_zone=context["general_availability_zone"])
        return True
    except api_base.APIException as e:
        # str(e), not str(e.message): BaseException.message was
        # deprecated in 2.6 and removed in Python 3.
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request)
        # Previously fell off the end returning None; return False
        # explicitly like the sibling handlers.
        return False
def handle(self, request, context):
    """Update an existing node group template from the workflow context.

    Translates the form fields into a
    ``saharaclient.nodegroup_template_update`` call for
    ``self.template_id``.

    Returns True on success; False after recording or reporting the
    error otherwise.
    """
    try:
        processes = []
        # Processes arrive as "<service>:<process>"; Sahara only
        # needs the bare process name.
        for service_process in context["general_processes"]:
            processes.append(str(service_process).split(":")[1])
        configs_dict = (workflow_helpers.parse_configs_from_context(
            context, self.defaults))
        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))
        volumes_per_node = None
        volumes_size = None
        volumes_availability_zone = None
        # Volume settings only apply when Cinder storage was chosen.
        if context["general_storage"] == "cinder_volume":
            volumes_per_node = context["general_volumes_per_node"]
            volumes_size = context["general_volumes_size"]
            volumes_availability_zone = \
                context["general_volumes_availability_zone"]
        saharaclient.nodegroup_template_update(
            request=request,
            ngt_id=self.template_id,
            name=context["general_nodegroup_name"],
            plugin_name=plugin,
            hadoop_version=hadoop_version,
            flavor_id=context["general_flavor"],
            description=context["general_description"],
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volumes_availability_zone=volumes_availability_zone,
            node_processes=processes,
            node_configs=configs_dict,
            floating_ip_pool=context.get("general_floating_ip_pool"),
            security_groups=context["security_groups"],
            auto_security_group=context["security_autogroup"],
            availability_zone=context["general_availability_zone"])
        return True
    except api_base.APIException as e:
        # str(e), not str(e.message): BaseException.message was
        # deprecated in 2.6 and removed in Python 3.
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request)
        # Previously fell off the end returning None; return False
        # explicitly like the sibling handlers.
        return False
def handle(self, request, context):
    """Update an existing cluster template from the workflow context.

    Rebuilds each node group either from its base64-encoded JSON
    serialization (when present) or from the individual form fields,
    then submits the update for ``self.cluster_template_id``.

    Returns True on success; False after recording or reporting the
    error otherwise.
    """
    try:
        node_groups = []
        configs_dict = whelpers.parse_configs_from_context(context,
                                                           self.defaults)
        ids = json.loads(context['ng_forms_ids'])
        # One dynamic sub-form per node group; fields are keyed by
        # the form id.  (Loop var renamed from `id` to avoid
        # shadowing the builtin.)
        for form_id in ids:
            name = context['ng_group_name_' + str(form_id)]
            template_id = context['ng_template_id_' + str(form_id)]
            count = context['ng_count_' + str(form_id)]
            raw_ng = context.get("ng_serialized_" + str(form_id))
            # A serialized node group (urlsafe-base64 JSON) carries
            # the full definition; 'null' means nothing was stored.
            if raw_ng and raw_ng != 'null':
                ng = json.loads(base64.urlsafe_b64decode(str(raw_ng)))
            else:
                ng = dict()
            ng["name"] = name
            ng["count"] = count
            # The string 'None' means the group is not backed by a
            # node group template.
            if template_id and template_id != u'None':
                ng["node_group_template_id"] = template_id
            node_groups.append(ng)
        plugin, hadoop_version = whelpers. \
            get_plugin_and_hadoop_version(request)
        saharaclient.cluster_template_update(
            request=request,
            ct_id=self.cluster_template_id,
            name=context["general_cluster_template_name"],
            plugin_name=plugin,
            hadoop_version=hadoop_version,
            description=context["general_description"],
            cluster_configs=configs_dict,
            node_groups=node_groups,
            anti_affinity=context["anti_affinity_info"],
        )
        return True
    except exceptions.Conflict as e:
        # NOTE(review): sibling handlers catch api_base.APIException
        # here — confirm exceptions.Conflict is the intended type for
        # update failures.
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request,
                          _("Cluster template update failed"))
        return False
def handle(self, request, context):
    """Create a cluster template from the workflow context.

    Collects one node group dict per sub-form id listed in
    ``ng_forms_ids`` and submits them with the general template
    fields to Sahara.

    Returns True on success; False after recording or reporting the
    error otherwise.
    """
    try:
        node_groups = []
        configs_dict = whelpers.parse_configs_from_context(context,
                                                           self.defaults)
        ids = json.loads(context['ng_forms_ids'])
        # One dynamic sub-form per node group; fields are keyed by
        # the form id.  (Loop var renamed from `id` to avoid
        # shadowing the builtin.)
        for form_id in ids:
            name = context['ng_group_name_' + str(form_id)]
            template_id = context['ng_template_id_' + str(form_id)]
            count = context['ng_count_' + str(form_id)]
            ng = {"name": name,
                  "node_group_template_id": template_id,
                  "count": count}
            node_groups.append(ng)
        plugin, hadoop_version = whelpers.\
            get_plugin_and_hadoop_version(request)
        # TODO(nkonovalov): Fix client to support default_image_id
        saharaclient.cluster_template_create(
            request,
            context["general_cluster_template_name"],
            plugin,
            hadoop_version,
            context["general_description"],
            configs_dict,
            node_groups,
            context["anti_affinity_info"],
        )
        return True
    except api_base.APIException as e:
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request,
                          _("Cluster template creation failed"))
        return False
def handle(self, request, context):
    """Create a node group template from the workflow context.

    Translates the form fields into a
    ``saharaclient.nodegroup_template_create`` call.

    Returns True on success; False after recording or reporting the
    error otherwise.
    """
    try:
        processes = []
        # Processes arrive as "<service>:<process>"; Sahara only
        # needs the bare process name.
        for service_process in context["general_processes"]:
            processes.append(str(service_process).split(":")[1])
        configs_dict = whelpers.parse_configs_from_context(context,
                                                           self.defaults)
        plugin, hadoop_version = whelpers.\
            get_plugin_and_hadoop_version(request)
        volumes_per_node = None
        volumes_size = None
        # Volume settings only apply when Cinder storage was chosen.
        if context["general_storage"] == "cinder_volume":
            volumes_per_node = context["general_volumes_per_node"]
            volumes_size = context["general_volumes_size"]
        saharaclient.nodegroup_template_create(
            request,
            name=context["general_nodegroup_name"],
            plugin_name=plugin,
            hadoop_version=hadoop_version,
            description=context["general_description"],
            flavor_id=context["general_flavor"],
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            node_processes=processes,
            node_configs=configs_dict,
            floating_ip_pool=context.get("general_floating_ip_pool",
                                         None))
        return True
    except api_base.APIException as e:
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request)
        # Previously fell off the end returning None; return False
        # explicitly like the sibling handlers.
        return False
def test_update(self):
    """Exercise the node group template edit workflow end-to-end.

    Records the mox expectations for every API call the edit
    workflow makes (cinder AZ support, flavors, plugin details,
    networks, the template fetch, and the final update call), then
    POSTs the edit form and asserts a clean redirect with one
    success message.
    """
    flavor = self.flavors.first()
    ngt = self.nodegroup_templates.first()
    configs = self.plugins_configs.first()
    new_name = ngt.name + '-updated'
    UPDATE_URL = reverse(
        'horizon:project:data_processing.nodegroup_templates:edit',
        kwargs={'template_id': ngt.id})
    # Stub config parsing so the workflow sees no plugin configs.
    self.mox.StubOutWithMock(
        workflow_helpers, 'parse_configs_from_context')
    api.cinder.extension_supported(IsA(http.HttpRequest),
                                   'AvailabilityZones') \
        .AndReturn(True)
    api.cinder.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.cinder.volume_type_list(IsA(http.HttpRequest))\
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn([flavor])
    # Plugin details are fetched repeatedly while building the form.
    api.sahara.plugin_get_version_details(IsA(http.HttpRequest),
                                          ngt.plugin_name,
                                          ngt.hadoop_version) \
        .MultipleTimes().AndReturn(configs)
    api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    workflow_helpers.parse_configs_from_context(
        IgnoreArg(), IgnoreArg()).AndReturn({})
    # The edit view loads the existing template first.
    api.sahara.nodegroup_template_get(IsA(http.HttpRequest), ngt.id) \
        .AndReturn(ngt)
    # Expected update kwargs mirror the form values posted below:
    # ephemeral storage means all volume fields are None.
    api.sahara.nodegroup_template_update(
        request=IsA(http.HttpRequest),
        ngt_id=ngt.id,
        name=new_name,
        plugin_name=ngt.plugin_name,
        hadoop_version=ngt.hadoop_version,
        flavor_id=flavor.id,
        description=ngt.description,
        volumes_per_node=None,
        volumes_size=None,
        volumes_availability_zone=None,
        node_processes=['namenode'],
        node_configs={},
        floating_ip_pool=None,
        security_groups=[],
        auto_security_group=True,
        availability_zone=None).AndReturn(True)
    self.mox.ReplayAll()
    res = self.client.post(
        UPDATE_URL,
        {'ng_id': ngt.id,
         'nodegroup_name': new_name,
         'plugin_name': ngt.plugin_name,
         ngt.plugin_name + '_version': '1.2.1',
         'hadoop_version': ngt.hadoop_version,
         'description': ngt.description,
         'flavor': flavor.id,
         'availability_zone': None,
         'storage': 'ephemeral_drive',
         'volumes_per_node': 0,
         'volumes_size': 0,
         'volumes_availability_zone': None,
         'floating_ip_pool': None,
         'security_autogroup': True,
         'processes': 'HDFS:namenode'})
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
    self.assertMessageCount(success=1)
def test_update(self):
    """Exercise the node group template edit workflow end-to-end.

    Records the mox expectations for every API call the edit
    workflow makes, then POSTs the edit form and asserts a clean
    redirect with one success message.  This variant does not
    expect a cinder volume type listing.
    """
    flavor = self.flavors.first()
    ngt = self.nodegroup_templates.first()
    configs = self.plugins_configs.first()
    new_name = ngt.name + '-updated'
    UPDATE_URL = reverse(
        'horizon:project:data_processing.nodegroup_templates:edit',
        kwargs={'template_id': ngt.id})
    # Stub config parsing so the workflow sees no plugin configs.
    self.mox.StubOutWithMock(
        workflow_helpers, 'parse_configs_from_context')
    api.cinder.extension_supported(IsA(http.HttpRequest),
                                   'AvailabilityZones') \
        .AndReturn(True)
    api.cinder.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn([flavor])
    # Plugin details are fetched repeatedly while building the form.
    api.sahara.plugin_get_version_details(IsA(http.HttpRequest),
                                          ngt.plugin_name,
                                          ngt.hadoop_version) \
        .MultipleTimes().AndReturn(configs)
    api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    workflow_helpers.parse_configs_from_context(
        IgnoreArg(), IgnoreArg()).AndReturn({})
    # The edit view loads the existing template first.
    api.sahara.nodegroup_template_get(IsA(http.HttpRequest), ngt.id) \
        .AndReturn(ngt)
    # Expected update kwargs mirror the form values posted below:
    # ephemeral storage means all volume fields are None.
    api.sahara.nodegroup_template_update(
        request=IsA(http.HttpRequest),
        ngt_id=ngt.id,
        name=new_name,
        plugin_name=ngt.plugin_name,
        hadoop_version=ngt.hadoop_version,
        flavor_id=flavor.id,
        description=ngt.description,
        volumes_per_node=None,
        volumes_size=None,
        volumes_availability_zone=None,
        node_processes=['namenode'],
        node_configs={},
        floating_ip_pool=None,
        security_groups=[],
        auto_security_group=True,
        availability_zone=None).AndReturn(True)
    self.mox.ReplayAll()
    res = self.client.post(
        UPDATE_URL,
        {'ng_id': ngt.id,
         'nodegroup_name': new_name,
         'plugin_name': ngt.plugin_name,
         ngt.plugin_name + '_version': '1.2.1',
         'hadoop_version': ngt.hadoop_version,
         'description': ngt.description,
         'flavor': flavor.id,
         'availability_zone': None,
         'storage': 'ephemeral_drive',
         'volumes_per_node': 0,
         'volumes_size': 0,
         'volumes_availability_zone': None,
         'floating_ip_pool': None,
         'security_autogroup': True,
         'processes': 'HDFS:namenode'})
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
    self.assertMessageCount(success=1)