def __init__(self, request, *args, **kwargs):
    """Build the process-selection form for the chosen plugin/version."""
    super(SelectNodeProcessesAction, self).__init__(
        request, *args, **kwargs)
    plugin, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(request))

    service_processes = {}
    try:
        details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        service_processes = details.node_processes
    except Exception:
        exceptions.handle(request,
                          _("Unable to generate process choices."))

    # Each choice value is labeled with its owning service,
    # e.g. ("HDFS:datanode", "datanode").
    choices = [(str(service) + ":" + str(proc), proc)
               for service, procs in service_processes.items()
               for proc in procs]

    self.fields["processes"] = forms.MultipleChoiceField(
        label=_("Select Node Group Processes"),
        widget=CheckboxSelectMultiple(),
        choices=choices,
        required=True)
def __init__(self, request, *args, **kwargs):
    """Build the general node group template configuration form.

    Populates process choices from the plugin's node processes,
    optionally adds a floating IP pool selector, and appends the
    plugin's general node group configuration parameters as fields.
    """
    super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
    hlps = helpers.Helpers(request)

    plugin, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(request))
    process_choices = []
    try:
        version_details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        # Choice values are the bare process names; labels are
        # prefixed with the owning service ("service:process").
        for service, processes in version_details.node_processes.items():
            for process in processes:
                process_choices.append(
                    (str(service) + ":" + str(process), process))
    except Exception:
        exceptions.handle(request,
                          _("Unable to generate process choices."))

    if not saharaclient.SAHARA_AUTO_IP_ALLOCATION_ENABLED:
        pools = network.floating_ip_pools_list(request)
        pool_choices = [(pool.id, pool.name) for pool in pools]
        # FIX: mark the "no pool" label for translation like every
        # other user-facing string in this module.
        pool_choices.insert(0, (None, _("Do not assign floating IPs")))

        self.fields['floating_ip_pool'] = forms.ChoiceField(
            label=_("Floating IP Pool"),
            choices=pool_choices,
            required=False)

    self.fields["proxygateway"] = forms.BooleanField(
        label=_("Proxy Gateway"),
        widget=forms.CheckboxInput(),
        help_text=_("Sahara will use instances of this node group to "
                    "access other cluster instances."),
        required=False)

    self.fields["processes"] = forms.MultipleChoiceField(
        label=_("Processes"),
        widget=forms.CheckboxSelectMultiple(),
        help_text=_("Processes to be launched in node group"),
        choices=process_choices)

    # Hidden fields carry the plugin selection through the workflow.
    self.fields["plugin_name"] = forms.CharField(
        widget=forms.HiddenInput(),
        initial=plugin)
    self.fields["hadoop_version"] = forms.CharField(
        widget=forms.HiddenInput(),
        initial=hadoop_version)

    node_parameters = hlps.get_general_node_group_configs(
        plugin, hadoop_version)
    for param in node_parameters:
        self.fields[param.name] = workflow_helpers.build_control(param)

    # NOTE(review): request.REQUEST is removed in Django >= 1.9;
    # confirm the supported Django version before upgrading.
    if request.REQUEST.get("guide_template_type"):
        self.fields["guide_template_type"] = forms.CharField(
            required=False,
            widget=forms.HiddenInput(),
            initial=request.REQUEST.get("guide_template_type"))
def get_targeted_cluster_configs(self, plugin_name, hadoop_version):
    """Return cluster-scope config parameters keyed by service name."""
    plugin = saharaclient.plugin_get_version_details(self.request,
                                                    plugin_name,
                                                    hadoop_version)
    # Iterating the dict yields its keys (the service names).
    return {service: self._extract_parameters(plugin.configs,
                                              'cluster', service)
            for service in plugin.node_processes}
def __init__(self, request, *args, **kwargs):
    """Build the general node group template configuration form.

    Populates process choices from the plugin's node processes,
    optionally adds a floating IP pool selector, and appends the
    plugin's general node group configuration parameters as fields.
    """
    super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
    hlps = helpers.Helpers(request)
    plugin, hadoop_version = workflow_helpers.get_plugin_and_hadoop_version(request)
    process_choices = []
    try:
        version_details = saharaclient.plugin_get_version_details(request, plugin, hadoop_version)
        # Choice values are the bare process names; labels are
        # prefixed with the owning service ("service:process").
        for service, processes in version_details.node_processes.items():
            for process in processes:
                process_choices.append((str(service) + ":" + str(process), process))
    except Exception:
        exceptions.handle(request, _("Unable to generate process choices."))

    if not saharaclient.SAHARA_AUTO_IP_ALLOCATION_ENABLED:
        pools = network.floating_ip_pools_list(request)
        pool_choices = [(pool.id, pool.name) for pool in pools]
        # FIX: mark the "no pool" label for translation like every
        # other user-facing string in this module.
        pool_choices.insert(0, (None, _("Do not assign floating IPs")))

        self.fields["floating_ip_pool"] = forms.ChoiceField(
            label=_("Floating IP Pool"), choices=pool_choices, required=False
        )
    self.fields["proxygateway"] = forms.BooleanField(
        label=_("Proxy Gateway"),
        widget=forms.CheckboxInput(),
        help_text=_("Sahara will use instances of this node group to "
                    "access other cluster instances."),
        required=False,
    )

    self.fields["processes"] = forms.MultipleChoiceField(
        label=_("Processes"),
        widget=forms.CheckboxSelectMultiple(),
        help_text=_("Processes to be launched in node group"),
        choices=process_choices,
    )

    # Hidden fields carry the plugin selection through the workflow.
    self.fields["plugin_name"] = forms.CharField(widget=forms.HiddenInput(), initial=plugin)
    self.fields["hadoop_version"] = forms.CharField(widget=forms.HiddenInput(), initial=hadoop_version)

    node_parameters = hlps.get_general_node_group_configs(plugin, hadoop_version)
    for param in node_parameters:
        self.fields[param.name] = workflow_helpers.build_control(param)

    # NOTE(review): request.REQUEST is removed in Django >= 1.9;
    # confirm the supported Django version before upgrading.
    if request.REQUEST.get("guide_template_type"):
        self.fields["guide_template_type"] = forms.CharField(
            required=False,
            widget=forms.HiddenInput(),
            initial=request.REQUEST.get("guide_template_type")
        )
def populate_image_choices(self, request, context):
    """Return (id, name) choices for images tagged for the plugin."""
    try:
        images = saharaclient.image_list(request)
        plugin, hadoop_version = (
            whelpers.get_plugin_and_hadoop_version(request))
        details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        # Only offer images that carry every tag the plugin requires.
        required = set(details.required_image_tags)
        choices = [(img.id, img.name)
                   for img in images
                   if required.issubset(set(img.tags))]
    except Exception:
        exceptions.handle(request, _("Unable to fetch image choices."))
        choices = []
    if not choices:
        choices.append(("", _("No Images Available")))
    return choices
def populate_image_choices(self, request, context):
    """List images whose tags satisfy the plugin's required tags."""
    choices = []
    try:
        all_images = saharaclient.image_list(request)
        plugin, hadoop_version = (
            whelpers.get_plugin_and_hadoop_version(request))
        details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        tags_needed = set(details.required_image_tags)
        for image in all_images:
            # Keep only images that carry every required tag.
            if tags_needed.issubset(set(image.tags)):
                choices.append((image.id, image.name))
    except Exception:
        exceptions.handle(request, _("Unable to fetch image choices."))
        choices = []
    # Fall back to a placeholder entry when nothing matched.
    return choices or [("", _("No Images Available"))]
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
    """Pre-populate the create-template workflow from an existing template.

    Loads the source node group template, injects its plugin/version
    into the request, then seeds the General and Security step fields
    with the template's current values.
    """
    self.template_id = context_seed["template_id"]
    self.template = saharaclient.nodegroup_template_get(
        request, self.template_id)
    self._set_configs_to_copy(self.template.node_configs)

    plugin = self.template.plugin_name
    hadoop_version = self.template.hadoop_version

    # The workflow steps read plugin/version from the GET parameters.
    request.GET = request.GET.copy()
    request.GET.update({
        "plugin_name": plugin,
        "hadoop_version": hadoop_version
    })

    super(CopyNodegroupTemplate, self).__init__(request, context_seed,
                                                entry_point, *args,
                                                **kwargs)

    g_fields = None
    s_fields = None
    for step in self.steps:
        if isinstance(step, create_flow.GeneralConfig):
            g_fields = step.action.fields
        if isinstance(step, create_flow.SecurityConfig):
            s_fields = step.action.fields

    g_fields["nodegroup_name"].initial = self.template.name + "-copy"
    g_fields["description"].initial = self.template.description
    g_fields["flavor"].initial = self.template.flavor_id

    if hasattr(self.template, "availability_zone"):
        g_fields["availability_zone"].initial = (
            self.template.availability_zone)
    if hasattr(self.template, "volumes_availability_zone"):
        # FIX: previously this initial was also assigned a second time
        # unconditionally, which raised AttributeError for templates
        # without volumes_availability_zone; set it only under the guard.
        g_fields["volumes_availability_zone"].initial = (
            self.template.volumes_availability_zone)

    storage = ("cinder_volume" if self.template.volumes_per_node > 0
               else "ephemeral_drive")
    g_fields["storage"].initial = storage
    g_fields["volumes_per_node"].initial = self.template.volumes_per_node
    g_fields["volumes_size"].initial = self.template.volumes_size

    if self.template.floating_ip_pool:
        g_fields['floating_ip_pool'].initial = (
            self.template.floating_ip_pool)

    s_fields["security_autogroup"].initial = (
        self.template.auto_security_group)
    if self.template.security_groups:
        s_fields["security_groups"].initial = dict([
            (sg, sg) for sg in self.template.security_groups
        ])

    processes_dict = dict()
    try:
        plugin_details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        plugin_node_processes = plugin_details.node_processes
    except Exception:
        plugin_node_processes = dict()
        exceptions.handle(request, _("Unable to fetch plugin details."))
    for process in self.template.node_processes:
        # Process choices are keyed "service:process", so look up the
        # owning service for each of the template's processes.
        _service = None
        for service, processes in plugin_node_processes.items():
            if process in processes:
                _service = service
                break
        processes_dict["%s:%s" % (_service, process)] = process
    g_fields["processes"].initial = processes_dict
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
    """Pre-populate the create-template workflow from an existing template.

    Loads the source node group template, injects its plugin/version
    into the request, then seeds the General, Security, and process
    selection step fields with the template's current values.
    """
    self.template_id = context_seed["template_id"]
    self.template = saharaclient.nodegroup_template_get(
        request, self.template_id)
    self._set_configs_to_copy(self.template.node_configs)

    plugin = self.template.plugin_name
    hadoop_version = self.template.hadoop_version

    # The workflow steps read plugin/version from the GET parameters.
    request.GET = request.GET.copy()
    request.GET.update({
        "plugin_name": plugin,
        "hadoop_version": hadoop_version
    })

    super(CopyNodegroupTemplate, self).__init__(request, context_seed,
                                                entry_point, *args,
                                                **kwargs)

    g_fields = None
    snp_fields = None
    s_fields = None
    for step in self.steps:
        if isinstance(step, create_flow.GeneralConfig):
            g_fields = step.action.fields
        if isinstance(step, create_flow.SecurityConfig):
            s_fields = step.action.fields
        if isinstance(step, create_flow.SelectNodeProcesses):
            snp_fields = step.action.fields

    g_fields["nodegroup_name"].initial = self.template.name + "-copy"
    g_fields["description"].initial = self.template.description
    g_fields["flavor"].initial = self.template.flavor_id

    if hasattr(self.template, "availability_zone"):
        g_fields["availability_zone"].initial = (
            self.template.availability_zone)
    if hasattr(self.template, "volumes_availability_zone"):
        # FIX: previously this initial was also assigned a second time
        # unconditionally, which raised AttributeError for templates
        # without volumes_availability_zone; set it only under the guard.
        g_fields["volumes_availability_zone"].initial = (
            self.template.volumes_availability_zone)

    storage = ("cinder_volume" if self.template.volumes_per_node > 0
               else "ephemeral_drive")
    g_fields["storage"].initial = storage
    g_fields["volumes_per_node"].initial = self.template.volumes_per_node
    g_fields["volumes_size"].initial = self.template.volumes_size
    g_fields["volume_type"].initial = self.template.volume_type
    g_fields["volume_local_to_instance"].initial = (
        self.template.volume_local_to_instance)
    g_fields["proxygateway"].initial = self.template.is_proxy_gateway
    g_fields["use_autoconfig"].initial = self.template.use_autoconfig

    if self.template.floating_ip_pool:
        g_fields["floating_ip_pool"].initial = (
            self.template.floating_ip_pool)

    s_fields["security_autogroup"].initial = (
        self.template.auto_security_group)
    if self.template.security_groups:
        s_fields["security_groups"].initial = dict(
            [(sg, sg) for sg in self.template.security_groups])

    processes_dict = dict()
    try:
        plugin_details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        plugin_node_processes = plugin_details.node_processes
    except Exception:
        plugin_node_processes = dict()
        exceptions.handle(request, _("Unable to fetch plugin details."))
    for process in self.template.node_processes:
        # Process choices are keyed "service:process", so look up the
        # owning service for each of the template's processes.
        _service = None
        for service, processes in plugin_node_processes.items():
            if process in processes:
                _service = service
                break
        processes_dict["%s:%s" % (_service, process)] = process
    snp_fields["processes"].initial = processes_dict
def get_general_node_group_configs(self, plugin_name, hadoop_version):
    """Return node-scope 'general' config parameters for the plugin."""
    details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._extract_parameters(details.configs, 'node', 'general')
def get_cluster_general_configs(self, plugin_name, hadoop_version):
    """Return cluster-scope 'general' config parameters for the plugin."""
    version_details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._extract_parameters(
        version_details.configs, 'cluster', "general")
def get_node_processes(self, plugin_name, hadoop_version):
    """Return the node processes declared by the given plugin version."""
    version_details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._get_node_processes(version_details)