def __init__(self, request, *args, **kwargs):
    """Build the node-group process selection form.

    Fetches the selected plugin/version's node processes and exposes
    them as a required checkbox field named ``processes``.
    """
    super(SelectNodeProcessesAction, self).__init__(
        request, *args, **kwargs)
    plugin, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(request))
    try:
        version_details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        node_processes = version_details.node_processes
    except Exception:
        node_processes = {}
        exceptions.handle(request,
                          _("Unable to generate process choices."))
    # Label each choice "<service>:<process>" so identically named
    # processes from different services remain distinguishable.
    process_choices = [
        (str(service) + ":" + str(process), process)
        for service, processes in node_processes.items()
        for process in processes
    ]
    self.fields["processes"] = forms.MultipleChoiceField(
        label=_("Select Node Group Processes"),
        widget=CheckboxSelectMultiple(),
        choices=process_choices,
        required=True)
def _populate_image_choices(self, request, plugin, hadoop_version):
    """Return (id, name) pairs for images usable with this plugin.

    An image qualifies when its tags include every tag the plugin
    version declares as required.
    """
    all_images = saharaclient.image_list(request)
    details = saharaclient.plugin_get_version_details(
        request, plugin, hadoop_version)
    required_tags = set(details.required_image_tags)
    choices = []
    for image in all_images:
        # issubset accepts any iterable of tags.
        if required_tags.issubset(image.tags):
            choices.append((image.id, image.name))
    return choices
def __init__(self, request, *args, **kwargs):
    """Populate the 'processes' multiple-choice field for the workflow.

    Process choices are looked up from the plugin version details; on
    API failure an empty choice list is shown and the error is handled
    via Horizon's standard mechanism.
    """
    super(SelectNodeProcessesAction, self).__init__(
        request, *args, **kwargs)
    plugin, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(request))
    try:
        details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        service_processes = details.node_processes
    except Exception:
        service_processes = {}
        exceptions.handle(request,
                          _("Unable to generate process choices."))
    choices = []
    for service, processes in service_processes.items():
        for process in processes:
            # Prefix with the owning service so equal process names
            # from different services do not collide.
            label = str(service) + ":" + str(process)
            choices.append((label, process))
    self.fields["processes"] = forms.MultipleChoiceField(
        label=_("Select Node Group Processes"),
        widget=CheckboxSelectMultiple(),
        choices=choices,
        required=True)
def _populate_image_choices(self, request, plugin, hadoop_version):
    """List (id, name) choices for images tagged for the plugin version."""
    all_images = saharaclient.image_list(request)
    details = saharaclient.plugin_get_version_details(
        request, plugin, hadoop_version)
    # Hoist the required-tag set so it is built once, not per image.
    needed = frozenset(details.required_image_tags)
    return [(image.id, image.name)
            for image in all_images
            if needed <= set(image.tags)]
def get_help_text(self):
    """Extend the base help text with plugin/version context.

    Also flags whether the selected plugin version is deprecated so the
    template can warn the user.
    """
    plugin_name, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(self.request))
    plugin = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    extra = {
        "plugin_name": plugin_name,
        "hadoop_version": hadoop_version,
        "deprecated": workflow_helpers.is_version_of_plugin_deprecated(
            plugin, hadoop_version),
    }
    return super(GeneralConfigAction, self).get_help_text(extra)
def get_targeted_cluster_configs(self, plugin_name, hadoop_version):
    """Return cluster-scope config parameters keyed by service name.

    :param plugin_name: name of the data-processing plugin
    :param hadoop_version: plugin version to query
    :returns: dict mapping each service in the plugin's node_processes
        to the parameters extracted for ('cluster', service)
    """
    plugin = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    # Iterate the mapping directly (``.keys()`` was redundant) and
    # build the result with a dict comprehension.
    return {
        service: self._extract_parameters(plugin.configs, 'cluster',
                                          service)
        for service in plugin.node_processes
    }
def get_targeted_cluster_configs(self, plugin_name, hadoop_version):
    """Return cluster-scope config parameters keyed by service name.

    :param plugin_name: name of the data-processing plugin
    :param hadoop_version: plugin version to query
    :returns: dict of service -> parameters for scope ('cluster', service)
    """
    plugin = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    # ``for service in mapping`` already iterates keys; ``.keys()`` and
    # the manual accumulator dict were redundant.
    return {
        service: self._extract_parameters(plugin.configs, 'cluster',
                                          service)
        for service in plugin.node_processes
    }
def get_general_and_service_nodegroups_parameters(self, plugin_name,
                                                  hadoop_version):
    """Return node-scope parameters, general and per-service.

    :param plugin_name: name of the data-processing plugin
    :param hadoop_version: plugin version to query
    :returns: tuple ``(general_parameters, service_parameters)`` where
        the second item maps each service to its node-scope parameters
    """
    plugin = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    general_parameters = self._extract_parameters(
        plugin.configs, 'node', 'general')
    # Iterate the mapping directly (``.keys()`` was redundant); a dict
    # comprehension replaces the manual accumulation loop.
    service_parameters = {
        service: self._extract_parameters(plugin.configs, 'node', service)
        for service in plugin.node_processes
    }
    return general_parameters, service_parameters
def populate_image_choices(self, request, context, empty_choice=False):
    """Build the image dropdown choices for the current plugin/version.

    Images qualify when tagged with every tag the plugin requires.  On
    API failure the error is surfaced via Horizon and the list is left
    empty.  With ``empty_choice`` a 'No image specified' entry leads the
    list; if no choices remain at all a placeholder entry is appended.
    """
    try:
        images = saharaclient.image_list(request)
        plugin, hadoop_version = get_plugin_and_hadoop_version(request)
        details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        required = set(details.required_image_tags)
        choices = [(image.id, image.name)
                   for image in images
                   if required.issubset(image.tags)]
    except Exception:
        choices = []
        exceptions.handle(request, _("Unable to fetch image choices."))
    if empty_choice:
        choices.insert(0, (None, _('No image specified')))
    if not choices:
        choices.append(("", _("No Images Available")))
    return choices
def populate_image_choices(self, request, context, empty_choice=False):
    """Return (id, name) image choices filtered by the plugin's tags.

    Falls back to an empty list (with the error handled by Horizon) if
    the image or plugin lookup fails; optionally prepends an explicit
    'No image specified' entry, and always guarantees a non-empty list
    by appending a 'No Images Available' placeholder.
    """
    try:
        all_images = saharaclient.image_list(request)
        plugin, hadoop_version = get_plugin_and_hadoop_version(request)
        details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        wanted = set(details.required_image_tags)
        choices = []
        for image in all_images:
            # Keep images carrying every required tag.
            if wanted <= set(image.tags):
                choices.append((image.id, image.name))
    except Exception:
        exceptions.handle(request, _("Unable to fetch image choices."))
        choices = []
    if empty_choice:
        choices = [(None, _('No image specified'))] + choices
    if not choices:
        choices.append(("", _("No Images Available")))
    return choices
def get_general_node_group_configs(self, plugin_name, hadoop_version):
    """Return the plugin's general node-scope configuration parameters."""
    details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._extract_parameters(details.configs, 'node', 'general')
def get_cluster_general_configs(self, plugin_name, hadoop_version):
    """Return the plugin's general cluster-scope configuration parameters."""
    details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._extract_parameters(details.configs, 'cluster', "general")
def get_node_processes(self, plugin_name, hadoop_version):
    """Return the node processes for the given plugin version."""
    details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._get_node_processes(details)
def get_general_node_group_configs(self, plugin_name, hadoop_version):
    """Fetch plugin version details and extract ('node', 'general') params."""
    version_details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._extract_parameters(
        version_details.configs, 'node', 'general')
def get_cluster_general_configs(self, plugin_name, hadoop_version):
    """Fetch plugin version details and extract ('cluster', 'general') params."""
    version_details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._extract_parameters(
        version_details.configs, 'cluster', "general")
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
    """Pre-populate the copy-template workflow from an existing template.

    Loads the node group template identified by
    ``context_seed["template_id"]``, runs the normal creation workflow
    via ``super().__init__``, then seeds each step's form fields with
    the source template's values.
    """
    self.template_id = context_seed["template_id"]
    self.template = saharaclient.nodegroup_template_get(request, self.template_id)
    self._set_configs_to_copy(self.template.node_configs)
    plugin = self.template.plugin_name
    # Sahara API v2 renamed 'hadoop_version' to 'plugin_version';
    # choose the attribute matching the active client version.
    if saharaclient.VERSIONS.active == '2':
        version_attr = 'plugin_version'
    else:
        version_attr = 'hadoop_version'
    hadoop_version = getattr(self.template, version_attr)
    # The parent workflow reads plugin/version from request.GET, so
    # copy the (immutable) QueryDict and inject them before super().
    request.GET = request.GET.copy()
    request.GET.update(
        {"plugin_name": plugin, version_attr: hadoop_version})
    super(CopyNodegroupTemplate, self).__init__(request, context_seed,
                                                entry_point, *args,
                                                **kwargs)
    # Locate each relevant step's form-field dict; a variable stays
    # None if its step is absent from the flow.
    g_fields = None
    snp_fields = None
    s_fields = None
    share_fields = None
    for step in self.steps:
        if isinstance(step, create_flow.GeneralConfig):
            g_fields = step.action.fields
        if isinstance(step, create_flow.SecurityConfig):
            s_fields = step.action.fields
        if isinstance(step, create_flow.SelectNodeProcesses):
            snp_fields = step.action.fields
        if isinstance(step, create_flow.SelectNodeGroupShares):
            share_fields = step.action.fields
    # General step: identity, flavor and storage settings.
    g_fields["nodegroup_name"].initial = self.template.name + "-copy"
    g_fields["description"].initial = self.template.description
    g_fields["flavor"].initial = self.template.flavor_id
    if hasattr(self.template, "availability_zone"):
        g_fields["availability_zone"].initial = (
            self.template.availability_zone)
    if hasattr(self.template, "volumes_availability_zone"):
        g_fields["volumes_availability_zone"].initial = \
            self.template.volumes_availability_zone
    storage = "cinder_volume" if self.template.volumes_per_node > 0 \
        else "ephemeral_drive"
    volumes_per_node = self.template.volumes_per_node
    volumes_size = self.template.volumes_size
    volume_type = self.template.volume_type
    volume_local_to_instance = self.template.volume_local_to_instance
    g_fields["storage"].initial = storage
    g_fields["volumes_per_node"].initial = volumes_per_node
    g_fields["volumes_size"].initial = volumes_size
    # NOTE(review): this unguarded assignment duplicates the
    # hasattr-guarded one above and would raise AttributeError when the
    # template has no volumes_availability_zone -- looks like a
    # merge/paste artifact; confirm against upstream before relying on it.
    g_fields["volumes_availability_zone"].initial = \
        self.template.volumes_availability_zone
    g_fields['volume_type'].initial = volume_type
    g_fields['volume_local_to_instance'].initial = volume_local_to_instance
    g_fields["proxygateway"].initial = self.template.is_proxy_gateway
    g_fields["use_autoconfig"].initial = self.template.use_autoconfig
    g_fields["is_public"].initial = self.template.is_public
    g_fields['is_protected'].initial = self.template.is_protected
    g_fields["image"].initial = self.template.image_id
    if self.template.floating_ip_pool:
        g_fields['floating_ip_pool'].initial = (
            self.template.floating_ip_pool)
    # Security step.
    s_fields["security_autogroup"].initial = (
        self.template.auto_security_group)
    if self.template.security_groups:
        s_fields["security_groups"].initial = dict(
            [(sg, sg) for sg in self.template.security_groups])
    # Node-processes step: rebuild "<service>:<process>" choice keys so
    # the copied template's processes come pre-checked.
    processes_dict = dict()
    try:
        plugin_details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        plugin_node_processes = plugin_details.node_processes
    except Exception:
        plugin_node_processes = dict()
        exceptions.handle(request, _("Unable to fetch plugin details."))
    for process in self.template.node_processes:
        # need to know the service owning this process to reconstruct
        # the "<service>:<process>" label (None if not found)
        _service = None
        for service, processes in plugin_node_processes.items():
            if process in processes:
                _service = service
                break
        processes_dict["%s:%s" % (_service, process)] = process
    snp_fields["processes"].initial = processes_dict
    # Shares step may be absent from the flow; seed it only if present.
    if share_fields:
        share_fields["shares"].initial = (
            self._get_share_defaults(share_fields))
def get_node_processes(self, plugin_name, hadoop_version):
    """Look up version details and delegate to ``_get_node_processes``."""
    version_details = saharaclient.plugin_get_version_details(
        self.request, plugin_name, hadoop_version)
    return self._get_node_processes(version_details)
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
    """Seed the copy-template workflow with an existing template's values.

    Fetches the source node group template, injects its plugin/version
    into ``request.GET`` (which the parent workflow reads), runs the
    normal creation workflow, then fills in each step's initial values.
    """
    self.template_id = context_seed["template_id"]
    self.template = saharaclient.nodegroup_template_get(
        request, self.template_id)
    self._set_configs_to_copy(self.template.node_configs)
    plugin = self.template.plugin_name
    # API v2 uses 'plugin_version' in place of 'hadoop_version'.
    if saharaclient.VERSIONS.active == '2':
        version_attr = 'plugin_version'
    else:
        version_attr = 'hadoop_version'
    hadoop_version = getattr(self.template, version_attr)
    # request.GET is immutable; copy before updating.
    request.GET = request.GET.copy()
    request.GET.update({
        "plugin_name": plugin,
        version_attr: hadoop_version
    })
    super(CopyNodegroupTemplate, self).__init__(request, context_seed,
                                                entry_point, *args,
                                                **kwargs)
    # Collect the form-field dicts of the steps we want to pre-fill;
    # any step missing from the flow leaves its variable as None.
    g_fields = None
    snp_fields = None
    s_fields = None
    share_fields = None
    for step in self.steps:
        if isinstance(step, create_flow.GeneralConfig):
            g_fields = step.action.fields
        if isinstance(step, create_flow.SecurityConfig):
            s_fields = step.action.fields
        if isinstance(step, create_flow.SelectNodeProcesses):
            snp_fields = step.action.fields
        if isinstance(step, create_flow.SelectNodeGroupShares):
            share_fields = step.action.fields
    # General step initial values.
    g_fields["nodegroup_name"].initial = self.template.name + "-copy"
    g_fields["description"].initial = self.template.description
    g_fields["flavor"].initial = self.template.flavor_id
    if hasattr(self.template, "availability_zone"):
        g_fields["availability_zone"].initial = (
            self.template.availability_zone)
    if hasattr(self.template, "volumes_availability_zone"):
        g_fields["volumes_availability_zone"].initial = \
            self.template.volumes_availability_zone
    storage = "cinder_volume" if self.template.volumes_per_node > 0 \
        else "ephemeral_drive"
    volumes_per_node = self.template.volumes_per_node
    volumes_size = self.template.volumes_size
    volume_type = self.template.volume_type
    volume_local_to_instance = self.template.volume_local_to_instance
    g_fields["storage"].initial = storage
    g_fields["volumes_per_node"].initial = volumes_per_node
    g_fields["volumes_size"].initial = volumes_size
    # NOTE(review): duplicate of the hasattr-guarded assignment above,
    # but unguarded -- would raise AttributeError for templates without
    # volumes_availability_zone; looks like a merge/paste artifact,
    # confirm against upstream.
    g_fields["volumes_availability_zone"].initial = \
        self.template.volumes_availability_zone
    g_fields['volume_type'].initial = volume_type
    g_fields['volume_local_to_instance'].initial = volume_local_to_instance
    g_fields["proxygateway"].initial = self.template.is_proxy_gateway
    g_fields["use_autoconfig"].initial = self.template.use_autoconfig
    g_fields["is_public"].initial = self.template.is_public
    g_fields['is_protected'].initial = self.template.is_protected
    g_fields["image"].initial = self.template.image_id
    if self.template.floating_ip_pool:
        g_fields['floating_ip_pool'].initial = (
            self.template.floating_ip_pool)
    # Security step initial values.
    s_fields["security_autogroup"].initial = (
        self.template.auto_security_group)
    if self.template.security_groups:
        s_fields["security_groups"].initial = dict([
            (sg, sg) for sg in self.template.security_groups
        ])
    # Node-processes step: map each template process back to its
    # "<service>:<process>" choice key so it comes pre-checked.
    processes_dict = dict()
    try:
        plugin_details = saharaclient.plugin_get_version_details(
            request, plugin, hadoop_version)
        plugin_node_processes = plugin_details.node_processes
    except Exception:
        plugin_node_processes = dict()
        exceptions.handle(request, _("Unable to fetch plugin details."))
    for process in self.template.node_processes:
        # need to know the service (stays None if no service lists it)
        _service = None
        for service, processes in plugin_node_processes.items():
            if process in processes:
                _service = service
                break
        processes_dict["%s:%s" % (_service, process)] = process
    snp_fields["processes"].initial = processes_dict
    # Shares step may be absent; only seed it when present.
    if share_fields:
        share_fields["shares"].initial = (
            self._get_share_defaults(share_fields))