def get(self, request, *args, **kwargs):
    time_helpers = helpers.Helpers(request)
    cluster_id = kwargs.get("cluster_id")
    need_update, not_done_count, checks = False, 0, []
    # Map health check statuses to Bootstrap label types.
    mapping_to_label_type = {'red': 'danger', 'yellow': 'warning',
                             'green': 'success', 'checking': 'info'}
    try:
        cluster = saharaclient.cluster_get(request, cluster_id)
        for check in self._get_checks(cluster):
            check['label'] = mapping_to_label_type.get(
                check['status'].lower())
            if not check['description']:
                check['description'] = _("No description")
            if check['status'] == self._status_in_progress:
                not_done_count += 1
            check['duration'] = time_helpers.get_duration(
                check['created_at'], check['updated_at'])
            checks.append(check)
    except APIException:
        # Cluster is not available; return an empty check list.
        need_update = False
        checks = []
    # Keep polling while at least one check is still in progress.
    if not_done_count > 0:
        need_update = True
    context = {"checks": checks, "need_update": need_update}
    return HttpResponse(json.dumps(context),
                        content_type='application/json')
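For reference, the view above serializes a payload of roughly the following shape; the concrete field values here are hypothetical, reconstructed from the view code rather than from API documentation:

    # Illustrative response body (values are made up):
    # {
    #     "need_update": true,
    #     "checks": [
    #         {"status": "GREEN",
    #          "label": "success",
    #          "description": "No description",
    #          "duration": "0:00:12"}
    #     ]
    # }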
def get_data(self, job_execution):
    status = job_execution.info["status"]
    end_time = None
    if status in [k for k, v in JobsTable.STATUS_CHOICES]:
        end_time = job_execution.updated_at
    return helpers.Helpers(None).get_duration(job_execution.created_at,
                                              end_time)
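`Helpers.get_duration` itself is not shown in this section. A minimal sketch of its presumed behavior, assuming ISO 8601 timestamp strings and that a missing end time means "measure up to now" (which would explain the single-argument call in the cluster duration column further below), might look like:

    from datetime import datetime

    def get_duration(created_at, updated_at=None):
        # Hedged sketch, not the actual sahara-dashboard implementation.
        fmt = "%Y-%m-%dT%H:%M:%S"
        start = datetime.strptime(created_at, fmt)
        end = (datetime.strptime(updated_at, fmt)
               if updated_at else datetime.utcnow())
        return str(end - start)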
def handle(self, request, context):
    main_locations = []
    lib_locations = []

    for k in context.keys():
        if k.startswith('lib_'):
            lib_locations.append(context.get(k))

    if context.get("main_binary", None):
        job_type = context.get("job_type", None)
        if job_type not in ["Java", "MapReduce"]:
            main_locations.append(context["main_binary"])

    argument_ids = json.loads(context['argument_ids'])
    interface = [
        {
            "name": context['argument_name_' + str(arg_id)],
            "description": (context['argument_description_' + str(arg_id)]
                            or None),
            "mapping_type": context['argument_mapping_type_' + str(arg_id)],
            "location": context['argument_location_' + str(arg_id)],
            "value_type": context['argument_value_type_' + str(arg_id)],
            "required": context['argument_required_' + str(arg_id)],
            "default": (context['argument_default_value_' + str(arg_id)]
                        or None)
        } for arg_id in argument_ids]

    try:
        job = saharaclient.job_create(
            request,
            context["job_name"],
            context["job_type"],
            main_locations,
            lib_locations,
            context["job_description"],
            interface=interface,
            is_public=context['is_public'],
            is_protected=context['is_protected'])

        hlps = helpers.Helpers(request)
        if hlps.is_from_guide():
            request.session["guide_job_id"] = job.id
            request.session["guide_job_type"] = context["job_type"]
            request.session["guide_job_name"] = context["job_name"]
            self.success_url = (
                "horizon:project:data_processing.wizard:jobex_guide")
        return True
    except Exception:
        exceptions.handle(request)
        return False
def handle(self, request, context):
    try:
        hlps = helpers.Helpers(request)
        hlps.reset_guide()
        plugin_name = context["plugin_name"]
        request.session["plugin_name"] = plugin_name
        request.session["plugin_version"] = (
            context[plugin_name + "_version"])
        messages.success(request, _("Cluster type chosen"))
        return True
    except Exception:
        exceptions.handle(request,
                          _("Unable to set cluster type"))
        return False
def handle(self, request, context):
    try:
        hlps = helpers.Helpers(request)
        job_type = context["guide_job_type"]
        if (force_text(request.session.get("guide_job_type")) !=
                force_text(helpers.JOB_TYPE_MAP[job_type][0])):
            hlps.reset_job_guide()
            request.session["guide_job_type"] = (
                helpers.JOB_TYPE_MAP[job_type][0])
        messages.success(request, _("Job type chosen"))
        return True
    except Exception:
        exceptions.handle(request,
                          _("Unable to set job type"))
        return False
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
    hlps = helpers.Helpers(request)
    plugin, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(request))

    general_parameters, service_parameters = (
        hlps.get_general_and_service_nodegroups_parameters(
            plugin, hadoop_version))

    if saharaclient.base.is_service_enabled(request, 'share'):
        ConfigureNodegroupTemplate._register_step(self,
                                                  SelectNodeGroupShares)

    self._populate_tabs(general_parameters, service_parameters)

    super(ConfigureNodegroupTemplate, self).__init__(request,
                                                     context_seed,
                                                     entry_point,
                                                     *args, **kwargs)
def handle(self, request, context):
    s3_credentials = {}
    if context["general_data_source_type"] == "s3":
        if context.get("general_data_source_credential_accesskey", None):
            s3_credentials["accesskey"] = context[
                "general_data_source_credential_accesskey"]
        if context.get("general_data_source_credential_secretkey", None):
            s3_credentials["secretkey"] = context[
                "general_data_source_credential_secretkey"]
        if context.get("general_data_source_credential_endpoint", None):
            s3_credentials["endpoint"] = context[
                "general_data_source_credential_endpoint"]
        s3_credentials["bucket_in_path"] = context[
            "general_data_source_credential_s3_bucket_in_path"]
        s3_credentials["ssl"] = context[
            "general_data_source_credential_s3_ssl"]
    # Pass None rather than an empty dict when no S3 credentials were given.
    s3_credentials = s3_credentials or None

    try:
        self.object = saharaclient.data_source_create(
            request,
            context["general_data_source_name"],
            context["general_data_source_description"],
            context["general_data_source_type"],
            context["source_url"],
            context.get("general_data_source_credential_user", None),
            context.get("general_data_source_credential_pass", None),
            is_public=context['general_is_public'],
            is_protected=context['general_is_protected'],
            s3_credentials=s3_credentials)

        hlps = helpers.Helpers(request)
        if hlps.is_from_guide():
            request.session["guide_datasource_id"] = self.object.id
            request.session["guide_datasource_name"] = self.object.name
            self.success_url = (
                "horizon:project:data_processing.wizard:jobex_guide")
        return True
    except Exception:
        exceptions.handle(request)
        return False
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
    ConfigureClusterTemplate._cls_registry = []

    hlps = helpers.Helpers(request)
    plugin, hadoop_version = (
        whelpers.get_plugin_and_hadoop_version(request))

    general_parameters = hlps.get_cluster_general_configs(
        plugin, hadoop_version)
    service_parameters = hlps.get_targeted_cluster_configs(
        plugin, hadoop_version)

    if saharaclient.base.is_service_enabled(request, 'share'):
        ConfigureClusterTemplate._register_step(self, SelectClusterShares)
    if saharaclient.base.is_service_enabled(request, 'dns'):
        ConfigureClusterTemplate._register_step(self, SelectDnsDomains)

    self._populate_tabs(general_parameters, service_parameters)

    super(ConfigureClusterTemplate, self).__init__(request, context_seed,
                                                   entry_point,
                                                   *args, **kwargs)
def handle(self, request, context):
    try:
        self.object = saharaclient.data_source_create(
            request,
            context["general_data_source_name"],
            context["general_data_source_description"],
            context["general_data_source_type"],
            context["source_url"],
            context.get("general_data_source_credential_user", None),
            context.get("general_data_source_credential_pass", None),
            is_public=context['general_is_public'],
            is_protected=context['general_is_protected'])

        hlps = helpers.Helpers(request)
        if hlps.is_from_guide():
            request.session["guide_datasource_id"] = self.object.id
            request.session["guide_datasource_name"] = self.object.name
            self.success_url = (
                "horizon:project:data_processing.wizard:jobex_guide")
        return True
    except Exception:
        exceptions.handle(request)
        return False
def handle(self, request, context):
    try:
        processes = []
        for service_process in context["general_processes"]:
            processes.append(str(service_process).split(":")[1])

        configs_dict = (
            workflow_helpers.parse_configs_from_context(
                context, self.defaults))

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))

        volumes_per_node = None
        volumes_size = None
        volumes_availability_zone = None
        volume_type = None
        volume_local_to_instance = False

        if context["general_storage"] == "cinder_volume":
            volumes_per_node = context["general_volumes_per_node"]
            volumes_size = context["general_volumes_size"]
            volumes_availability_zone = (
                context["general_volumes_availability_zone"])
            volume_type = context["general_volume_type"]
            volume_local_to_instance = (
                context["general_volume_local_to_instance"])

        ngt_shares = context.get('ngt_shares', [])

        image_id = context["general_image"] or None

        ngt = saharaclient.nodegroup_template_create(
            request,
            name=context["general_nodegroup_name"],
            plugin_name=plugin,
            hadoop_version=hadoop_version,
            description=context["general_description"],
            flavor_id=context["general_flavor"],
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            volumes_availability_zone=volumes_availability_zone,
            volume_type=volume_type,
            volume_local_to_instance=volume_local_to_instance,
            node_processes=processes,
            node_configs=configs_dict,
            floating_ip_pool=context.get("general_floating_ip_pool"),
            security_groups=context["security_groups"],
            auto_security_group=context["security_autogroup"],
            is_proxy_gateway=context["general_proxygateway"],
            availability_zone=context["general_availability_zone"],
            use_autoconfig=context['general_use_autoconfig'],
            shares=ngt_shares,
            is_public=context['general_is_public'],
            is_protected=context['general_is_protected'],
            image_id=image_id)

        hlps = helpers.Helpers(request)
        if hlps.is_from_guide():
            guide_type = context["general_guide_template_type"]
            request.session[guide_type + "_name"] = (
                context["general_nodegroup_name"])
            request.session[guide_type + "_id"] = ngt.id
            self.success_url = (
                "horizon:project:data_processing.clusters:cluster_guide")
        return True
    except api_base.APIException as e:
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request)
        return False
def __init__(self, request, *args, **kwargs):
    super(GeneralConfigAction, self).__init__(request, *args, **kwargs)

    hlps = helpers.Helpers(request)
    plugin, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(request))

    if not saharaclient.SAHARA_AUTO_IP_ALLOCATION_ENABLED:
        pools = neutron.floating_ip_pools_list(request)
        pool_choices = [(pool.id, pool.name) for pool in pools]
        pool_choices.insert(0, (None, "Do not assign floating IPs"))

        self.fields['floating_ip_pool'] = forms.ChoiceField(
            label=_("Floating IP Pool"),
            choices=pool_choices,
            required=False)

    self.fields["use_autoconfig"] = forms.BooleanField(
        label=_("Auto-configure"),
        help_text=_("If selected, instances of a node group will be "
                    "automatically configured during cluster "
                    "creation. Otherwise you should manually specify "
                    "configuration values."),
        required=False,
        widget=forms.CheckboxInput(),
        initial=True,
    )

    self.fields["proxygateway"] = forms.BooleanField(
        label=_("Proxy Gateway"),
        widget=forms.CheckboxInput(),
        help_text=_("Sahara will use instances of this node group to "
                    "access other cluster instances."),
        required=False)

    self.fields['is_public'] = acl_utils.get_is_public_form(
        _("node group template"))
    self.fields['is_protected'] = acl_utils.get_is_protected_form(
        _("node group template"))

    self.fields["plugin_name"] = forms.CharField(
        widget=forms.HiddenInput(),
        initial=plugin)
    self.fields["hadoop_version"] = forms.CharField(
        widget=forms.HiddenInput(),
        initial=hadoop_version)
    self.fields["storage"].choices = storage_choices(request)

    node_parameters = hlps.get_general_node_group_configs(
        plugin, hadoop_version)
    for param in node_parameters:
        self.fields[param.name] = workflow_helpers.build_control(param)

    # When we copy or edit a node group template, the request contains
    # valuable info in both GET and POST methods.
    req = request.GET.copy()
    req.update(request.POST)
    if req.get("guide_template_type"):
        self.fields["guide_template_type"] = forms.CharField(
            required=False,
            widget=forms.HiddenInput(),
            initial=req.get("guide_template_type"))

    if is_cinder_enabled(request):
        volume_types = cinder.volume_type_list(request)
    else:
        volume_types = []

    self.fields['volume_type'].choices = (
        [(None, _("No volume type"))] +
        [(type.name, type.name) for type in volume_types])
def get(self, request, *args, **kwargs):
    if kwargs["reset_cluster_guide"]:
        hlps = helpers.Helpers(request)
        hlps.reset_guide()
    return http.HttpResponseRedirect(reverse_lazy(self.pattern_name))
def get(self, request, *args, **kwargs):
    cluster_id = kwargs.get("cluster_id")
    time_helpers = helpers.Helpers(request)
    try:
        cluster = saharaclient.cluster_get(request, cluster_id,
                                           show_progress=True)
        node_group_mapping = {}
        for node_group in cluster.node_groups:
            node_group_mapping[node_group["id"]] = node_group["name"]

        provision_steps = cluster.provision_progress

        # Sort by create time
        provision_steps = sorted(provision_steps,
                                 key=ClusterEventsView._created_at_key,
                                 reverse=True)

        for step in provision_steps:
            # Sort the events of each step as well
            step["events"] = sorted(step["events"],
                                    key=ClusterEventsView._created_at_key,
                                    reverse=True)

            successful_events_count = 0

            for event in step["events"]:
                if event["node_group_id"]:
                    event["node_group_name"] = node_group_mapping[
                        event["node_group_id"]]

                event_result = _("Unknown")
                if event["successful"] is True:
                    successful_events_count += 1
                    event_result = _("Completed Successfully")
                elif event["successful"] is False:
                    event_result = _("Failed")

                event["result"] = event_result

                if not event["event_info"]:
                    event["event_info"] = _("No info available")

            step["duration"] = time_helpers.get_duration(
                step["created_at"], step["updated_at"])
            step['started_at'] = time_helpers.to_time_zone(
                step["created_at"], localize=True)

            result = _("In progress")
            step["completed"] = successful_events_count

            if step["successful"] is True:
                step["completed"] = step["total"]
                result = _("Completed Successfully")
            elif step["successful"] is False:
                result = _("Failed")

            step["result"] = result

        status = cluster.status.lower()
        need_update = status not in ("active", "error")
    except APIException:
        # Cluster is not available. Return an empty event log.
        need_update = False
        provision_steps = []

    context = {"provision_steps": provision_steps,
               "need_update": need_update}
    return HttpResponse(json.dumps(context),
                        content_type='application/json')
def handle(self, request, context):
    try:
        node_groups = []
        configs_dict = whelpers.parse_configs_from_context(
            context, self.defaults)

        ids = json.loads(context['ng_forms_ids'])
        for id in ids:
            name = context['ng_group_name_' + str(id)]
            template_id = context['ng_template_id_' + str(id)]
            count = context['ng_count_' + str(id)]

            raw_ng = context.get("ng_serialized_" + str(id))

            if raw_ng and raw_ng != 'null':
                ng = json.loads(utils.deserialize(str(raw_ng)))
            else:
                ng = dict()
            ng["name"] = name
            ng["count"] = count

            if template_id and template_id != 'None':
                ng["node_group_template_id"] = template_id
            node_groups.append(ng)

        plugin, hadoop_version = (
            whelpers.get_plugin_and_hadoop_version(request))

        ct_shares = []
        if "ct_shares" in context:
            ct_shares = context["ct_shares"]

        domain = context.get('dns_domain_name', None)
        if domain == 'None':
            domain = None

        # TODO(nkonovalov): Fix client to support default_image_id
        saharaclient.cluster_template_create(
            request,
            context["general_cluster_template_name"],
            plugin,
            hadoop_version,
            context["general_description"],
            configs_dict,
            node_groups,
            context["anti_affinity_info"],
            use_autoconfig=context['general_use_autoconfig'],
            shares=ct_shares,
            is_public=context['general_is_public'],
            is_protected=context['general_is_protected'],
            domain_name=domain)

        hlps = helpers.Helpers(request)
        if hlps.is_from_guide():
            request.session["guide_cluster_template_name"] = (
                context["general_cluster_template_name"])
            self.success_url = (
                "horizon:project:data_processing.clusters:cluster_guide")
        return True
    except api_base.APIException as e:
        self.error_description = str(e)
        return False
    except Exception:
        exceptions.handle(request,
                          _("Cluster template creation failed"))
        return False
def get_success_url(self):
    hlps = helpers.Helpers(self.request)
    if hlps.is_from_guide():
        self.success_url = reverse_lazy(
            "horizon:project:data_processing.wizard:jobex_guide")
    return self.success_url
def get_data(self, cluster):
    return helpers.Helpers(None).get_duration(cluster.created_at)
def __init__(self, request, *args, **kwargs):
    super(GeneralConfigAction, self).__init__(request, *args, **kwargs)

    hlps = helpers.Helpers(request)
    plugin, hadoop_version = (
        workflow_helpers.get_plugin_and_hadoop_version(request))

    if not saharaclient.SAHARA_FLOATING_IP_DISABLED:
        pools = neutron.floating_ip_pools_list(request)
        pool_choices = [(pool.id, pool.name) for pool in pools]
        pool_choices.insert(0, (None, "Do not assign floating IPs"))

        self.fields['floating_ip_pool'] = forms.ChoiceField(
            label=_("Floating IP Pool"),
            choices=pool_choices,
            required=False)

    self.fields["use_autoconfig"] = forms.BooleanField(
        label=_("Auto-configure"),
        help_text=_("If selected, instances of a node group will be "
                    "automatically configured during cluster "
                    "creation. Otherwise you should manually specify "
                    "configuration values."),
        required=False,
        widget=forms.CheckboxInput(),
        initial=True,
    )

    self.fields["proxygateway"] = forms.BooleanField(
        label=_("Proxy Gateway"),
        widget=forms.CheckboxInput(),
        help_text=_("Sahara will use instances of this node group to "
                    "access other cluster instances."),
        required=False)

    self.fields['is_public'] = acl_utils.get_is_public_form(
        _("node group template"))
    self.fields['is_protected'] = acl_utils.get_is_protected_form(
        _("node group template"))

    if saharaclient.VERSIONS.active == '2':
        self.fields['boot_storage'] = forms.ChoiceField(
            label=_("Boot storage location"),
            help_text=_("Choose a boot mode"),
            choices=storage_choices(request),
            widget=forms.Select(attrs={
                "class": "boot_storage_field switchable",
                'data-slug': 'boot_storage_loc'
            }))

        self.fields['boot_volume_type'] = forms.ChoiceField(
            label=_("Boot volume type"),
            required=False,
            widget=forms.Select(attrs={
                "class": "boot_volume_type_field switched",
                "data-switch-on": "boot_storage_loc",
                "data-boot_storage_loc-cinder_volume":
                    _('Boot volume type')
            }))

        self.fields['boot_volume_local_to_instance'] = forms.BooleanField(
            label=_("Boot volume local to instance"),
            required=False,
            help_text=_("Boot volume locality"),
            widget=forms.CheckboxInput(attrs={
                "class": "boot_volume_local_to_instance_field switched",
                "data-switch-on": "boot_storage_loc",
                "data-boot_storage_loc-cinder_volume":
                    _('Boot volume local to instance')
            }))

        self.fields['boot_volume_availability_zone'] = forms.ChoiceField(
            label=_("Boot volume availability Zone"),
            choices=self.populate_volumes_availability_zone_choices(
                request, None),
            help_text=_("Create boot volume in this availability zone."),
            required=False,
            widget=forms.Select(attrs={
                "class": "boot_volume_availability_zone_field switched",
                "data-switch-on": "boot_storage_loc",
                "data-boot_storage_loc-cinder_volume":
                    _('Boot volume availability zone')
            }))

    self.fields["plugin_name"] = forms.CharField(
        widget=forms.HiddenInput(),
        initial=plugin)
    self.fields["hadoop_version"] = forms.CharField(
        widget=forms.HiddenInput(),
        initial=hadoop_version)
    self.fields["storage"].choices = storage_choices(request)

    node_parameters = hlps.get_general_node_group_configs(
        plugin, hadoop_version)
    for param in node_parameters:
        self.fields[param.name] = workflow_helpers.build_control(param)

    # When we copy or edit a node group template, the request contains
    # valuable info in both GET and POST methods.
    req = request.GET.copy()
    req.update(request.POST)
    if req.get("guide_template_type"):
        self.fields["guide_template_type"] = forms.CharField(
            required=False,
            widget=forms.HiddenInput(),
            initial=req.get("guide_template_type"))

    if is_cinder_enabled(request):
        volume_types = cinder.volume_type_list(request)
    else:
        volume_types = []

    self.fields['volume_type'].choices = (
        [(None, _("No volume type"))] +
        [(type.name, type.name) for type in volume_types])

    if saharaclient.VERSIONS.active == '2':
        self.fields['boot_volume_type'].choices = (
            self.fields['volume_type'].choices)