Example #1
def populate_anti_affinity_choices(self, request, context):
    try:
        sahara = saharaclient.client(request)
        plugin, version = whelpers.get_plugin_and_hadoop_version(request)

        version_details = sahara.plugins.get_version_details(plugin, version)
        process_choices = []
        for processes in version_details.node_processes.values():
            for process in processes:
                process_choices.append((process, process))

        cluster_template_id = request.REQUEST.get("cluster_template_id", None)
        if cluster_template_id is None:
            selected_processes = request.REQUEST.get("aa_groups", [])
        else:
            cluster_template = (
                sahara.cluster_templates.get(cluster_template_id))
            selected_processes = cluster_template.anti_affinity

        checked_dict = dict()

        for process in selected_processes:
            checked_dict[process] = process

        self.fields['anti_affinity'].initial = checked_dict
    except Exception:
        process_choices = []
        exceptions.handle(request,
                          _("Unable to populate anti-affinity processes."))
    return process_choices
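This and the following examples obtain the plugin/version pair from the same helper (imported as whelpers or workflow_helpers). The snippet below is only a hypothetical sketch of such a helper, assuming the wizard passes the values as plugin_name and hadoop_version in GET or POST data; it is not the actual sahara-dashboard implementation.

# Hypothetical sketch of a get_plugin_and_hadoop_version helper,
# assuming "plugin_name" and "hadoop_version" arrive in GET or POST data.
def get_plugin_and_hadoop_version(request):
    data = request.GET.copy()
    data.update(request.POST)
    return data.get("plugin_name"), data.get("hadoop_version")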
Example #2
    def get_help_text(self):
        extra = dict()
        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(self.request))
        extra["plugin_name"] = plugin
        extra["hadoop_version"] = hadoop_version
        return super(GeneralConfigAction, self).get_help_text(extra)
Example #3
    def __init__(self, request, *args, **kwargs):
        super(SelectNodeProcessesAction,
              self).__init__(request, *args, **kwargs)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))
        node_processes = {}
        try:
            version_details = saharaclient.plugin_get_version_details(
                request, plugin, hadoop_version)
            node_processes = version_details.node_processes
        except Exception:
            exceptions.handle(request,
                              _("Unable to generate process choices."))
        process_choices = []
        for service, processes in node_processes.items():
            for process in processes:
                choice_label = str(service) + ":" + str(process)
                process_choices.append((choice_label, process))

        self.fields["processes"] = forms.MultipleChoiceField(
            label=_("Select Node Group Processes"),
            widget=CheckboxSelectMultiple(),
            choices=process_choices,
            required=True)
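With Django choice tuples being (submitted value, displayed label), the loop above keeps the service prefix in the submitted value and shows only the bare process name next to each checkbox; hypothetical output for an "HDFS" service:

# Hypothetical result of the choice-building loop above
# (service and process names are made up for illustration).
process_choices = [
    ("HDFS:namenode", "namenode"),
    ("HDFS:datanode", "datanode"),
]
# A later handle() step can recover the bare process name with
# value.split(":")[1], as some of the examples below do.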
Example #4
    def __init__(self, request, *args, **kwargs):
        super(SelectNodeProcessesAction, self).__init__(
            request, *args, **kwargs)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))
        node_processes = {}
        try:
            version_details = saharaclient.plugin_get_version_details(
                request, plugin, hadoop_version)
            node_processes = version_details.node_processes
        except Exception:
            exceptions.handle(request,
                              _("Unable to generate process choices."))
        process_choices = []
        for service, processes in node_processes.items():
            for process in processes:
                choice_label = str(service) + ":" + str(process)
                process_choices.append((choice_label, process))

        self.fields["processes"] = forms.MultipleChoiceField(
            label=_("Select Node Group Processes"),
            widget=CheckboxSelectMultiple(),
            choices=process_choices,
            required=True)
Example #5
def populate_anti_affinity_choices(self, request, context):
    try:
        sahara = saharaclient.client(request)
        plugin, version = whelpers.get_plugin_and_hadoop_version(request)

        version_details = sahara.plugins.get_version_details(plugin, version)
        process_choices = []
        for processes in version_details.node_processes.values():
            for process in processes:
                process_choices.append((process, process))

        cluster_template_id = request.GET.get("cluster_template_id", None)
        if cluster_template_id is None:
            selected_processes = request.GET.get("aa_groups", [])
        else:
            cluster_template = (
                sahara.cluster_templates.get(cluster_template_id))
            selected_processes = cluster_template.anti_affinity

        checked_dict = dict()

        for process in selected_processes:
            checked_dict[process] = process

        self.fields['anti_affinity'].initial = checked_dict
    except Exception:
        process_choices = []
        exceptions.handle(request,
                          _("Unable to populate anti-affinity processes."))
    return process_choices
Example #6
    def get_help_text(self):
        extra = dict()
        plugin_name, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(self.request))
        extra["plugin_name"] = plugin_name
        extra["hadoop_version"] = hadoop_version
        plugin = saharaclient.plugin_get_version_details(
            self.request, plugin_name, hadoop_version)
        extra["deprecated"] = workflow_helpers.is_version_of_plugin_deprecated(
            plugin, hadoop_version)
        return super(GeneralConfigAction, self).get_help_text(extra)
Example #7
    def handle(self, request, context):
        node_groups = None

        plugin, hadoop_version = (
            whelpers.get_plugin_and_hadoop_version(request))

        ct_id = context["cluster_general_cluster_template"] or None
        user_keypair = context["cluster_general_keypair"] or None

        argument_ids = json.loads(context['argument_ids'])
        interface = {name: context["argument_" + str(arg_id)]
                     for arg_id, name in argument_ids.items()}

        try:
            cluster = saharaclient.cluster_create(
                request,
                context["cluster_general_cluster_name"],
                plugin, hadoop_version,
                cluster_template_id=ct_id,
                default_image_id=context["cluster_general_image"],
                description=context["cluster_general_description"],
                node_groups=node_groups,
                user_keypair_id=user_keypair,
                is_transient=not(context["cluster_general_persist_cluster"]),
                net_id=context.get(
                    "cluster_general_neutron_management_network",
                    None))
        except Exception:
            exceptions.handle(request,
                              _("Unable to create new cluster for job."))
            return False

        try:
            saharaclient.job_execution_create(
                request,
                context["job_general_job"],
                cluster.id,
                context["job_general_job_input"],
                context["job_general_job_output"],
                context["job_config"],
                interface,
                is_public=context['job_general_is_public'],
                is_protected=context['job_general_is_protected']
            )
        except Exception:
            exceptions.handle(request,
                              _("Unable to launch job."))
            return False
        return True
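The interface mapping built near the top of this handle() comes from a JSON blob of argument ids stored in the context; a self-contained illustration with hypothetical values:

# Hypothetical illustration of the interface-building step above;
# the context keys and values are made up, the comprehension is the same.
import json

context = {
    "argument_ids": '{"1": "input_path", "2": "output_path"}',
    "argument_1": "swift://container/in",
    "argument_2": "swift://container/out",
}
argument_ids = json.loads(context["argument_ids"])
interface = {name: context["argument_" + str(arg_id)]
             for arg_id, name in argument_ids.items()}
# interface == {"input_path": "swift://container/in",
#               "output_path": "swift://container/out"}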
Example #8
    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        hlps = helpers.Helpers(request)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))

        general_parameters, service_parameters = \
            hlps.get_general_and_service_nodegroups_parameters(plugin,
                                                               hadoop_version)

        if saharaclient.base.is_service_enabled(request, 'share'):
            ConfigureNodegroupTemplate._register_step(self,
                                                      SelectNodeGroupShares)

        self._populate_tabs(general_parameters, service_parameters)

        super(ConfigureNodegroupTemplate,
              self).__init__(request, context_seed, entry_point, *args,
                             **kwargs)
Example #9
    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        hlps = helpers.Helpers(request)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))

        general_parameters, service_parameters = \
            hlps.get_general_and_service_nodegroups_parameters(plugin,
                                                               hadoop_version)

        if saharaclient.base.is_service_enabled(request, 'share'):
            ConfigureNodegroupTemplate._register_step(self,
                                                      SelectNodeGroupShares)

        self._populate_tabs(general_parameters, service_parameters)

        super(ConfigureNodegroupTemplate, self).__init__(request,
                                                         context_seed,
                                                         entry_point,
                                                         *args, **kwargs)
Example #10
    def handle(self, request, context):
        try:
            processes = []
            for service_process in context["general_processes"]:
                processes.append(str(service_process).split(":")[1])

            configs_dict = (
                workflow_helpers.parse_configs_from_context(
                    context, self.defaults))

            plugin, hadoop_version = (
                workflow_helpers.get_plugin_and_hadoop_version(request))

            volumes_per_node = None
            volumes_size = None
            volumes_availability_zone = None
            volume_type = None
            volume_local_to_instance = False

            if context["general_storage"] == "cinder_volume":
                volumes_per_node = context["general_volumes_per_node"]
                volumes_size = context["general_volumes_size"]
                volumes_availability_zone = \
                    context["general_volumes_availability_zone"]
                volume_type = context["general_volume_type"]
                volume_local_to_instance = \
                    context["general_volume_local_to_instance"]

            ngt_shares = context.get('ngt_shares', [])

            image_id = context["general_image"] or None

            ngt = saharaclient.nodegroup_template_create(
                request,
                name=context["general_nodegroup_name"],
                plugin_name=plugin,
                hadoop_version=hadoop_version,
                description=context["general_description"],
                flavor_id=context["general_flavor"],
                volumes_per_node=volumes_per_node,
                volumes_size=volumes_size,
                volumes_availability_zone=volumes_availability_zone,
                volume_type=volume_type,
                volume_local_to_instance=volume_local_to_instance,
                node_processes=processes,
                node_configs=configs_dict,
                floating_ip_pool=context.get("general_floating_ip_pool"),
                security_groups=context["security_groups"],
                auto_security_group=context["security_autogroup"],
                is_proxy_gateway=context["general_proxygateway"],
                availability_zone=context["general_availability_zone"],
                use_autoconfig=context['general_use_autoconfig'],
                shares=ngt_shares,
                is_public=context['general_is_public'],
                is_protected=context['general_is_protected'],
                image_id=image_id)

            hlps = helpers.Helpers(request)
            if hlps.is_from_guide():
                guide_type = context["general_guide_template_type"]
                request.session[guide_type + "_name"] = (
                    context["general_nodegroup_name"])
                request.session[guide_type + "_id"] = ngt.id
                self.success_url = (
                    "horizon:project:data_processing.clusters:cluster_guide")

            return True
        except api_base.APIException as e:
            self.error_description = str(e)
            return False
        except Exception:
            exceptions.handle(request)
Example #11
    def handle(self, request, context):
        try:
            processes = []
            for service_process in context["general_processes"]:
                processes.append(str(service_process).split(":")[1])

            configs_dict = (workflow_helpers.parse_configs_from_context(
                context, self.defaults))

            plugin, hadoop_version = (
                workflow_helpers.get_plugin_and_hadoop_version(request))

            volumes_per_node = None
            volumes_size = None
            volumes_availability_zone = None
            volume_type = None
            volume_local_to_instance = False

            if context["general_storage"] == "cinder_volume":
                volumes_per_node = context["general_volumes_per_node"]
                volumes_size = context["general_volumes_size"]
                volumes_availability_zone = \
                    context["general_volumes_availability_zone"]
                volume_type = context["general_volume_type"]
                volume_local_to_instance = \
                    context["general_volume_local_to_instance"]

            ngt_shares = context.get('ngt_shares', [])

            image_id = context["general_image"] or None

            ngt = saharaclient.nodegroup_template_create(
                request,
                name=context["general_nodegroup_name"],
                plugin_name=plugin,
                hadoop_version=hadoop_version,
                description=context["general_description"],
                flavor_id=context["general_flavor"],
                volumes_per_node=volumes_per_node,
                volumes_size=volumes_size,
                volumes_availability_zone=volumes_availability_zone,
                volume_type=volume_type,
                volume_local_to_instance=volume_local_to_instance,
                node_processes=processes,
                node_configs=configs_dict,
                floating_ip_pool=context.get("general_floating_ip_pool"),
                security_groups=context["security_groups"],
                auto_security_group=context["security_autogroup"],
                is_proxy_gateway=context["general_proxygateway"],
                availability_zone=context["general_availability_zone"],
                use_autoconfig=context['general_use_autoconfig'],
                shares=ngt_shares,
                is_public=context['general_is_public'],
                is_protected=context['general_is_protected'],
                image_id=image_id)

            hlps = helpers.Helpers(request)
            if hlps.is_from_guide():
                guide_type = context["general_guide_template_type"]
                request.session[guide_type +
                                "_name"] = (context["general_nodegroup_name"])
                request.session[guide_type + "_id"] = ngt.id
                self.success_url = (
                    "horizon:project:data_processing.clusters:cluster_guide")

            return True
        except api_base.APIException as e:
            self.error_description = str(e)
            return False
        except Exception:
            exceptions.handle(request)
Example #12
    def __init__(self, request, *args, **kwargs):
        super(GeneralConfigAction, self).__init__(request, *args, **kwargs)

        hlps = helpers.Helpers(request)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))

        if not saharaclient.SAHARA_AUTO_IP_ALLOCATION_ENABLED:
            pools = network.floating_ip_pools_list(request)
            pool_choices = [(pool.id, pool.name) for pool in pools]
            pool_choices.insert(0, (None, "Do not assign floating IPs"))

            self.fields['floating_ip_pool'] = forms.ChoiceField(
                label=_("Floating IP Pool"),
                choices=pool_choices,
                required=False)

        self.fields["use_autoconfig"] = forms.BooleanField(
            label=_("Auto-configure"),
            help_text=_("If selected, instances of a node group will be "
                        "automatically configured during cluster "
                        "creation. Otherwise you should manually specify "
                        "configuration values."),
            required=False,
            widget=forms.CheckboxInput(),
            initial=True,
        )

        self.fields["proxygateway"] = forms.BooleanField(
            label=_("Proxy Gateway"),
            widget=forms.CheckboxInput(),
            help_text=_("Sahara will use instances of this node group to "
                        "access other cluster instances."),
            required=False)

        self.fields['is_public'] = acl_utils.get_is_public_form(
            _("node group template"))
        self.fields['is_protected'] = acl_utils.get_is_protected_form(
            _("node group template"))

        self.fields["plugin_name"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=plugin
        )
        self.fields["hadoop_version"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=hadoop_version
        )
        node_parameters = hlps.get_general_node_group_configs(plugin,
                                                              hadoop_version)
        for param in node_parameters:
            self.fields[param.name] = workflow_helpers.build_control(param)

        # when we copy or edit a node group template then
        # request contains valuable info in both GET and POST methods
        req = request.GET.copy()
        req.update(request.POST)
        if req.get("guide_template_type"):
            self.fields["guide_template_type"] = forms.CharField(
                required=False,
                widget=forms.HiddenInput(),
                initial=req.get("guide_template_type"))

        try:
            volume_types = cinder.volume_type_list(request)
        except Exception:
            # Fall back to an empty list so the choices assignment below
            # does not raise NameError when the volume type lookup fails.
            volume_types = []
            exceptions.handle(request,
                              _("Unable to get volume type list."))

        self.fields['volume_type'].choices = [(None, _("No volume type"))] + \
                                             [(type.name, type.name)
                                              for type in volume_types]
Example #13
    def __init__(self, request, *args, **kwargs):
        super(GeneralConfigAction, self).__init__(request, *args, **kwargs)

        hlps = helpers.Helpers(request)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))

        if not saharaclient.SAHARA_AUTO_IP_ALLOCATION_ENABLED:
            pools = neutron.floating_ip_pools_list(request)
            pool_choices = [(pool.id, pool.name) for pool in pools]
            pool_choices.insert(0, (None, "Do not assign floating IPs"))

            self.fields['floating_ip_pool'] = forms.ChoiceField(
                label=_("Floating IP Pool"),
                choices=pool_choices,
                required=False)

        self.fields["use_autoconfig"] = forms.BooleanField(
            label=_("Auto-configure"),
            help_text=_("If selected, instances of a node group will be "
                        "automatically configured during cluster "
                        "creation. Otherwise you should manually specify "
                        "configuration values."),
            required=False,
            widget=forms.CheckboxInput(),
            initial=True,
        )

        self.fields["proxygateway"] = forms.BooleanField(
            label=_("Proxy Gateway"),
            widget=forms.CheckboxInput(),
            help_text=_("Sahara will use instances of this node group to "
                        "access other cluster instances."),
            required=False)

        self.fields['is_public'] = acl_utils.get_is_public_form(
            _("node group template"))
        self.fields['is_protected'] = acl_utils.get_is_protected_form(
            _("node group template"))

        self.fields["plugin_name"] = forms.CharField(
            widget=forms.HiddenInput(), initial=plugin)
        self.fields["hadoop_version"] = forms.CharField(
            widget=forms.HiddenInput(), initial=hadoop_version)

        self.fields["storage"].choices = storage_choices(request)

        node_parameters = hlps.get_general_node_group_configs(
            plugin, hadoop_version)
        for param in node_parameters:
            self.fields[param.name] = workflow_helpers.build_control(param)

        # when we copy or edit a node group template then
        # request contains valuable info in both GET and POST methods
        req = request.GET.copy()
        req.update(request.POST)
        if req.get("guide_template_type"):
            self.fields["guide_template_type"] = forms.CharField(
                required=False,
                widget=forms.HiddenInput(),
                initial=req.get("guide_template_type"))

        if is_cinder_enabled(request):
            volume_types = cinder.volume_type_list(request)
        else:
            volume_types = []

        self.fields['volume_type'].choices = [(None, _("No volume type"))] + \
                                             [(type.name, type.name)
                                              for type in volume_types]
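This form and the later ones fill the "storage" (and boot storage) choices from a storage_choices(request) helper. Below is a minimal sketch under the assumption that it offers an ephemeral drive plus, when Cinder is enabled, a Cinder volume; the real sahara-dashboard helper may differ.

# Hypothetical sketch of storage_choices; only the "cinder_volume" value
# is confirmed by the handle() examples above, "ephemeral_drive" is assumed.
def storage_choices(request):
    choices = [("ephemeral_drive", _("Ephemeral Drive"))]
    if is_cinder_enabled(request):
        choices.append(("cinder_volume", _("Cinder Volume")))
    return choices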
Example #14
    def handle(self, request, context):
        try:
            processes = []
            for service_process in context["general_processes"]:
                processes.append(str(service_process).split(":")[1])

            configs_dict = (
                workflow_helpers.parse_configs_from_context(
                    context, self.defaults))

            plugin, hadoop_version = (
                workflow_helpers.get_plugin_and_hadoop_version(request))

            volumes_per_node = 0
            volumes_size = None
            volumes_availability_zone = None
            volume_type = None
            volume_local_to_instance = False

            if context["general_storage"] == "cinder_volume":
                volumes_per_node = context["general_volumes_per_node"]
                volumes_size = context["general_volumes_size"]
                volume_type = context["general_volume_type"]
                volume_local_to_instance = \
                    context["general_volume_local_to_instance"]
                volumes_availability_zone = \
                    context["general_volumes_availability_zone"]

            ngt_shares = context.get('ngt_shares', [])

            image_id = context["general_image"] or None

            args_dict = dict(
                request=request,
                ngt_id=self.template_id,
                name=context["general_nodegroup_name"],
                plugin_name=plugin,
                hadoop_version=hadoop_version,
                flavor_id=context["general_flavor"],
                description=context["general_description"],
                volumes_per_node=volumes_per_node,
                volumes_size=volumes_size,
                volume_type=volume_type,
                volume_local_to_instance=volume_local_to_instance,
                volumes_availability_zone=volumes_availability_zone,
                node_processes=processes,
                node_configs=configs_dict,
                floating_ip_pool=context.get("general_floating_ip_pool"),
                security_groups=context["security_groups"],
                auto_security_group=context["security_autogroup"],
                availability_zone=context["general_availability_zone"],
                use_autoconfig=context['general_use_autoconfig'],
                is_proxy_gateway=context["general_proxygateway"],
                shares=ngt_shares,
                is_public=context['general_is_public'],
                is_protected=context['general_is_protected'],
                image_id=image_id)

            if saharaclient.VERSIONS.active == '2':
                args_dict['boot_from_volume'] = (
                    context['general_boot_from_volume'])

            saharaclient.nodegroup_template_update(**args_dict)

            return True
        except api_base.APIException as e:
            self.error_description = str(e)
            return False
        except Exception:
            exceptions.handle(request)
Example #15
    def __init__(self, request, *args, **kwargs):
        super(GeneralConfigAction, self).__init__(request, *args, **kwargs)

        hlps = helpers.Helpers(request)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))

        if not saharaclient.SAHARA_FLOATING_IP_DISABLED:
            pools = neutron.floating_ip_pools_list(request)
            pool_choices = [(pool.id, pool.name) for pool in pools]
            pool_choices.insert(0, (None, "Do not assign floating IPs"))

            self.fields['floating_ip_pool'] = forms.ChoiceField(
                label=_("Floating IP Pool"),
                choices=pool_choices,
                required=False)

        self.fields["use_autoconfig"] = forms.BooleanField(
            label=_("Auto-configure"),
            help_text=_("If selected, instances of a node group will be "
                        "automatically configured during cluster "
                        "creation. Otherwise you should manually specify "
                        "configuration values."),
            required=False,
            widget=forms.CheckboxInput(),
            initial=True,
        )

        self.fields["proxygateway"] = forms.BooleanField(
            label=_("Proxy Gateway"),
            widget=forms.CheckboxInput(),
            help_text=_("Sahara will use instances of this node group to "
                        "access other cluster instances."),
            required=False)

        self.fields['is_public'] = acl_utils.get_is_public_form(
            _("node group template"))
        self.fields['is_protected'] = acl_utils.get_is_protected_form(
            _("node group template"))

        if saharaclient.VERSIONS.active == '2':
            self.fields['boot_storage'] = forms.ChoiceField(
                label=_("Boot storage location"),
                help_text=_("Choose a boot mode"),
                choices=storage_choices(request),
                widget=forms.Select(
                    attrs={
                        "class": "boot_storage_field switchable",
                        'data-slug': 'boot_storage_loc'
                    }))

            self.fields['boot_volume_type'] = forms.ChoiceField(
                label=_("Boot volume type"),
                required=False,
                widget=forms.Select(
                    attrs={
                        "class":
                        "boot_volume_type_field switched",
                        "data-switch-on":
                        "boot_storage_loc",
                        "data-boot_storage_loc-cinder_volume":
                        _('Boot volume type')
                    }))

            self.fields['boot_volume_local_to_instance'] = forms.BooleanField(
                label=_("Boot volume local to instance"),
                required=False,
                help_text=_("Boot volume locality"),
                widget=forms.CheckboxInput(
                    attrs={
                        "class":
                        "boot_volume_local_to_instance_field switched",
                        "data-switch-on":
                        "boot_storage_loc",
                        "data-boot_storage_loc-cinder_volume":
                        _('Boot volume local to instance')
                    }))

            self.fields['boot_volume_availability_zone'] = forms.ChoiceField(
                label=_("Boot volume availability Zone"),
                choices=self.populate_volumes_availability_zone_choices(
                    request, None),
                help_text=_("Create boot volume in this availability zone."),
                required=False,
                widget=forms.Select(
                    attrs={
                        "class":
                        "boot_volume_availability_zone_field switched",
                        "data-switch-on":
                        "boot_storage_loc",
                        "data-boot_storage_loc-cinder_volume":
                        _('Boot volume availability zone')
                    }))

        self.fields["plugin_name"] = forms.CharField(
            widget=forms.HiddenInput(), initial=plugin)
        self.fields["hadoop_version"] = forms.CharField(
            widget=forms.HiddenInput(), initial=hadoop_version)

        self.fields["storage"].choices = storage_choices(request)

        node_parameters = hlps.get_general_node_group_configs(
            plugin, hadoop_version)
        for param in node_parameters:
            self.fields[param.name] = workflow_helpers.build_control(param)

        # when we copy or edit a node group template then
        # request contains valuable info in both GET and POST methods
        req = request.GET.copy()
        req.update(request.POST)
        if req.get("guide_template_type"):
            self.fields["guide_template_type"] = forms.CharField(
                required=False,
                widget=forms.HiddenInput(),
                initial=req.get("guide_template_type"))

        if is_cinder_enabled(request):
            volume_types = cinder.volume_type_list(request)
        else:
            volume_types = []

        self.fields['volume_type'].choices = [(None, _("No volume type"))] + \
                                             [(type.name, type.name)
                                              for type in volume_types]

        if saharaclient.VERSIONS.active == '2':
            self.fields['boot_volume_type'].choices = (
                self.fields['volume_type'].choices)
Example #16
    def __init__(self, request, *args, **kwargs):
        super(GeneralConfigAction, self).__init__(request, *args, **kwargs)

        hlps = helpers.Helpers(request)

        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))

        if not saharaclient.SAHARA_FLOATING_IP_DISABLED:
            pools = neutron.floating_ip_pools_list(request)
            pool_choices = [(pool.id, pool.name) for pool in pools]
            pool_choices.insert(0, (None, "Do not assign floating IPs"))

            self.fields['floating_ip_pool'] = forms.ChoiceField(
                label=_("Floating IP Pool"),
                choices=pool_choices,
                required=False)

        self.fields["use_autoconfig"] = forms.BooleanField(
            label=_("Auto-configure"),
            help_text=_("If selected, instances of a node group will be "
                        "automatically configured during cluster "
                        "creation. Otherwise you should manually specify "
                        "configuration values."),
            required=False,
            widget=forms.CheckboxInput(),
            initial=True,
        )

        self.fields["proxygateway"] = forms.BooleanField(
            label=_("Proxy Gateway"),
            widget=forms.CheckboxInput(),
            help_text=_("Sahara will use instances of this node group to "
                        "access other cluster instances."),
            required=False)

        self.fields['is_public'] = acl_utils.get_is_public_form(
            _("node group template"))
        self.fields['is_protected'] = acl_utils.get_is_protected_form(
            _("node group template"))

        if saharaclient.VERSIONS.active == '2':
            self.fields['boot_storage'] = forms.ChoiceField(
                label=_("Boot storage location"),
                help_text=_("Choose a boot mode"),
                choices=storage_choices(request),
                widget=forms.Select(attrs={
                    "class": "boot_storage_field switchable",
                    'data-slug': 'boot_storage_loc'
                }))

            self.fields['boot_volume_type'] = forms.ChoiceField(
                label=_("Boot volume type"),
                required=False,
                widget=forms.Select(attrs={
                    "class": "boot_volume_type_field switched",
                    "data-switch-on": "boot_storage_loc",
                    "data-boot_storage_loc-cinder_volume":
                        _('Boot volume type')
                })
            )

            self.fields['boot_volume_local_to_instance'] = forms.BooleanField(
                label=_("Boot volume local to instance"),
                required=False,
                help_text=_("Boot volume locality"),
                widget=forms.CheckboxInput(attrs={
                    "class": "boot_volume_local_to_instance_field switched",
                    "data-switch-on": "boot_storage_loc",
                    "data-boot_storage_loc-cinder_volume":
                        _('Boot volume local to instance')
                })
            )

            self.fields['boot_volume_availability_zone'] = forms.ChoiceField(
                label=_("Boot volume availability Zone"),
                choices=self.populate_volumes_availability_zone_choices(
                    request, None),
                help_text=_("Create boot volume in this availability zone."),
                required=False,
                widget=forms.Select(attrs={
                    "class": "boot_volume_availability_zone_field switched",
                    "data-switch-on": "boot_storage_loc",
                    "data-boot_storage_loc-cinder_volume":
                        _('Boot volume availability zone')
                })
            )

        self.fields["plugin_name"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=plugin
        )
        self.fields["hadoop_version"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=hadoop_version
        )

        self.fields["storage"].choices = storage_choices(request)

        node_parameters = hlps.get_general_node_group_configs(plugin,
                                                              hadoop_version)
        for param in node_parameters:
            self.fields[param.name] = workflow_helpers.build_control(param)

        # when we copy or edit a node group template then
        # request contains valuable info in both GET and POST methods
        req = request.GET.copy()
        req.update(request.POST)
        if req.get("guide_template_type"):
            self.fields["guide_template_type"] = forms.CharField(
                required=False,
                widget=forms.HiddenInput(),
                initial=req.get("guide_template_type"))

        if is_cinder_enabled(request):
            volume_types = cinder.volume_type_list(request)
        else:
            volume_types = []

        self.fields['volume_type'].choices = [(None, _("No volume type"))] + \
                                             [(type.name, type.name)
                                              for type in volume_types]

        if saharaclient.VERSIONS.active == '2':
            self.fields['boot_volume_type'].choices = (
                self.fields['volume_type'].choices)
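The boot storage fields in the last two examples rely on Horizon's switchable/switched form convention: the controlling select is marked "switchable" with a data-slug, and each dependent field is marked "switched" with data-switch-on naming that slug and a data-<slug>-<value> attribute whose text becomes the field's label while the control holds that value (the field stays hidden otherwise). A stripped-down pairing following the same pattern, with hypothetical field names:

# Minimal sketch of the switchable/switched pairing seen above
# (field names are hypothetical; the attribute convention is Horizon's).
self.fields["storage_loc"] = forms.ChoiceField(
    label=_("Storage location"),
    choices=storage_choices(request),
    widget=forms.Select(attrs={
        "class": "storage_loc_field switchable",
        "data-slug": "storage_loc",                 # controller slug
    }))
self.fields["volume_size"] = forms.IntegerField(
    required=False,
    widget=forms.TextInput(attrs={
        "class": "volume_size_field switched",
        "data-switch-on": "storage_loc",            # follow the controller
        # label shown only while storage_loc == "cinder_volume"
        "data-storage_loc-cinder_volume": _("Volume size (GB)"),
    }))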