Beispiel #1
0
def fit_results_to_dict(fit_results, min_bound=None, max_bound=None):
    """Create a JSON-comparable dict from a FitResults object.

    Arguments:
        fit_results (FitResults): object containing fit parameters,\
            errors and type
        min_bound: optional min value to add to dictionary if min isn't\
            a fit parameter.
        max_bound: optional max value to add to dictionary if max isn't\
            a fit parameter.

    Returns:
        JSON-compatible dictionary with fit results

    Note:
        Supported fit types: 'norm', 'expon', 'uniform'
    """
    kind = fit_results.type
    params = fit_results.params

    # Map each supported scipy distribution onto its (name, parameters) pairs.
    if kind == 'norm':
        pairs = [('type', 'normal'), ('mu', params[0]), ('sigma', params[1])]
    elif kind == 'expon':
        pairs = [('type', 'exponential'), ('lambda', 1.0 / params[1])]
    elif kind == 'uniform':
        pairs = [('type', 'uniform'), ('min', params[0]),
                 ('max', params[0] + params[1])]
    else:
        raise KeyError(kind)

    result = OrderedDict(pairs)

    # Explicit bounds are only added when the distribution has none of its own.
    if min_bound is not None and 'min' not in result:
        result['min'] = min_bound
    if max_bound is not None and 'max' not in result:
        result['max'] = max_bound

    return result
Beispiel #2
0
    def _generate_group_task(self, endpoint, fw_group):
        """Build the InspiredTask that creates one nifcloud firewall group.

        :param endpoint: nifcloud API endpoint handed to the module call
        :param fw_group: dict describing the firewall group; reads the
            'groupName', 'availabilityZone' and 'groupLogLimit' keys
        :return: the populated InspiredTask
        """
        group_name = fw_group['groupName']
        user = self._playhouse.user

        # Arguments of the nifcloud_fw local_action, in their fixed order.
        module_args = OrderedDict()
        module_args['module'] = 'nifcloud_fw'
        module_args['access_key'] = user.get_playbook_vars('access_key')
        module_args['secret_access_key'] = user.get_playbook_vars(
            'secret_access_key')
        module_args['endpoint'] = endpoint
        module_args['group_name'] = group_name
        module_args['availability_zone'] = fw_group['availabilityZone']
        module_args['log_limit'] = fw_group['groupLogLimit']
        module_args['state'] = 'present'
        module_args['purge_ip_permissions'] = False

        task = InspiredTask()
        task.template = 'nifcloud/default.task.fw_group.yml.j2'
        task.content = OrderedDict()
        task.content['name'] = 'create fw {}'.format(group_name)
        task.content['local_action'] = module_args
        task.content['tags'] = [
            group_name,
            'nifcloud_fw',
            'nifcloud_fw_group',
            'nifcloud_fw_group_{}'.format(group_name),
        ]

        return task
Beispiel #3
0
def fit_results_to_dict(fit_results, min_bound=None, max_bound=None):
    '''Create a JSON-comparable dict from a FitResults object

    Parameters:
        fit_results (FitResults): object containing fit parameters,\
            errors and type
        min_bound: optional min value to add to dictionary if min isn't\
            a fit parameter.
        max_bound: optional max value to add to dictionary if max isn't\
            a fit parameter.

    Returns:
        JSON-compatible dictionary with fit results

    Note:
        Supported fit types: 'norm', 'expon', 'uniform'
    '''
    labels = {'norm': 'normal', 'expon': 'exponential', 'uniform': 'uniform'}
    # Each extractor turns scipy's (loc, scale) into named parameter pairs.
    extractors = {
        'uniform': lambda loc, scale: [('min', loc), ('max', loc + scale)],
        'norm': lambda loc, scale: [('mu', loc), ('sigma', scale)],
        'expon': lambda loc, scale: [('lambda', 1.0 / scale)],
    }

    kind = fit_results.type
    payload = OrderedDict([('type', labels[kind])])
    params = fit_results.params
    payload.update(extractors[kind](params[0], params[1]))

    # Explicit bounds are injected only when the fit did not provide them.
    if min_bound is not None and 'min' not in payload:
        payload['min'] = min_bound
    if max_bound is not None and 'max' not in payload:
        payload['max'] = max_bound

    return payload
Beispiel #4
0
    def __init__(
        self,
        config_file=None,
        cluster_label=None,  # args.cluster_template
        fail_on_file_absence=False,
        fail_on_error=None,
        cluster_name=None,
        auto_refresh=True,
        enforce_version=True,
        skip_load_json_config=False,
    ):
        """
        Initialize object, from file, from a CFN Stack or from the internal mapping.

        NOTE: The class tries to parse the config file (the default one, if not specified) to get AWS credentials

        # "From file" initialization parameters:
        :param config_file: if specified the initialization of the sections will start from the file
        :param cluster_label: the label associated to a [cluster ...] section in the file
        :param fail_on_file_absence: initialization will fail if the specified file or a default one doesn't exist
        :param fail_on_error: tells if initialization must fail in presence of errors. If not set, the behaviour will
        depend on sanity_check parameter in conf file
        # "From Stack" initialization parameters:
        :param cluster_name: the cluster name associated to a running Stack,
        if specified the initialization will start from the running Stack
        :param auto_refresh: if set, refresh() method will be called every time something changes in the structure of
        the configuration, like a section being added, removed or renamed.
        :param enforce_version: when True enforces the CLI version to be of the same version as the cluster the user
        is interacting with.
        :param skip_load_json_config: stored on the instance; presumably disables loading of the json part of the
        configuration - consumed outside this method (TODO confirm against callers).
        """
        # Keep autorefresh disabled while the sections are being built so a
        # half-initialized structure is never refreshed.
        self.__autorefresh = False  # Initialization in progress
        self.fail_on_error = fail_on_error
        self.cfn_stack = None
        self.__sections = OrderedDict({})
        self.__enforce_version = enforce_version
        self.__skip_load_json_config = skip_load_json_config

        # always parse the configuration file if there, to get AWS section
        self._init_config_parser(config_file, fail_on_file_absence)
        # init AWS section
        self.__init_section_from_file(AWS, self.config_parser)
        self.__init_region()
        self.__init_aws_credentials()

        # init pcluster_config object, from cfn or from config_file
        if cluster_name:
            self.cluster_name = cluster_name
            self.__init_sections_from_cfn(cluster_name)
        else:
            self.__init_sections_from_file(cluster_label, self.config_parser,
                                           fail_on_file_absence)

        # Load instance types data if available
        self.__init_additional_instance_types_data()

        self.__autorefresh = auto_refresh  # Initialization completed

        # Refresh sections and parameters
        self._config_updated()
Beispiel #5
0
    def _check_file(self, f):
        """Run every configured check against a single morphology file.

        :param f: path of the morphology file to check
        :return: tuple of (overall bool result,
                 {f: OrderedDict of check title -> status, plus an 'ALL' key})
        """
        L.info('File: %s', f)

        try:
            data = load_data(f)
        except Exception as exc:  # pylint: disable=W0703
            # Unreadable file: report a single failed 'ALL' entry.
            L.error('Failed to load data... skipping tests for this file')
            L.error(exc.args)
            return False, {f: OrderedDict([('ALL', False)])}

        overall = True
        summary_all = OrderedDict()
        try:
            # Targets are built lazily: the neuron is only constructed after
            # the structural checks have already run.
            stages = ((lambda: data, 'structural_checks'),
                      (lambda: fst_core.FstNeuron(data), 'neuron_checks'))
            for make_target, module_name in stages:
                status, partial = self._check_loop(make_target(), module_name)
                overall &= status
                summary_all.update(partial)
        except Exception as exc:  # pylint: disable=W0703
            # Keep whatever partial summary was collected, but fail overall.
            L.error('Check failed: %s', str(type(exc)) + str(exc.args))
            overall = False

        summary_all['ALL'] = overall

        for title, status in summary_all.items():
            self._log_msg(title, status)

        return overall, {f: summary_all}
Beispiel #6
0
    def _genelate_role_nifcloud(self):
        """Build the InspiredRole entry that runs the 'nifcloud' role.

        The role name can be overridden through the 'role_nifcloud' key of
        ``self.inspiration`` and defaults to 'nifcloud'.

        NOTE(review): the method name looks like a typo of "_generate_...",
        but it is kept unchanged since callers may reference it.

        :return: the populated InspiredRole
        """
        # dict.get expresses the "use override when present" intent directly,
        # replacing the membership test + subscript pair.
        role_nifcloud = self.inspiration.get('role_nifcloud', 'nifcloud')

        role = InspiredRole()
        role.content = OrderedDict(
            (('role', role_nifcloud), ('tags', ['role_nifcloud']),
             ('when', 'nifcloud_role_exec is defined and nifcloud_role_exec')))
        return role
Beispiel #7
0
    def refresh(self):
        """
        Reload the sections structure and refresh all configuration sections and parameters.

        This method must be called after structural changes (a section label
        updated, a section added or removed) so that the internal mapping keys
        match the current section labels.
        """
        # Re-key every per-key map on the (possibly changed) section labels.
        rebuilt = OrderedDict()
        for section_key, labelled in self.__sections.items():
            rebuilt[section_key] = OrderedDict(
                (section.label, section) for section in labelled.values())
        self.__sections = rebuilt

        # With the structure consistent again, refresh every section.
        for labelled in self.__sections.values():
            for section in labelled.values():
                section.refresh()
Beispiel #8
0
    def _check_loop(self, obj, check_mod_str):
        """Run every check configured for one check module.

        :param obj: the object under test (raw data or a built neuron)
        :param check_mod_str: key selecting the check module and its config
        :return: tuple (all_passed, OrderedDict of check title -> status)
        """
        module = self._check_modules[check_mod_str]
        passed = True
        summary = OrderedDict()
        for check_name in self._config['checks'][check_mod_str]:
            outcome = self._do_check(obj, module, check_name)
            summary[outcome.title] = outcome.status
            passed &= outcome.status

        return passed, summary
Beispiel #9
0
    def _check_file(self, f):
        '''Run tests on a morphology file

        :param f: path of the morphology file to check
        :return: tuple of (overall bool result,
                 {f: OrderedDict of check title -> status, plus an 'ALL' key})
        '''
        L.info('File: %s', f)

        full_result = True
        full_summary = OrderedDict()
        try:
            data = load_data(f)
        except Exception as e:  # pylint: disable=W0703
            # Unreadable file: report a single failed 'ALL' entry.
            L.error('Failed to load data... skipping tests for this file')
            L.error(e.args)
            return False, {f: OrderedDict([('ALL', False)])}

        try:
            # Structural checks run on the raw data...
            result, summary = self._check_loop(data, 'structural_checks')
            full_result &= result
            full_summary.update(summary)

            # ...while neuron checks need the fully built neuron object.
            nrn = fst_core.FstNeuron(data)
            result, summary = self._check_loop(nrn, 'neuron_checks')
            full_result &= result
            full_summary.update(summary)
        except Exception as e:  # pylint: disable=W0703
            # Any unexpected error fails the file but keeps the partial summary.
            L.error('Check failed: %s', str(type(e)) + str(e.args))
            full_result = False

        full_summary['ALL'] = full_result

        for m, s in full_summary.items():
            self._log_msg(m, s)

        return full_result, {f: full_summary}
Beispiel #10
0
    def write_data(self, file_descr):
        """
        Format data from self.data_df, write into file_descr (opened with opener).

        Rows are grouped by the first two dataframe columns and rendered
        through the 'data_dict.tmpl' jinja2 template.
        """
        columns = self.data_df.columns
        col0 = columns[0]  # First column title (usually Database)
        col1 = columns[1]  # Second column title (usually Collection or Table)
        tmpl_variables = OrderedDict()
        for db in self.data_df[col0].unique():
            tmpl_variables[db] = OrderedDict()
            # NOTE: query() resolves `@db` from this local scope; col0 must be
            # a valid identifier for the query expression to parse.
            df_db = self.data_df.query('{} == @db'.format(col0)).iloc[:, 1:]
            for col in df_db[col1].unique():
                # Same constraint for col1; each .iloc[:, 1:] strips the
                # grouping column just used.
                df_col = df_db.query('{} == @col'.format(col1)).iloc[:, 1:]
                tmpl_variables[db][col] = df_col.values.tolist()

        # Template lives at <package root>/resources/data_dict.tmpl.
        tmpl_filename = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                     'resources', 'data_dict.tmpl')
        with open(tmpl_filename) as tmpl_fd:
            tmpl = jinja2.Template(tmpl_fd.read())

        # Remaining column titles (third onwards) become the table headers.
        file_descr.write(tmpl.render(col_titles=list(self.data_df)[2:],
                                     data=tmpl_variables))
Beispiel #11
0
def _parse_sqs_messages(messages, table):
    """Parse raw SQS messages into per-host update events.

    Duplicate events for the same hostname are collapsed, keeping only the
    latest one while preserving arrival order. Messages that cannot be read
    or carry an unsupported event type are deleted.

    :param messages: iterable of SQS message objects whose body is JSON
    :param table: table handle forwarded to the instance-terminate handler
    :return: the update events, in arrival order (a dict values view)
    """
    update_events = OrderedDict()
    for message in messages:
        message_text = json.loads(message.body)
        message_attrs = json.loads(message_text.get("Message"))

        event_type = message_attrs.get("Event")
        if not event_type:
            log.warning("Unable to read message. Deleting.")
            message.delete()
            continue

        instance_id = message_attrs.get("EC2InstanceId")
        if event_type == "parallelcluster:COMPUTE_READY":
            log.info("Processing COMPUTE_READY event for instance %s",
                     instance_id)
            update_event = _process_compute_ready_event(message_attrs, message)
        elif event_type == "autoscaling:EC2_INSTANCE_TERMINATE":
            log.info("Processing EC2_INSTANCE_TERMINATE event for instance %s",
                     instance_id)
            update_event = _process_instance_terminate_event(
                message_attrs, message, table)
        else:
            # Fixed: lazy %-style args instead of eager % interpolation, for
            # consistency with the other log calls in this function.
            log.info("Unsupported event type %s. Discarding message.",
                     event_type)
            update_event = None

        if update_event:
            hostname = update_event.host.hostname
            if hostname in update_events:
                # delete first to preserve messages order in dict
                del update_events[hostname]
            update_events[hostname] = update_event
        else:
            # discarding message
            log.warning("Discarding message %s", message)
            message.delete()

    return update_events.values()
Beispiel #12
0
    def __init__(
        self,
        config_file=None,
        cluster_label=None,  # args.cluster_template
        fail_on_file_absence=False,
        fail_on_error=None,
        cluster_name=None,
    ):
        """
        Initialize object, from file, from a CFN Stack or from the internal mapping.

        NOTE: The class tries to parse the config file (the default one, if not specified) to get AWS credentials

        # "From file" initialization parameters:
        :param config_file: if specified the initialization of the sections will start from the file
        :param cluster_label: the label associated to a [cluster ...] section in the file
        :param fail_on_file_absence: initialization will fail if the specified file or a default one doesn't exist
        :param fail_on_error: tells if initialization must fail in presence of errors. If not set, the behaviour will
        depend on sanity_check parameter in conf file
        # "From Stack" initialization parameters:
        :param cluster_name: the cluster name associated to a running Stack,
        if specified the initialization will start from the running Stack
        """
        self.fail_on_error = fail_on_error
        # Sections mapping; elsewhere in this codebase it is structured as
        # {section_key: {section_label: Section}}.
        self.sections = OrderedDict({})

        # always parse the configuration file if there, to get AWS section
        self._init_config_parser(config_file, fail_on_file_absence)
        # init AWS section
        self.__init_section_from_file(AWS, self.config_parser)
        self.__init_region()
        self.__init_aws_credentials()

        # init pcluster_config object, from cfn or from config_file
        if cluster_name:
            self.__init_sections_from_cfn(cluster_name)
        else:
            self.__init_sections_from_file(cluster_label, self.config_parser,
                                           fail_on_file_absence)
Beispiel #13
0
    def add_section(self, section):
        """
        Add a section to the PclusterConfig object.

        The internal sections structure is a dictionary:
        {
            "ebs" :{"ebs1": Section, "ebs2": Section},
            "vpc" :{"default": Section}
        }

        :param section, a Section object
        """
        # Create the per-key map on first use, then index by label.
        sections_by_label = self.__sections.setdefault(section.key,
                                                       OrderedDict())
        label = section.label or section.definition.get(
            "default_label", "default")
        sections_by_label[label] = section
        self._config_updated()
# Definition of the [global] configuration section: cluster template selection
# plus the update_check / sanity_check toggles. All parameters carry
# UpdatePolicy.IGNORED.
GLOBAL = {
    "type": CfnSection,
    "key": "global",
    "params": OrderedDict([
        (
            "cluster_template",
            {
                # TODO This could be a SettingsParam referring to a CLUSTER section
                "default": "default",
                "update_policy": UpdatePolicy.IGNORED,
            },
        ),
        (
            "update_check",
            {
                "type": BoolCfnParam,
                "default": True,
                "update_policy": UpdatePolicy.IGNORED,
            },
        ),
        (
            "sanity_check",
            {
                "type": BoolCfnParam,
                "default": True,
                "update_policy": UpdatePolicy.IGNORED,
            },
        ),
    ]),
}

ALIASES = {
    "type": CfnSection,
    "key": "aliases",
    "params": {
Beispiel #15
0
class PclusterConfig(object):
    """
    Class to manage the configuration of a cluster created (or to create) with ParallelCluster.

    This class contains a dictionary of sections associated to the given cluster
    """
    def __init__(
        self,
        config_file=None,
        cluster_label=None,  # args.cluster_template
        fail_on_file_absence=False,
        fail_on_error=None,
        cluster_name=None,
        auto_refresh=True,
        enforce_version=True,
        skip_load_json_config=False,
    ):
        """
        Initialize object, from file, from a CFN Stack or from the internal mapping.

        NOTE: The class tries to parse the config file (the default one, if not specified) to get AWS credentials

        # "From file" initialization parameters:
        :param config_file: if specified the initialization of the sections will start from the file
        :param cluster_label: the label associated to a [cluster ...] section in the file
        :param fail_on_file_absence: initialization will fail if the specified file or a default one doesn't exist
        :param fail_on_error: tells if initialization must fail in presence of errors. If not set, the behaviour will
        depend on sanity_check parameter in conf file
        # "From Stack" initialization parameters:
        :param cluster_name: the cluster name associated to a running Stack,
        if specified the initialization will start from the running Stack
        :param auto_refresh: if set, refresh() method will be called every time something changes in the structure of
        the configuration, like a section being added, removed or renamed.
        :param enforce_version: when True enforces the CLI version to be of the same version as the cluster the user
        is interacting with.
        :param skip_load_json_config: stored on the instance; presumably disables loading of the json part of the
        configuration - consumed outside this method (TODO confirm against callers).
        """
        # Keep autorefresh disabled while the sections are being built so a
        # half-initialized structure is never refreshed.
        self.__autorefresh = False  # Initialization in progress
        self.fail_on_error = fail_on_error
        self.cfn_stack = None
        self.__sections = OrderedDict({})
        self.__enforce_version = enforce_version
        self.__skip_load_json_config = skip_load_json_config

        # always parse the configuration file if there, to get AWS section
        self._init_config_parser(config_file, fail_on_file_absence)
        # init AWS section
        self.__init_section_from_file(AWS, self.config_parser)
        self.__init_region()
        self.__init_aws_credentials()

        # init pcluster_config object, from cfn or from config_file
        if cluster_name:
            self.cluster_name = cluster_name
            self.__init_sections_from_cfn(cluster_name)
        else:
            self.__init_sections_from_file(cluster_label, self.config_parser,
                                           fail_on_file_absence)

        # Load instance types data if available
        self.__init_additional_instance_types_data()

        self.__autorefresh = auto_refresh  # Initialization completed

        # Refresh sections and parameters
        self._config_updated()

    def _init_config_parser(self,
                            config_file,
                            fail_on_config_file_absence=True):
        """
        Parse the config file and initialize config_file and config_parser attributes.

        File resolution order: explicit argument, then the
        AWS_PCLUSTER_CONFIG_FILE environment variable, then the default path.

        :param config_file: The config file to parse
        :param fail_on_config_file_absence: set to true to raise SystemExit if config file doesn't exist
        """
        if config_file:
            self.config_file = config_file
            default_config = False
        elif "AWS_PCLUSTER_CONFIG_FILE" in os.environ:
            self.config_file = os.environ["AWS_PCLUSTER_CONFIG_FILE"]
            default_config = False
        else:
            self.config_file = default_config_file_path()
            default_config = True

        # Normalize to str (default_config_file_path() may return a Path).
        self.config_file = str(self.config_file)

        if not os.path.isfile(self.config_file):
            if fail_on_config_file_absence:
                error_message = "Configuration file {0} not found.".format(
                    self.config_file)
                if default_config:
                    # Only hint at 'pcluster configure' when the user did not
                    # point at a specific file.
                    error_message += (
                        "\nYou can execute the 'pcluster configure' command "
                        "or see https://docs.aws.amazon.com/parallelcluster/latest/ug/configuration.html"
                    )
                self.error(error_message)
            else:
                LOGGER.debug("Specified configuration file %s doesn't exist.",
                             self.config_file)
        else:
            LOGGER.debug("Parsing configuration file %s", self.config_file)
        # '#' and ';' start inline comments in the INI file.
        self.config_parser = configparser.ConfigParser(
            inline_comment_prefixes=("#", ";"))
        try:
            self.config_parser.read(self.config_file)
        except (configparser.ParsingError,
                configparser.DuplicateOptionError) as e:
            self.error("Error parsing configuration file {0}.\n{1}".format(
                self.config_file, str(e)))

    @staticmethod
    def get_global_section_keys():
        """Return the keys associated to the global sections, not related to the cluster one."""
        # CLI-wide sections, as opposed to per-cluster ones.
        return ["aws", "aliases", "global"]

    def get_section_keys(self,
                         include_global_sections=False,
                         excluded_keys=None):
        """Return the section keys."""
        excluded_keys = excluded_keys or []
        if not include_global_sections:
            excluded_keys += self.get_global_section_keys()

        section_keys = [
            section_key for section_key in self.__sections.keys()
            if section_key not in excluded_keys
        ]
        return section_keys

    def _get_file_section_names(self):
        """Return the names of the sections as represented in the configuration file."""
        # One file-level name per (key, label) pair, in insertion order.
        return [
            get_file_section_name(section_key, section.label)
            for section_key, sections in self.__sections.items()
            for section in sections.values()
        ]

    def get_sections(self, section_key):
        """
        Get the Section(s) identified by the given key.

        Example of output:
        {
            "ebs" : {
                "ebs1": Section, "ebs2": Section
            }
        }

        :param section_key: the identifier of the section type
        :return a dictionary containing the section
        """
        return self.__sections.get(section_key, {})

    def get_section(self, section_key, section_label=None):
        """
        Get the Section identified by the given key and label.

        Example of output:
        {
            "ebs1": Section
        }

        :param section_key: the identifier of the section type
        :param section_label: the label of the section, returns the first section if empty.
        """
        if section_label:
            section = self.get_sections(section_key).get(section_label, None)
        else:
            sections = self.get_sections(section_key)
            section = next(iter(sections.values()), None) if sections else None
        return section

    def add_section(self, section):
        """
        Add a section to the PclusterConfig object.

        The internal sections structure is a dictionary:
        {
            "ebs" :{"ebs1": Section, "ebs2": Section},
            "vpc" :{"default": Section}
        }

        :param section, a Section object
        """
        # Create the per-key map on first use.
        if section.key not in self.__sections:
            self.__sections[section.key] = OrderedDict({})

        # Fall back to the definition's default_label (or "default") when the
        # section carries no label of its own.
        section_label = section.label if section.label else section.definition.get(
            "default_label", "default")
        self.__sections[section.key][section_label] = section
        self._config_updated()

    def remove_section(self, section_key, section_label=None):
        """
        Remove a section from the PclusterConfig object, if there.

        :param section_key: the identifier of the section type
        :param section_label: the label of the section to delete.
        """
        if section_key in self.__sections:
            sections = self.__sections[section_key]

            if section_label:
                # If section label is specified, remove it directly
                sections.pop(section_label)
            else:
                # If no label is specified, check that no more than one section exists with the provided key
                if len(sections) > 1:
                    raise Exception(
                        "More than one section with key {0}".format(
                            section_key))
                else:
                    self.__sections.pop(section_key)
        self._config_updated()

    def __init_aws_credentials(self):
        """Set credentials in the environment to be available for all the boto3 calls."""
        # Init credentials by checking if they have been provided in config
        try:
            aws_section = self.get_section("aws")
            aws_access_key_id = aws_section.get_param_value(
                "aws_access_key_id")
            if aws_access_key_id:
                os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

            aws_secret_access_key = aws_section.get_param_value(
                "aws_secret_access_key")
            if aws_secret_access_key:
                os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
        except AttributeError:
            # If there is no [aws] section in the config file, get_section()
            # returns None and the calls above raise AttributeError; in that
            # case we rely on the AWS CLI configuration or already set env
            # variables.
            pass

    @property
    def cluster_model(self):
        """Get the cluster model used in the configuration."""
        # Fall back to the SIT model when no cluster section is present.
        cluster_model = ClusterModel.SIT
        cluster_section = self.get_section("cluster")
        if cluster_section:
            cluster_model = get_cluster_model(
                cluster_section.definition.get("cluster_model"))
        return cluster_model

    @property
    def region(self):
        """Get the region. The value is stored inside the aws_region_name of the aws section."""
        # Raises AttributeError when there is no [aws] section
        # (get_section() returns None in that case).
        return self.get_section("aws").get_param_value("aws_region_name")

    @region.setter
    def region(self, region):
        """Set the region. The value is stored inside the aws_region_name of the aws section."""
        # Raises AttributeError when there is no [aws] section.
        self.get_section("aws").get_param("aws_region_name").value = region

    def __init_region(self):
        """
        Evaluate region to use and set in the environment to be available for all the boto3 calls.

        Order is 1) AWS_DEFAULT_REGION env 2) Config file 3) default from aws config file
        """
        if os.environ.get("AWS_DEFAULT_REGION"):
            # The environment variable wins over the config-file value.
            self.region = os.environ.get("AWS_DEFAULT_REGION")
        elif self.region:
            # Propagate the config-file region to the environment for boto3.
            os.environ["AWS_DEFAULT_REGION"] = self.region
        else:
            self.error(
                "You must specify a region"
                "\nRun `aws configure`, or add the `-r <REGION_NAME>` arg to the command you are trying to"
                " run, or set the `AWS_DEFAULT_REGION` environment variable.")

    @property
    def fail_on_error(self):
        """Get fail_on_error property value. Will fall back to sanity_check parameter if not explicitly set."""
        if self._fail_on_error is None:
            # Lazily resolve and cache the fallback: the [global] section's
            # sanity_check value, or its declared default when the section is
            # missing.
            self._fail_on_error = (
                self.get_section("global").get_param_value("sanity_check")
                if self.get_section("global") else
                GLOBAL.get("params").get("sanity_check").get("default"))
        return self._fail_on_error

    @fail_on_error.setter
    def fail_on_error(self, fail_on_error):
        """Set fail_on_error property value.

        Passing None re-enables the lazy sanity_check fallback in the getter.
        """
        self._fail_on_error = fail_on_error

    def to_file(self, print_stdout=False, exclude_unrelated_sections=False):
        """Convert the internal representation of the cluster to the relative file sections.

        :param print_stdout: when True, write the rendered configuration to
            stdout instead of the configuration file
        :param exclude_unrelated_sections: when True, drop config_parser
            sections that do not belong to this cluster before writing
        """
        if exclude_unrelated_sections:
            # Remove sections not strictly related to the cluster from the config_parser.
            cluster_related_sections = self._get_file_section_names()
            for section_name in self.config_parser.sections():
                if section_name not in cluster_related_sections:
                    self.config_parser.remove_section(section_name)

        # Global sections are always written, including their default values.
        for section_key in self.get_global_section_keys():
            self.get_section(section_key).to_file(self.config_parser,
                                                  write_defaults=True)

        self.get_section("cluster").to_file(self.config_parser)

        if print_stdout:
            # print log file to stdout instead of writing the file
            self.config_parser.write(sys.stdout)
            sys.stdout.flush()
        else:
            # ensure that the directory for the config file exists
            if not os.path.isfile(self.config_file):
                try:
                    config_folder = os.path.dirname(self.config_file) or "."
                    os.makedirs(config_folder)
                except OSError as e:
                    if e.errno != errno.EEXIST:
                        raise  # can safely ignore EEXISTS for this purpose...

                # Fix permissions: create the file and restrict it to owner
                # read/write before any content is written.
                with open(self.config_file, "a"):
                    os.chmod(self.config_file, stat.S_IRUSR | stat.S_IWUSR)

            # Write configuration to disk
            with open(self.config_file, "w") as conf_file_stream:
                self.config_parser.write(conf_file_stream)

    def to_cfn(self):
        """
        Convert the internal representation of the cluster to a list of CFN parameters.

        :return: a dict containing the cfn parameters associated with the cluster configuration
        """
        return self.to_storage().cfn_params

    def to_storage(self):
        """
        Get a data structure with all the information needed to persist the configuration.

        The internal representation of the cluster is converted into a data structure containing the information to be
        stored into all the storage mechanisms used by the CLI (currently CloudFormation parameters and Json).

        :return: a dict containing the cfn parameters and the json dict associated with the cluster configuration
        """
        return self.get_section("cluster").to_storage()

    def __init_sections_from_file(self,
                                  cluster_label=None,
                                  config_parser=None,
                                  fail_on_absence=False):
        """
        Initialize all the Sections object and add them to the internal structure by parsing configuration file.

        :param cluster_label: the label of the section (if there)
        :param config_parser: the config parser object to parse
        :param fail_on_absence: if true, the initialization will fail if one section doesn't exist in the file
        """
        # Global sections first: [aliases] and [global].
        for section_definition in [ALIASES, GLOBAL]:
            self.__init_section_from_file(section_definition, config_parser)

        # get cluster by cluster_label
        if not cluster_label:
            # Fall back to the cluster_template parameter of [global].
            cluster_label = (
                self.get_section("global").get_param_value("cluster_template")
                if self.get_section("global") else None)

        # Infer cluster model and load cluster section accordingly
        cluster_model = infer_cluster_model(config_parser=config_parser,
                                            cluster_label=cluster_label)

        # Only the cluster section honours fail_on_absence.
        self.__init_section_from_file(
            cluster_model.get_cluster_section_definition(),
            config_parser,
            section_label=cluster_label,
            fail_on_absence=fail_on_absence,
        )

    def __init_section_from_file(self,
                                 section_definition,
                                 config_parser,
                                 section_label=None,
                                 fail_on_absence=False):
        """
        Build a Section object from the config file and register it in the internal structure.

        :param section_definition: the definition of the section to initialize
        :param config_parser: the config parser object to parse
        :param section_label: the label of the section (if there)
        :param fail_on_absence: if true, the initialization will fail if the section doesn't exist in the file
        """
        # The "type" entry of the definition is the Section subclass to instantiate.
        new_section = section_definition.get("type")(
            section_definition=section_definition,
            pcluster_config=self,
            section_label=section_label,
        )
        self.add_section(new_section)
        try:
            new_section.from_file(config_parser, fail_on_absence)
        except configparser.NoSectionError as e:
            self.error("Section '[{0}]' not found in the config file.".format(
                e.section))

    @property
    def auto_refresh(self):
        """Return the configuration autorefresh."""
        # When True, structural changes notified via _config_updated trigger refresh().
        return self.__autorefresh

    @auto_refresh.setter
    def auto_refresh(self, refresh_enabled):
        """Enable or disable the configuration autorefresh."""
        # Disabling is useful to batch several structural changes and refresh once.
        self.__autorefresh = refresh_enabled

    def _config_updated(self):
        """
        Notify the PclusterConfig instance that the configuration structure has changed.

        The purpose of this method is to allow internal configuration objects such as Param, Section etc to notify the
        parent PclusterConfig when something structural has changed. The configuration will be reloaded based on whether
        or not the autofresh function is enabled.
        """
        # Skipping the refresh when autorefresh is off lets callers batch
        # multiple structural changes and call refresh() once explicitly.
        if self.__autorefresh:
            self.refresh()

    def refresh(self):
        """
        Rebuild the sections structure and refresh every configuration section and parameter.

        Call this after structural changes (renamed section labels, added or
        removed sections, ...) so the internal maps are re-keyed by the current
        section labels.
        """
        # Re-key every inner map by the (possibly updated) section labels.
        rebuilt = OrderedDict()
        for section_key, sections_by_label in self.__sections.items():
            rebuilt[section_key] = OrderedDict(
                (section.label, section)
                for section in sections_by_label.values())
        self.__sections = rebuilt

        # Propagate the refresh to every section.
        for sections_by_label in self.__sections.values():
            for section in sections_by_label.values():
                section.refresh()

    def __init_sections_from_cfn(self, cluster_name):
        """
        Initialize the cluster section from the CloudFormation stack of a running cluster.

        :param cluster_name: name of the running cluster whose stack is inspected
        """
        try:
            self.cfn_stack = get_stack(get_stack_name(cluster_name))
            # Refuse to operate on a stack created by a different CLI version
            # when version enforcement is enabled.
            if self.__enforce_version and get_stack_version(
                    self.cfn_stack) != get_installed_version():
                self.error(
                    "The cluster {0} was created with a different version of ParallelCluster: {1}. "
                    "Installed version is {2}. This operation may only be performed using the same ParallelCluster "
                    "version used to create the cluster.".format(
                        cluster_name, get_stack_version(self.cfn_stack),
                        get_installed_version()))

            cfn_params = self.cfn_stack.get("Parameters")
            # Json config lives in S3 and may be deliberately skipped.
            json_params = self.__load_json_config(
                self.cfn_stack) if not self.__skip_load_json_config else None
            cfn_tags = self.cfn_stack.get("Tags")

            # Infer cluster model and load cluster section accordingly
            cluster_model = infer_cluster_model(cfn_stack=self.cfn_stack)
            section = ClusterCfnSection(section_definition=cluster_model.
                                        get_cluster_section_definition(),
                                        pcluster_config=self)

            self.add_section(section)

            section.from_storage(StorageData(cfn_params, json_params,
                                             cfn_tags))

        except ClientError as e:
            self.error(
                "Unable to retrieve the configuration of the cluster '{0}'.\n{1}"
                .format(cluster_name,
                        e.response.get("Error").get("Message")))

    def validate(self):
        """Run the validators of every section, then test the whole configuration."""
        for sections_by_label in self.__sections.values():
            for section in sections_by_label.values():
                section.validate()

        # test provided configuration
        self.__test_configuration()

    def get_head_node_availability_zone(self):
        """Return the Availability Zone of the head node subnet."""
        vpc_section = self.get_section("vpc")
        return vpc_section.get_param_value("master_availability_zone")

    def get_compute_availability_zone(self):
        """Return the Availability Zone of the compute subnet."""
        vpc_section = self.get_section("vpc")
        return vpc_section.get_param_value("compute_availability_zone")

    def __load_json_config(self, cfn_stack):
        """
        Retrieve Json configuration params from the S3 bucket linked from the cfn params.

        :param cfn_stack: the CloudFormation stack description dict
        :return: the parsed json configuration, or None for non-HIT clusters
        """
        json_config = None
        if is_hit_enabled_cluster(cfn_stack):
            s3_bucket_name = get_cfn_param(cfn_stack.get("Parameters"),
                                           "ResourcesS3Bucket")
            artifact_directory = get_cfn_param(cfn_stack.get("Parameters"),
                                               "ArtifactS3RootDirectory")
            if not s3_bucket_name or s3_bucket_name == "NONE":
                # Fixed typo: the CFN parameter queried above is "ResourcesS3Bucket".
                self.error(
                    "Unable to retrieve configuration: ResourcesS3Bucket not available."
                )
            if not artifact_directory or artifact_directory == "NONE":
                self.error(
                    "Unable to retrieve configuration: ArtifactS3RootDirectory not available."
                )

            json_config = self.__retrieve_cluster_config(
                s3_bucket_name, artifact_directory)

        return json_config

    def __retrieve_cluster_config(self, bucket, artifact_directory):
        """
        Download the cluster json configuration from S3, pinned to the version recorded in DynamoDB.

        :param bucket: the cluster's S3 resources bucket name
        :param artifact_directory: root directory of the cluster artifacts in the bucket
        :return: the configuration parsed into an OrderedDict
        """
        table = boto3.resource("dynamodb").Table(
            get_stack_name(self.cluster_name))
        config_version = None  # Use latest if not found
        try:
            config_version_item = table.get_item(ConsistentRead=True,
                                                 Key={"Id": "CLUSTER_CONFIG"})
            # Fixed: 'and' instead of 'or'. With 'or', any truthy response that
            # lacked the "Item" key short-circuited into the branch and
            # config_version_item["Item"] raised KeyError, which the except
            # below misreported as a DynamoDB failure.
            if config_version_item and "Item" in config_version_item:
                config_version = config_version_item["Item"].get("Version")
        except Exception as e:
            self.error(
                "Failed when retrieving cluster config version from DynamoDB with error {0}"
                .format(e))

        try:
            # Pin the S3 read to the recorded object version, if any.
            config_version_args = {
                "VersionId": config_version
            } if config_version else {}
            s3_object = boto3.resource("s3").Object(
                bucket, "{prefix}/configs/cluster-config.json".format(
                    prefix=artifact_directory))
            json_str = s3_object.get(
                **config_version_args)["Body"].read().decode("utf-8")
            # OrderedDict preserves the on-disk parameter ordering.
            return json.loads(json_str, object_pairs_hook=OrderedDict)
        except Exception as e:
            self.error(
                "Unable to load configuration from bucket '{bucket}/{prefix}'.\n{error}"
                .format(bucket=bucket, prefix=artifact_directory, error=e))

    def __test_configuration(self):  # noqa: C901
        """
        Perform global tests to verify that the wanted cluster configuration can be deployed in the user's account.

        Check operations may involve dryrun tests and/or other AWS calls and depend on the current cluster model.
        """
        LOGGER.debug("Testing configuration parameters...")
        # Checks are delegated to the model object; what is tested depends on
        # the concrete cluster model in use.
        self.cluster_model.test_configuration(self)
        LOGGER.debug("Configuration parameters tested correctly.")

    def error(self, message):
        """Report an error: exit via SystemExit when fail_on_error is true, otherwise just print it."""
        formatted = "ERROR: {0}".format(message)
        if not self.fail_on_error:
            print(formatted)
        else:
            sys.exit(formatted)

    def warn(self, message):
        """Emit a warning message on stdout."""
        text = "WARNING: {0}".format(message)
        print(text)

    @staticmethod
    def init_aws(config_file=None):
        """
        Initialize AWS env settings from pcluster config file.

        Useful when the only thing needed is to set AWS env variables, without really loading and checking the
        configuration settings.
        :param config_file: pcluster config file - None to use default
        """
        # The instance is built only for its constructor side effects (AWS env
        # variables set during __init__) and then discarded.
        # NOTE(review): auto_refresh=False presumably skips refresh work that is
        # pointless here — confirm against __init__.
        PclusterConfig(config_file=config_file,
                       fail_on_error=False,
                       fail_on_file_absence=False,
                       auto_refresh=False)

    def update(self, pcluster_config):
        """
        Update the entire configuration structure taking the data from the provided pcluster_config instance.

        This operation allows the configuration metadata to be correctly updated before being sent back to
        CloudFormation.

        :param pcluster_config: The new configuration containing the updated settings.
        """
        # When the configuration is updated, all parameters are replaced except config metadata
        # which is needed to keep the right linking between sections and CloudFormation resources
        config_metadata_param = self.get_section("cluster").get_param(
            "cluster_config_metadata")
        # Accessing the name-mangled private attribute works because both
        # objects are instances of the same class.
        self.__sections = pcluster_config.__sections
        self.get_section("cluster").set_param("cluster_config_metadata",
                                              config_metadata_param)

    def __init_additional_instance_types_data(self):
        """Store additional instance type information coming from instance_types_data parameter."""
        # Registers the extra data globally on InstanceTypeInfo so later lookups see it.
        InstanceTypeInfo.load_additional_instance_types_data(
            self.get_section("cluster").get_param_value("instance_types_data"))
Beispiel #16
0
class PclusterConfig(object):
    """
    Class to manage the configuration of a cluster created (or to create) with ParallelCluster.

    This class contains a dictionary of sections associated to the given cluster.
    """
    def __init__(
        self,
        config_file=None,
        cluster_label=None,  # args.cluster_template
        fail_on_file_absence=False,
        fail_on_error=None,
        cluster_name=None,
    ):
        """
        Initialize object, from file, from a CFN Stack or from the internal mapping.

        NOTE: The class tries to parse the config file (the default one, if not specified) to get AWS credentials

        # "From file" initialization parameters:
        :param config_file: if specified the initialization of the sections will start from the file
        :param cluster_label: the label associated to a [cluster ...] section in the file
        :param fail_on_file_absence: initialization will fail if the specified file or a default one doesn't exist
        :param fail_on_error: tells if initialization must fail in presence of errors. If not set, the behaviour will
        depend on sanity_check parameter in conf file
        # "From Stack" initialization parameters:
        :param cluster_name: the cluster name associated to a running Stack,
        if specified the initialization will start from the running Stack
        """
        self.fail_on_error = fail_on_error
        self.sections = OrderedDict({})

        # always parse the configuration file if there, to get AWS section
        self._init_config_parser(config_file, fail_on_file_absence)
        # init AWS section
        self.__init_section_from_file(AWS, self.config_parser)
        self.__init_region()
        self.__init_aws_credentials()

        # init pcluster_config object, from cfn or from config_file
        if cluster_name:
            self.__init_sections_from_cfn(cluster_name)
        else:
            self.__init_sections_from_file(cluster_label, self.config_parser,
                                           fail_on_file_absence)

    def _init_config_parser(self,
                            config_file,
                            fail_on_config_file_absence=True):
        """
        Parse the config file and initialize config_file and config_parser attributes.

        :param config_file: The config file to parse
        :param fail_on_config_file_absence: set to true to raise SystemExit if config file doesn't exist
        """
        if config_file:
            default_config = False
        elif "AWS_PCLUSTER_CONFIG_FILE" in os.environ:
            # Fixed: assign the env override to the local variable so the final
            # assignment below keeps it. Previously only self.config_file was
            # set here and the unconditional re-assignment clobbered it with
            # the default path, silently ignoring AWS_PCLUSTER_CONFIG_FILE.
            config_file = os.environ["AWS_PCLUSTER_CONFIG_FILE"]
            default_config = False
        else:
            config_file = os.path.expanduser(
                os.path.join("~", ".parallelcluster", "config"))
            default_config = True

        self.config_file = str(config_file)

        if not os.path.isfile(self.config_file):
            if fail_on_config_file_absence:
                # Fixed: format the file name into the message; previously the
                # literal "{0}" placeholder was printed verbatim because
                # .format() was only applied to the appended template hint.
                error_message = "Configuration file {0} not found.".format(
                    self.config_file)
                if default_config:
                    error_message += (
                        "\nYou can copy a template from {0}{1}examples{1}config "
                        "or execute the 'pcluster configure' command".format(
                            os.path.dirname(
                                os.path.abspath(
                                    inspect.getfile(inspect.currentframe()))),
                            os.path.sep,
                        ))
                self.error(error_message)
            else:
                LOGGER.debug("Specified configuration file %s doesn't exist.",
                             self.config_file)
        else:
            LOGGER.debug("Parsing configuration file %s", self.config_file)
        self.config_parser = configparser.ConfigParser(
            inline_comment_prefixes=("#", ";"))
        try:
            self.config_parser.read(self.config_file)
        except configparser.ParsingError as e:
            LOGGER.debug("Error parsing configuration file {0}.\n{1}".format(
                self.config_file, str(e)))

    def get_sections(self, section_key):
        """
        Get the Section(s) identified by the given key.

        Example of output:
        {
            "ebs" : {
                "ebs1": Section, "ebs2": Section
            }
        }

        :param section_key: the identifier of the section type
        :return a dictionary containing the section
        """
        return self.sections.get(section_key, {})

    def get_section(self, section_key, section_label=None):
        """
        Get the Section identified by the given key and label.

        Example of output:
        {
            "ebs1": Section
        }

        :param section_key: the identifier of the section type
        :param section_label: the label of the section, returns the first section if empty.
        """
        if section_label:
            section = self.get_sections(section_key).get(section_label, None)
        else:
            sections = self.get_sections(section_key)
            section = next(iter(sections.values()), None) if sections else None
        return section

    def add_section(self, section):
        """
        Add a section to the PclusterConfig object.

        The internal sections structure is a dictionary:
        {
            "ebs" :{"ebs1": Section, "ebs2": Section},
            "vpc" :{"default": Section}
        }

        :param section, a Section object
        """
        if section.key not in self.sections:
            self.sections[section.key] = {}

        # Unlabeled sections fall back to the definition's default label.
        section_label = section.label if section.label else section.definition.get(
            "default_label", "default")
        self.sections[section.key][section_label] = section

    def remove_section(self, section_key, section_label):
        """
        Remove a section from the PclusterConfig object, if there.

        :param section_key: the identifier of the section type
        :param section_label: the label of the section to delete.
        """
        if section_key in self.sections:
            self.sections[section_key].pop(section_label, None)

    def __init_aws_credentials(self):
        """Set credentials in the environment to be available for all the boto3 calls."""
        # Init credentials by checking if they have been provided in config
        try:
            aws_section = self.get_section("aws")
            aws_access_key_id = aws_section.get_param_value(
                "aws_access_key_id")
            if aws_access_key_id:
                os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

            aws_secret_access_key = aws_section.get_param_value(
                "aws_secret_access_key")
            if aws_secret_access_key:
                os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
        except AttributeError:
            # If there is no [aws] section in the config file,
            # we rely on the AWS CLI configuration or already set env variable
            pass

    @property
    def region(self):
        """Get the region. The value is stored inside the aws_region_name of the aws section."""
        return self.get_section("aws").get_param_value("aws_region_name")

    @region.setter
    def region(self, region):
        """Set the region. The value is stored inside the aws_region_name of the aws section."""
        self.get_section("aws").get_param("aws_region_name").value = region

    def __init_region(self):
        """
        Evaluate region to use and set in the environment to be available for all the boto3 calls.

        Order is 1) AWS_DEFAULT_REGION env 2) Config file 3) default from mapping
        """
        if os.environ.get("AWS_DEFAULT_REGION"):
            self.region = os.environ.get("AWS_DEFAULT_REGION")
        else:
            # NOTE(review): assumes the aws section always yields a region here
            # (a default from the mapping) — confirm, otherwise this sets the
            # env variable to None.
            os.environ["AWS_DEFAULT_REGION"] = self.region

    @property
    def fail_on_error(self):
        """Get fail_on_error property value. Will fall back to sanity_check parameter if not explicitly set."""
        if self._fail_on_error is None:
            self._fail_on_error = self.get_section("global").get_param_value(
                "sanity_check")
        return self._fail_on_error

    @fail_on_error.setter
    def fail_on_error(self, fail_on_error):
        """Set fail_on_error property value."""
        self._fail_on_error = fail_on_error

    def to_file(self):
        """Convert the internal representation of the cluster to the relative file sections."""
        for section_key in ["aws", "global", "aliases"]:
            self.get_section(section_key).to_file(self.config_parser,
                                                  write_defaults=True)

        self.get_section("cluster").to_file(self.config_parser)

        # ensure that the directory for the config file exists
        if not os.path.isfile(self.config_file):
            try:
                config_folder = os.path.dirname(self.config_file) or "."
                os.makedirs(config_folder)
            except OSError as e:
                # EEXIST means the directory is already there and can be
                # ignored; anything else is a real failure.
                if e.errno != errno.EEXIST:
                    raise

            # Create the file with owner-only permissions before writing.
            with open(self.config_file, "a"):
                os.chmod(self.config_file, stat.S_IRUSR | stat.S_IWUSR)

        # Write configuration to disk
        with open(self.config_file, "w") as conf_file_stream:
            self.config_parser.write(conf_file_stream)

    def to_cfn(self):
        """
        Convert the internal representation of the cluster to a list of CFN parameters.

        :return: a dict containing the cfn parameters associated with the cluster configuration
        """
        return self.get_section("cluster").to_cfn()

    def __init_sections_from_file(self,
                                  cluster_label=None,
                                  config_parser=None,
                                  fail_on_absence=False):
        """
        Initialize all the Sections object and add them to the internal structure by parsing configuration file.

        :param cluster_label: the label of the section (if there)
        :param config_parser: the config parser object to parse
        :param fail_on_absence: if true, the initialization will fail if one section doesn't exist in the file
        """
        # ALIASES and GLOBAL are loaded first: the cluster label fallback below
        # reads "cluster_template" from the just-loaded global section.
        for section_definition in [ALIASES, GLOBAL]:
            self.__init_section_from_file(section_definition, config_parser)

        # get cluster by cluster_label
        if not cluster_label:
            cluster_label = (
                self.get_section("global").get_param_value("cluster_template")
                if self.get_section("global") else None)
        self.__init_section_from_file(CLUSTER,
                                      config_parser,
                                      section_label=cluster_label,
                                      fail_on_absence=fail_on_absence)

    def __init_section_from_file(self,
                                 section_definition,
                                 config_parser,
                                 section_label=None,
                                 fail_on_absence=False):
        """
        Initialize the Section object and add it to the internal structure.

        :param section_definition: the definition of the section to initialize
        :param config_parser: the config parser object to parse
        :param section_label: the label of the section (if there)
        :param fail_on_absence: if true, the initialization will fail if the section doesn't exist in the file
        """
        # The "type" entry of the definition is the Section subclass to instantiate.
        section_type = section_definition.get("type")
        section = section_type(section_definition=section_definition,
                               pcluster_config=self,
                               section_label=section_label)
        self.add_section(section)
        try:
            section.from_file(config_parser, fail_on_absence)
        except configparser.NoSectionError as e:
            self.error("Section '[{0}]' not found in the config file.".format(
                e.section))

    def __init_sections_from_cfn(self, cluster_name):
        """
        Initialize the cluster section from the CloudFormation stack of a running cluster.

        :param cluster_name: name of the running cluster whose stack is inspected
        """
        try:
            stack = get_stack(get_stack_name(cluster_name))

            section_type = CLUSTER.get("type")
            section = section_type(section_definition=CLUSTER,
                                   pcluster_config=self).from_cfn_params(
                                       cfn_params=stack.get("Parameters", []))
            self.add_section(section)
        except ClientError as e:
            self.error(
                "Unable to retrieve the configuration of the cluster '{0}'.\n{1}"
                .format(cluster_name,
                        e.response.get("Error").get("Message")))

    def validate(self):
        """Validate the configuration."""
        for _, sections in self.sections.items():
            for _, section in sections.items():
                section.validate()

        # check AWS account limits
        self.__check_account_capacity()

    def get_master_availability_zone(self):
        """Get the Availability zone of the Master Subnet."""
        return self.get_section("vpc").get_param_value(
            "master_availability_zone")

    def __check_account_capacity(self):  # noqa: C901
        """Try to launch the requested number of instances to verify Account limits."""
        cluster_section = self.get_section("cluster")
        vpc_section = self.get_section("vpc")

        if (not cluster_section
                or cluster_section.get_param_value("scheduler") == "awsbatch"
                or cluster_section.get_param_value("cluster_type") == "spot"
                or not vpc_section):
            return

        master_instance_type = cluster_section.get_param_value(
            "master_instance_type")
        compute_instance_type = cluster_section.get_param_value(
            "compute_instance_type")
        # get max size
        # NOTE(review): this awsbatch branch is unreachable — awsbatch already
        # returned early above. Kept for safety; consider removing.
        if cluster_section.get_param_value("scheduler") == "awsbatch":
            max_vcpus = cluster_section.get_param_value("max_vcpus")
            vcpus = get_instance_vcpus(self.region, compute_instance_type)
            max_size = -(-max_vcpus // vcpus)  # ceiling division
        else:
            max_size = cluster_section.get_param_value("max_queue_size")
        if max_size < 0:
            warn(
                "Unable to check AWS account capacity. Skipping limits validation"
            )
            return

        # Check for insufficient Account capacity
        compute_subnet = vpc_section.get_param_value("compute_subnet_id")
        master_subnet = vpc_section.get_param_value("master_subnet_id")
        if not compute_subnet:
            compute_subnet = master_subnet

        # Initialize CpuOptions
        disable_hyperthreading = cluster_section.get_param_value(
            "disable_hyperthreading")
        master_vcpus = get_instance_vcpus(self.region, master_instance_type)
        compute_vcpus = get_instance_vcpus(self.region, compute_instance_type)
        master_cpu_options = {
            "CoreCount": master_vcpus // 2,
            "ThreadsPerCore": 1
        } if disable_hyperthreading else {}
        compute_cpu_options = {
            "CoreCount": compute_vcpus // 2,
            "ThreadsPerCore": 1
        } if disable_hyperthreading else {}

        # Initialize Placement Group Logic
        placement_group = cluster_section.get_param_value("placement_group")
        placement = cluster_section.get_param_value("placement")
        master_placement_group = ({
            "GroupName": placement_group
        } if placement_group not in [None, "NONE", "DYNAMIC"]
                                  and placement == "cluster" else {})
        compute_placement_group = ({
            "GroupName": placement_group
        } if placement_group not in [None, "NONE", "DYNAMIC"] else {})

        # Test Master Instance Configuration
        self.__ec2_run_instance(
            max_size,
            InstanceType=master_instance_type,
            MinCount=1,
            MaxCount=1,
            ImageId=get_latest_alinux_ami_id(),
            SubnetId=master_subnet,
            CpuOptions=master_cpu_options,
            Placement=master_placement_group,
            DryRun=True,
        )

        # Test Compute Instances Configuration
        self.__ec2_run_instance(
            max_size,
            InstanceType=compute_instance_type,
            MinCount=max_size,
            MaxCount=max_size,
            ImageId=get_latest_alinux_ami_id(),
            SubnetId=compute_subnet,
            CpuOptions=compute_cpu_options,
            Placement=compute_placement_group,
            DryRun=True,
        )

    def __ec2_run_instance(self, max_size, **kwargs):
        """Wrap ec2 run_instance call. Useful since a successful run_instance call signals 'DryRunOperation'."""
        try:
            boto3.client("ec2").run_instances(**kwargs)
        except ClientError as e:
            code = e.response.get("Error").get("Code")
            message = e.response.get("Error").get("Message")
            if code == "DryRunOperation":
                pass
            elif code == "UnsupportedOperation":
                if "does not support specifying CpuOptions" in message:
                    # Surface the pcluster-level parameter name instead of the
                    # EC2 API field name.
                    self.error(
                        message.replace("CpuOptions",
                                        "disable_hyperthreading"))
                else:
                    # Fixed: this was unconditional, so when fail_on_error was
                    # false the CpuOptions case printed both the rewritten and
                    # the raw message.
                    self.error(message)
            elif code == "InstanceLimitExceeded":
                self.error(
                    "The configured max size parameter {0} exceeds the AWS Account limit "
                    "in the {1} region.\n{2}".format(max_size, self.region,
                                                     message))
            elif code == "InsufficientInstanceCapacity":
                self.error(
                    "The configured max size parameter {0} exceeds the On-Demand capacity on AWS.\n{1}"
                    .format(max_size, message))
            elif code == "InsufficientFreeAddressesInSubnet":
                self.error(
                    "The configured max size parameter {0} exceeds the number of free private IP addresses "
                    "available in the Compute subnet.\n{1}".format(
                        max_size, message))
            elif code == "InvalidParameterCombination":
                self.error(message)
            else:
                self.error(
                    "Unable to check AWS Account limits. Please double check your cluster configuration.\n%s"
                    % message)

    def error(self, message):
        """Print an error message and Raise SystemExit exception to the stderr if fail_on_error is true."""
        if self.fail_on_error:
            sys.exit("ERROR: {0}".format(message))
        else:
            print("ERROR: {0}".format(message))

    def warn(self, message):
        """Print a warning message."""
        print("WARNING: {0}".format(message))

    @staticmethod
    def init_aws(config_file=None):
        """
        Initialize AWS env settings from pcluster config file.

        Useful when the only thing needed is to set AWS env variables, without really loading and checking the
        configuration settings.
        :param config_file: pcluster config file - None to use default
        """
        # The instance is built only for its constructor side effects (AWS env
        # variables) and then discarded.
        PclusterConfig(config_file=config_file,
                       fail_on_error=False,
                       fail_on_file_absence=False)
Beispiel #17
0
    "validators": [efs_validator],
    "cfn_param_mapping": "EFSOptions",  # All the parameters in the section are converted into a single CFN parameter
    "params": OrderedDict(  # Use OrderedDict because the parameters must respect the order in the CFN parameter
        [
            ("shared_dir", {
                "allowed_values": ALLOWED_VALUES["file_path"],
            }),
            ("efs_fs_id", {
                "allowed_values": ALLOWED_VALUES["efs_fs_id"],
                "validators": [efs_id_validator],
            }),
            ("performance_mode", {
                "default": "generalPurpose",
                "allowed_values": ["generalPurpose", "maxIO"],
            }),
            ("efs_kms_key_id", {}),
            ("provisioned_throughput", {
                "allowed_values": r"^([0-9]{1,3}|10[0-1][0-9]|102[0-4])(\.[0-9])?$",  # 0.0 to 1024.0
                "type": FloatParam,
            }),
            ("encrypted", {
                "type": BoolParam,
                "default": False,
            }),
            ("throughput_mode", {
                "default": "bursting",
                "allowed_values": ["provisioned", "bursting"],
            }),
        ]
    )
}
Beispiel #18
0
    def _generate_ip_permissions_task(self, endpoint, fw_group):
        """Build the Ansible task that configures a firewall group's rules.

        Arguments:
            endpoint: API endpoint passed through to the nifcloud_fw module.
            fw_group: firewall-group dict from the API; must contain
                'groupName' and may contain 'ipPermissions'.

        Returns:
            An InspiredTask rendering the fw_ip_permissions template, or
            None when the group has no ipPermissions to configure.
        """
        ip_permissions = self.dict_item_as_list(fw_group, 'ipPermissions')
        if not ip_permissions:
            return None

        def _build_entry(ip_permission, target_key, target_value):
            # Shared shape for both cidr-based and group-based rules: the
            # protocol/direction/target triple first, then the optional
            # fields copied only when present and truthy (key order matters
            # because the OrderedDict is serialized into the playbook).
            entry = OrderedDict((
                ('ip_protocol', ip_permission['ipProtocol']),
                ('in_out', ip_permission['inOut']),
                (target_key, target_value),
            ))
            for src_key, dst_key in (('fromPort', 'from_port'),
                                     ('toPort', 'to_port'),
                                     ('description', 'description')):
                if src_key in ip_permission and ip_permission[src_key]:
                    entry[dst_key] = ip_permission[src_key]
            return entry

        task_ip_permissions = []
        for ip_permission in ip_permissions:
            # A single ipPermission can fan out to several cidr targets and
            # several group targets; emit one task entry per target.
            for ip_range in self.dict_item_as_list(ip_permission, 'ipRanges'):
                task_ip_permissions.append(
                    _build_entry(ip_permission, 'cidr_ip', ip_range['cidrIp']))
            for group in self.dict_item_as_list(ip_permission, 'groups'):
                task_ip_permissions.append(
                    _build_entry(ip_permission, 'group_name',
                                 group['groupName']))

        # Sort so repeated runs emit the playbook entries in a stable order.
        # NOTE(review): if some entries carry a numeric from_port/to_port while
        # others fall back to '', the tuple comparison would raise TypeError on
        # Python 3 — presumably the API returns port values as strings here;
        # confirm against the nifcloud response format.
        sorted_task_ip_permission = sorted(task_ip_permissions,
                                           key=lambda x: (
                                               x.get('ip_protocol', ''),
                                               x.get('in_out', ''),
                                               x.get('from_port', ''),
                                               x.get('to_port', ''),
                                               x.get('description', ''),
                                               x.get('group_name', ''),
                                               x.get('cidr_ip', ''),
                                           ))

        user = self._playhouse.user
        access_key = user.get_playbook_vars('access_key')
        secret_access_key = user.get_playbook_vars('secret_access_key')

        # Tags let callers target this group's rule task from the CLI.
        tags = [
            fw_group['groupName'],
            'nifcloud_fw',
            'nifcloud_fw_ip_permissions',
            'nifcloud_fw_ip_permissions_{}'.format(fw_group['groupName']),
        ]

        task = InspiredTask()
        task.template = 'nifcloud/default.task.fw_ip_permissions.yml.j2'
        task.content = OrderedDict((
            ('name',
             'configure {} ip_permissions'.format(fw_group['groupName'])),
            ('local_action',
             OrderedDict((
                 ('module', 'nifcloud_fw'),
                 ('access_key', access_key),
                 ('secret_access_key', secret_access_key),
                 ('endpoint', endpoint),
                 ('group_name', fw_group['groupName']),
                 ('purge_ip_permissions', True),
                 ('ip_permissions', sorted_task_ip_permission),
             ))),
            ('tags', tags),
        ))

        return task