Example #1
    def iso_version(self) -> config.ISOVersion:
        """
        Return the version of the ISO bundle scheme for the release

        Returns
        -------
        config.ISOVersion:
            version of the ISO bundle scheme for the release

        """
        if self._iso_version is None:
            if self.is_factory:
                # FIXME improve to support newer versions for deployment
                self._iso_version = config.ISOVersion.VERSION1
            else:
                # search in upgrade releases
                pillar_path = f'release/upgrade/repos/{self.version}'
                pillars = PillarResolver(local_minion_id()).get(
                    [PillarKey(pillar_path)], fail_on_undefined=True)

                upgrade_release = pillars[local_minion_id()][PillarKey(
                    pillar_path)]
                release_data = upgrade_release.get(self.version)
                # NOTE: release_data can be None after applying
                #  remove_swupgrade_repo command
                if (isinstance(release_data, dict)
                        and 'version' in release_data):
                    self._iso_version = config.ISOVersion(
                        release_data['version'])
                else:
                    # FIXME: it may be remote repo
                    self._iso_version = config.ISOVersion.VERSION1

        return self._iso_version
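
    # Illustrative note (not part of the original snippet): for the
    # non-factory branch above, the value resolved for
    # 'release/upgrade/repos/<version>' is expected to be a mapping that
    # still contains a '<version>' entry of the form
    #   {'<version>': {'version': '<scheme number>', ...}}
    # and that 'version' field is what gets wrapped in config.ISOVersion.
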
    def run(targets=LOCAL_MINION):
        """set_cluster_id command execution method.

        Will be executed only on primary node.
        Gets cluster id from pillar data.
        If not present, generates a UUID and sets it.

        Execution:
        `provisioner set_cluster_id`
        Takes no mandatory argument as input.

        """
        try:
            node_role = grains_get("roles", local_minion_id())[
                local_minion_id()]["roles"]  # displays as a list

            if node_role[0] != "primary":
                logger.error(
                    "Error: ClusterID can be set only in the Primary node "
                    f"of the cluster. Node role received: '{node_role[0]}'.")
                raise ValueError(
                    "Error: ClusterID can be set only in the Primary node "
                    f"of the cluster. Node role received: '{node_role[0]}'.")

            logger.info("This is the Primary node of the cluster.")

            cluster_id_from_grains = grains_get(
                "cluster_id",
                local_minion_id())[local_minion_id()]["cluster_id"]

            # double verification
            cluster_id_from_pillar = GetClusterId.run(targets)

            if not cluster_id_from_grains:
                logger.info(
                    "ClusterID is not found in grains data. Generating one..")
                cluster_uuid = str(uuid.uuid4())
                logger.info(
                    "Setting the generated ClusterID across all nodes..")

                PillarSet().run('cluster/cluster_id',
                                f'{cluster_uuid}',
                                targets=targets)

            elif cluster_id_from_grains and not cluster_id_from_pillar:
                logger.info("ClusterID is not set in pillar data."
                            " Proceeding to set now..")
                PillarSet().run('cluster/cluster_id',
                                f'{cluster_id_from_grains}',
                                targets=targets)

            else:
                logger.info(
                    "Bootstrapping completed and ClusterID is already set!")

            logger.info("Success: Unique ClusterID assignment to pillar data.")

        except Exception as exc:
            raise ValueError("Failed: Encountered error while setting "
                             f"cluster_id to Pillar data: {str(exc)}")
Example #3
    def plan_upgrade(self, sw_list=None):
        if sw_list is None:
            pi_key = PillarKey('upgrade/sw_list')
            sw_list = PillarResolver(local_minion_id()).get(
                [pi_key], fail_on_undefined=True)[local_minion_id()][pi_key]
            logger.debug(f"Resolved sw list: {sw_list}")

        # TODO plan the sw order
        #   - (if not provided) plan according to upgrade ISO data
        pi_key = PillarKey('commons/sw_data')
        sw_data = PillarResolver(local_minion_id()).get(
            [pi_key], fail_on_undefined=True)[local_minion_id()][pi_key]
        logger.debug(f"Resolved sw data: {sw_data}")

        diff = set(sw_list) - set(sw_data)
        if diff:
            raise ValueError(f"Unexpected sw to upgrade: {diff}")

        sw_data = {
            _sw: _data
            for _sw, _data in sw_data.items() if _sw in sw_list
        }

        # FIXME return list of objects, e.g. SWData
        return sw_data
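
    # A minimal sketch of the filtering above with toy data (names are
    # placeholders, not real components): any requested sw missing from
    # 'commons/sw_data' is rejected, otherwise sw_data is narrowed to the
    # requested subset.
    #   sw_list = ['sw_a', 'sw_b']
    #   sw_data = {'sw_a': {...}, 'sw_b': {...}, 'sw_c': {...}}
    #   set(sw_list) - set(sw_data)   # -> set(), nothing unexpected
    #   {k: v for k, v in sw_data.items() if k in sw_list}
    #                                 # -> {'sw_a': ..., 'sw_b': ...}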
Example #4
 def run():
     cmd_out = cmd_run('hctl status --json', targets=local_minion_id())
     cmd_out = json.loads(cmd_out[local_minion_id()])
     result = {}
     result['pools'] = cmd_out.get('pools')
     result['profiles'] = cmd_out.get('profiles')
     result['filesystem'] = cmd_out.get('filesystem')
     result['nodes'] = cmd_out.get('nodes')
     return result
Example #5
    def __attrs_post_init__(self):    # noqa: D105
        params = attr.asdict(self)

        # If storage.cvg.metadata or storage.cvg.data is specified,
        # check entry for the other.
        for data_set in params.get('cvg'):
            logger.debug(f"DataSet being processed for CVG keys: {data_set}")
            if (
                data_set.get('data_devices') and
                (
                    (not data_set.get('metadata_devices')) or
                    (data_set.get('metadata_devices') == UNCHANGED) or
                    (data_set.get('metadata_devices') == '')
                )
            ):
                raise ValueError(
                    "List of data is specified. "
                    "However, list of metadata is unspecified."
                )
            elif (
                data_set.get('metadata_devices') and
                (
                    (not data_set.get('data_devices')) or
                    (data_set.get('data_devices') == UNCHANGED) or
                    (data_set.get('data_devices') == '')
                )
            ):
                raise ValueError(
                    "List of metadata is specified. "
                    "However, list of data is unspecified."
                )
        if (
            'physical' not in GrainsGet().run(
                'virtual',
                targets=local_minion_id()
            )[local_minion_id()]['virtual']
        ):
            self._optional_param.extend([
                'bmc_user',
                'bmc_secret'
            ])

        missing_params = []
        for param, value in params.items():
            if value == UNCHANGED and param not in self._optional_param:
                missing_params.append(param)
        if len(missing_params) > 0:
            logger.error(
                f"{missing_params} cannot be left empty or blank.. "
                "These are mandatory to configure the setup."
            )
            raise ValueError(f"Mandatory param missing {missing_params}")
Example #6
    def run(self, targets=ALL_MINIONS):
        """cluster_id assignment

        Execution:
        `provisioner cluster_id`
        Takes no mandatory argument as input.
        Executed only on primary node.

        """
        try:
            node_role = grains_get("roles", local_minion_id())[
                local_minion_id()]["roles"]  # displays as a list

            if node_role[0] != "primary":
                raise ValueError(
                    "Error: ClusterID can be set only in the Primary node "
                    f"of the cluster. Node role received: '{node_role[0]}'.")

            logger.info("This is the Primary node of the cluster.")

            cluster_id_from_pillar = self._get_cluster_id()

            if not cluster_id_from_pillar:
                logger.info("ClusterID not set in pillar data. "
                            "Checking setup file..")

            # double verification
            cluster_id_from_setup = self._initial_check()

            if cluster_id_from_setup == cluster_id_from_pillar:
                logger.info("Bootstrapping completed and ClusterID is set!")

            elif (cluster_id_from_pillar
                  and cluster_id_from_setup != cluster_id_from_pillar):
                logger.error(
                    "Mismatch in cluster_id value between "
                    "setup and pillar data. Setting unique value now.."
                    "\nPossible warning: Check if cluster values "
                    "have been manually tampered with.")

            PillarSet().run('cluster/cluster_id',
                            f'{cluster_id_from_setup}',
                            targets=targets)

            return f"cluster_id: {cluster_id_from_setup}"

        except Exception as exc:
            raise ValueError("Failed: Encountered error while setting "
                             f"cluster_id to Pillar data: {str(exc)}")
def get_cluster_nodes():
    """
    Get current nodes in cluster
    """
    res = cmd_run('salt-key -L --out=json')
    res = json.loads(res[local_minion_id()])
    return res.get('minions')
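
# Example usage (illustrative): get_cluster_nodes() returns the list of
# accepted salt minions, e.g. something like ['srvnode-1', 'srvnode-2'] on a
# two-node cluster; the exact ids depend on the deployment.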
Example #8
 def run():
     cmd_out = cmd_run('cortx cluster start', targets=local_minion_id())
     result = {
         key.replace("\"", ""): item.replace("\"", "")
         for key, item in cmd_out.items()
     }
     return result
Example #9
    def run(self, force=False):
        try:
            node_id = local_minion_id()
            self._validate_cert_installation()
            server_type = get_pillar_data(f'cluster/{node_id}/type')
            if server_type == 'HW':
                self._validate_health_map()
            self._validate_interfaces(node_id)
            server_type = self._validate_server_type(node_id)
            self._validate_devices(node_id, server_type)

            self.logger.info(
                "Node validations complete. Creating Field users..")
            self._create_field_users(node_id)
            self.logger.debug("Field users created.")
            self.logger.debug("Setting up Cron job")
            self.create_cron_job(SUPPORT_USER_NAME, SUPPORT_CRON_TIME)
            self._factory_backup()

        except CortxSetupError as exc:
            if force:
                self.logger.info(
                    f"One or more node validation(s) failed: {str(exc)}.\n"
                    "Forcibly creating users..")
                self._create_field_users(node_id)
                self.logger.info(
                    "Field users created. Check logs for more details on the validations error.."
                )
                self.logger.debug("Setting up Cron job")
                self.create_cron_job(SUPPORT_USER_NAME, SUPPORT_CRON_TIME)
                self._factory_backup()
            else:
                raise
Example #10
 def run(self):
     res = cmd_run('salt-key -L --out=json')
     res = json.loads(res[local_minion_id()])
     result = {}
     result['cluster_nodes'] = res.get('minions')
     result['non_cluster_nodes'] = res.get('minions_pre')
     return result
Example #11
 def installed_rpms(self) -> List:
     if self._installed_rpms is None:
         exclude_rpms = config.EXCLUDE_RPMS_RELEASE_VERSION
         res = cmd_run(f"rpm -qa|grep '^cortx-'|grep -Ev '{exclude_rpms}'",
                       targets=local_minion_id())
         rpms = res[next(iter(res))].split("\n")
         self._installed_rpms = [f'{rpm}.rpm' for rpm in rpms if rpm]
     return self._installed_rpms
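
 # Illustrative note: cmd_run returns output keyed by the target minion, so
 # res[next(iter(res))] picks that single minion's newline-separated rpm
 # list, e.g. roughly (minion id and package names below are placeholders):
 #   {'srvnode-1': 'cortx-foo-2.0.0-1.el7.x86_64\ncortx-bar-2.0.0-1.el7.x86_64'}
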
 def run(self):
     minion_id = local_minion_id()
     if minion_id:
         self.logger.debug(f"removing minion {minion_id} from salt cluster")
         run_subprocess_cmd(f"salt-key -d {minion_id} -y")
     self.logger.debug("Remove minion_id and minion_master.pub from system")
     run_subprocess_cmd(f"rm -rf /etc/salt/minion_id")
     run_subprocess_cmd(f"rm -rf /etc/salt/pki/minion/minion_master.pub")
     self.logger.debug(f"Minion {minion_id} removed from salt configurtion")
Example #13
def disk_devices(device_type, devices):
    local_devices = None
    if device_type == HW_TYPE:
        local_devices = cmd_run("multipath -ll|grep mpath|sort -k2|cut -d' ' -f1|sed 's|mpath|/dev/disk/by-id/dm-name-mpath|g'|paste -s -d, -")  # noqa: E501
        local_devices = local_devices[local_minion_id()]
        if not local_devices:
            raise CortxSetupError(f"Devices are not present on system")
        local_devices = local_devices.split(',')
    if device_type == VM_TYPE:
        local_devices = cmd_run("lsblk -o name -lpn | awk '/dev\/sd/{print}'")  # noqa: W605, E501
        local_devices = local_devices[local_minion_id()]
        if not local_devices:
            raise CortxSetupError(f"Devices are not present on system")
        local_devices = local_devices.split('\n')
    local_devices = set(local_devices)
    devices = set(devices)

    if not devices.issubset(local_devices):
        raise CortxSetupError(f"Invalid device list provided {devices}")
Example #14
    def _import_gpg_public_key(gpg_pub_key: str):
        """
        Import GPG public key

        Returns
        -------

        """
        cmd = f'gpg --import {gpg_pub_key}'

        cmd_run(cmd,
                targets=local_minion_id(),
                fun_kwargs=dict(python_shell=True))
Example #15
    def _convert_key_to_open_pgp_format(pub_key_path: Path) -> Path:
        """
        Check if the GPG public key is in ASCII Armor format. If so, convert
        it to OpenPGP format.

        Parameters
        ----------
        pub_key_path: Path
            Path to GPG public key

        Returns
        -------
        Path:
            path to the file with GPG public key in OpenPGP format

        """
        # NOTE: for the ASCII Armor format, please, refer to RFC4880
        #  https://datatracker.ietf.org/doc/html/rfc4880#section-6.2

        # NOTE: return given path itself if it is in OpenPGP format already
        res = pub_key_path
        with open(pub_key_path, "rb") as fh:
            # NOTE: read file as binary file since OpenPGP is binary format
            content = fh.readlines()
            armor_header = content[0]
            armor_tail = content[-1]

        # NOTE: we check that the armor header and armor tail in binary
        #  representation exist in the first and the last line of
        #  the pub key file.
        if (ARMOR_HEADER.encode() in armor_header
                and ARMOR_TAIL.encode() in armor_tail):
            # NOTE: it means that provided public key is in ASCII Armor format
            cmd = f"gpg --yes --dearmor {pub_key_path.resolve()}"
            try:
                # NOTE: by default gpg tool converts the given file to the file
                #  with the same name + '.gpg' extension at the end.
                #  Directory is the same
                cmd_run(cmd, targets=local_minion_id())
            except Exception as e:
                logger.error("Can't convert ASCII Armor GPG public key "
                             f"'{pub_key_path.resolve()}'"
                             f"to OpenPGP format: '{e}'")
                raise ValidationError(
                    f'Public key conversion error: "{e}"') from e
            else:
                # NOTE: because .with_suffix method replaces the last suffix
                suffix = pub_key_path.suffix + ".gpg"
                res = pub_key_path.with_suffix(suffix)

        return res
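
    # Illustrative note: ARMOR_HEADER and ARMOR_TAIL are defined outside this
    # snippet; per RFC 4880 section 6.2 an ASCII Armored public key is framed
    # by marker lines such as
    #   -----BEGIN PGP PUBLIC KEY BLOCK-----
    #   -----END PGP PUBLIC KEY BLOCK-----
    # so the constants are presumably those marker strings (assumption).
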
    def _validate_python_index(self, index_path: Path, dry_run: bool = False):
        """
        Perform dynamic validation of the SW upgrade Python index located
        at the given `index_path`

        Parameters
        ----------
        index_path: Path
            Path to the SW upgrade Python index
        dry_run: bool
            If True, collect validation errors instead of raising them
            immediately

        Returns
        -------
        None

        Raises
        ------
        SWUpdateRepoSourceError
            If Python index validation fails

        """
        logger.debug("Start Python index validation")
        if not index_path.exists() or not any(
                p for p in index_path.iterdir() if p.is_dir()):
            return

        pkgs = (p for p in index_path.iterdir() if p.is_dir())
        try:
            test_package_name = next(pkgs).name
        except StopIteration:
            logger.debug("Python index is empty, skip the validation")
            return

        with tempfile.TemporaryDirectory() as tmp_dir:
            cmd = (f"pip3 download {test_package_name} --dest={tmp_dir}/ "
                   f"--index-url file://{index_path.resolve()}")
            try:
                cmd_run(cmd,
                        targets=local_minion_id(),
                        fun_kwargs=dict(python_shell=True))
            except Exception as e:
                exc = SWUpdateRepoSourceError(
                    index_path, "Python index validation failed: "
                    f"{e}")
                if dry_run:
                    self._exceptions.append(exc)
                else:
                    raise exc

        logger.debug("Python index validation succeeded")
def get_pillar_data(key: str):
    """
    Get pillar_value for the specific key provided

    Parameters
    ----------
    key: str
        key path for which the value is to be fetched,
        e.g. 'key1/key2/key3'

    """
    pillar_key = PillarKey(key)
    pillar = PillarResolver(local_minion_id(), local=True).get([pillar_key])
    pillar = next(iter(pillar.values()))
    return pillar[PillarKey(key)]
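
# Example usage (illustrative): other snippets in this collection call it as
#   server_type = get_pillar_data(f'cluster/{node_id}/type')
# i.e. the slash-separated key path is resolved against the local minion's
# pillar and the leaf value is returned.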
Example #18
    def run(self, **kwargs):

        node_id = local_minion_id()
        firewall_pillar_sls = Path(f"{PRVSNR_PILLAR_DIR}/components/firewall.sls")

        self.logger.debug(f"updating firewall config on {node_id}")

        try:
            self.logger.debug(f"loading firewall configuration")
            firewall_config_arg = kwargs.get('config')
            self.load_conf_store('index1', firewall_config_arg)
            firewall_conf = {'firewall': Conf.get('index1','firewall') }
            dump_yaml(firewall_pillar_sls, dict(firewall_conf))

            function_run('saltutil.refresh_pillar', targets=node_id)

            self.logger.debug(f"Applying 'components.system.firewall' on {node_id}")
            StatesApplier.apply(
                ["components.system.firewall"],
                local_minion_id()
            )
        except Exception:
            raise
        self.logger.debug("Done")
    def _get_cluster_id(self, targets=ALL_MINIONS):
        """Gets cluster_id value from pillar and returns to user."""

        res = None
        try:
            cluster_data = PillarGet().run('cluster', targets)
            cluster_id = []
            if cluster_data[local_minion_id()]["cluster"] is values.MISSED:
                logger.debug(
                    "Cluster data is not yet formed and ClusterID not found"
                )
            else:
                for node in cluster_data:
                    if ("cluster_id" not in cluster_data[node]["cluster"] or
                        cluster_data[node]["cluster"]["cluster_id"] is values.MISSED):
                        logger.debug("Cluster data is partially formed and ClusterID not found")
                    else:
                        cluster_id.append(
                           cluster_data[node]["cluster"]["cluster_id"]
                        )

            if cluster_id:
                if len(set(cluster_id)) != 1:
                    logger.warning(
                      "ClusterID assignment NOT unique across "
                      f"the nodes of the cluster: {cluster_id}. "
                      "Possible warning: Check if cluster values "
                      "have been manually tampered with."
                    )

                else:
                    res = cluster_id[0]

            else:
                logger.warning(
                     "ClusterID is not present in Pillar data for "
                     "either of the nodes."
                )

        # On error, log it and return None; handled in `run()`
        except ValueError as exc:
            logger.error(
                "Error in retrieving cluster_id from "
                f"Pillar data: {str(exc)}"
            )

        return res
 def run(self):
     node_id = local_minion_id()
     cortx_components = get_cortx_states()
     cmd_run(f"salt {node_id} saltutil.sync_all")
     for component in cortx_components:
         states = cortx_components[component]
         for state in states:
             try:
                 self.logger.debug(
                     f"Executing post_install command for {state} component"
                 )
                 deploy.Deploy()._apply_state(
                     f"components.{state}",
                     targets=node_id,
                     stages=['config.post_install'])
             except Exception:
                 raise
     self.logger.debug("Done")
Example #21
 def _validate_config_devices(self, config):
     try:
         for section in config.sections():
             if ("srvnode" in section):
                 if (config.has_option(section,
                                       'storage.cvg.0.data_devices')):
                     devices = config[section][
                         'storage.cvg.0.data_devices'].split(",")
                     devices.extend(
                         config[section]
                         ['storage.cvg.0.metadata_devices'].split(","))
                     for device in devices:
                         if 'default' in section:
                             target = ALL_MINIONS
                         else:
                             target = local_minion_id()
                         cmd_run(f"ls {device}", targets=target)
     except Exception as exc:
         raise ValueError(f"Config Failed to apply: {str(exc)}")
Example #22
    def validate(self, path: Path) -> str:
        """
        Validate that the file at the given path has a correct signature

        Parameters
        ----------
        path: Path
            path for the file authenticity validation

        Returns
        -------
        str:
            Comment message about GPG verification

        Raises
        ------
        ValidationError
            If validation fails.

        """
        logger.debug(f"Start '{path}' file authenticity validation")

        if self.gpg_public_key is not None:
            # NOTE: for validation signature with the custom GPG pub key
            #  it is required to use pub key in OpenPGP format, not in
            #  ASCII Armor format (--armor option of gpg tool)
            open_pgp_key = self._convert_key_to_open_pgp_format(
                self.gpg_public_key)
            cmd = (f"gpg --no-default-keyring --keyring {open_pgp_key} "
                   f"--verify {self.signature} {path}")
        else:
            cmd = f"gpg --verify {self.signature} {path}"

        try:
            res = cmd_run(cmd, targets=local_minion_id())
        except Exception as e:
            logger.debug(f'Authenticity check failed: "{e}"')
            raise ValidationError(
                f'Authenticity check failed: "{e}"') from e

        return res
Example #23
 def _get_release_info_path(self):
     release_info = None
     update_repo = PillarKey('release/upgrade')
     pillar = PillarResolver(local_minion_id()).get([update_repo])
     pillar = next(iter(pillar.values()))
     upgrade_data = pillar[update_repo]
     base_dir = Path(upgrade_data['base_dir'])
     repos = upgrade_data['repos']
     for version in reversed(list(repos)):
         if version == config.REPO_CANDIDATE_NAME:
             continue
         release_info = base_dir / f'{version}/RELEASE.INFO'
         # Note. upgrade iso now may lack release file on top level
         if not release_info.exists():
             release_info = (
                 base_dir /
                 f'{version}/{config.CORTX_ISO_DIR}/RELEASE.INFO')
         if release_info.exists():
             release_rpms = self._get_rpms_from_release(release_info)
             if self._compare_rpms_info(release_rpms):
                 return release_info
Example #24
    def run(self, **kwargs):
        """
        Server configuration command execution method

        Command:
        `cortx_setup server config --name <name> --type {VM|HW}`
        """

        node_id = local_minion_id()
        machine_id = get_machine_id(node_id)

        pillar_key_map = {
            'name': f'cluster/{node_id}/name',
            'type': f'cluster/{node_id}/type',
        }

        conf_key_map = {
            'name': f'server_node>{machine_id}>name',
            'type': f'server_node>{machine_id}>type',
        }

        Conf.load('node_info_index', f'json://{CONFSTORE_CLUSTER_FILE}')

        for key, value in kwargs.items():
            if value:
                if key in pillar_key_map.keys():
                    self.logger.debug(
                        f"Updating pillar with key:{pillar_key_map[key]} and value:{value}"
                    )
                    PillarSet().run(pillar_key_map[key], value, local=True)

                if key in conf_key_map.keys():
                    self.logger.debug(
                        f"Updating confstore with key:{conf_key_map[key]} and value:{value}"
                    )
                    Conf.set('node_info_index', conf_key_map[key], value)

        Conf.save('node_info_index')
        self.logger.debug("Done")
Example #25
    def run(self, name=None, count=None):
        try:
            node_id = local_minion_id()
            index = 'storage_create_index'

            # TODO: Addnl validation needed. Support for updating
            # values for multiple storagesets in a cluster.

            self.load_conf_store(index, f'json://{CONFSTORE_CLUSTER_FILE}')
            cluster_id = get_cluster_id()
            self.logger.debug(f"Updating storageset '{name}' with "
                              f"node count '{count}' in ConfStore")

            PillarSet().run('cluster/storage_set/name', name)

            # Not updating node count to Confstore
            # TODO: Handle scenario for multiple storagesets
            PillarSet().run('cluster/storage_set/count', count)

            try:
                tot_storageset = Conf.get('storage_create_index',
                                          f'cluster>{cluster_id}>storage_set')
                storage_set_index = len(tot_storageset)
            except Exception:
                self.logger.debug(
                    "No storage_set in confstore, setting storage_set_index to 0"
                )
                storage_set_index = 0

            Conf.set(
                index,
                f'cluster>{cluster_id}>storage_set[{storage_set_index}]>name',
                name)

            Conf.save(index)

        except ValueError as exc:
            raise ValueError(f"Failed to create Storageset: {str(exc)}")
    def run(self, **kwargs):

        self.provisioner = provisioner
        if 'username' in kwargs:
            self.provisioner.auth_init(kwargs['username'], kwargs['password'])

        self.logger.debug("Updating pillar data")
        for pillar in config.local_pillars:
            res_pillar = {}
            res = cmd_run(f"salt-call --local pillar.get {pillar} --out=json",
                          **kwargs)
            for key, value in res.items():
                value = json.loads(value)
                value = value['local']
                if pillar == 'cluster' and value.get('srvnode-0'):
                    value[key] = value.pop('srvnode-0')
                if pillar == 'storage' and value.get('enclosure-0'):
                    enc_num = key.split('-')
                    value[f'enclosure-{enc_num[1]}'] = value.pop('enclosure-0')
                res_pillar.update(value)
            self.logger.info(f"Updating {pillar} pillar data")
            self.provisioner.pillar_set(f'{pillar}', res_pillar)
        conf_path = str(PRVSNR_FACTORY_PROFILE_DIR / 'confstore')
        # backup local confstore data
        self.logger.debug(f"Copy local confstore file to {conf_path}")
        conf_create = 'components.provisioner.confstore_create'
        StatesApplier.apply([conf_create], targets=local_minion_id(), **kwargs)

        conf_copy = 'components.provisioner.confstore_copy'
        StatesApplier.apply([conf_copy])
        # backup local pillar data
        cmd_run(f"rm -rf {PRVSNR_DATA_ROOT_DIR}/.backup ", **kwargs)
        cmd_run(f"mkdir -p {PRVSNR_DATA_ROOT_DIR}/.backup", **kwargs)
        cmd_run(
            f"mv {PRVSNR_USER_LOCAL_PILLAR_DIR}/* "
            f"{PRVSNR_DATA_ROOT_DIR}/.backup/", **kwargs)
 def _ensure_upgrade_repos_configuration():
     StatesApplier.apply(['repos.upgrade'], local_minion_id())
    def run(self, *args, **kwargs):
        """pillar_export command execution method.

        Arguments:
        *args: Pillar path in <root_node>/child_node format.
            (default: all pillar data)
        **kwargs: accepts the following keys:
            export_file: Output file for pillar dump
        """

        try:
            full_pillar_load = PillarGet().run(
                *args, targets=local_minion_id())[local_minion_id()]

            # Knock off the unwanted keys
            unwanted_keys = ["mine_functions", "provisioner", "glusterfs"]
            for key in full_pillar_load.copy().keys():
                if key in unwanted_keys:
                    del full_pillar_load[key]

            ip4_interfaces = grains_get(
                "ip4_interfaces",
                targets=local_minion_id())[local_minion_id()]["ip4_interfaces"]

            network_pillar = full_pillar_load["cluster"][
                local_minion_id()]["network"]

            # Update interface IPs
            # Public Management Interface
            mgmt_if_public = ("mgmt0" if "mgmt0" in ip4_interfaces else
                              network_pillar["mgmt"]["interfaces"][0])
            network_pillar["mgmt"]["public_ip"] = (
                ip4_interfaces[mgmt_if_public][0])

            # Public Data Interface
            data_if_public = ("data0" if "data0" in ip4_interfaces else
                              network_pillar["data"]["public_interfaces"][0])
            network_pillar["data"]["public_ip"] = (
                ip4_interfaces[data_if_public][0])

            # Private Data Interface
            data_if_private = ("data0" if "data0" in ip4_interfaces else
                               network_pillar["data"]["private_interfaces"][0])
            network_pillar["data"]["private_ip"] = (
                ip4_interfaces[data_if_private][0])

            convert_data = self._convert_to_str(full_pillar_load, "")

            pillar_dump_file = (kwargs["export_file"] if "export_file"
                                in kwargs else CORTX_CONFIG_DIR)

            Path(CORTX_CONFIG_DIR).mkdir(parents=True, exist_ok=True)
            with open(pillar_dump_file, "w") as file_value:
                json.dump(convert_data, file_value)

            logger.info("SUCCESS: Pillar data exported as JSON to file "
                        f"'{pillar_dump_file}'.")

        except Exception as exc:
            raise ValueError(
                f"Error in translating Pillar data to JSON: {str(exc)}")
    def run(self, targets=ALL_MINIONS):
        """cluster_id assignment

        Execution:
        `provisioner cluster_id`
        Takes no mandatory argument as input.
        Executed only on primary node.

        """
        try:
            node_role = grains_get(
                "roles",
                local_minion_id()
            )[local_minion_id()]["roles"]            # displays as a list

            cluster_id_from_pillar = self._get_cluster_id()

            if node_role[0] != "primary":
                logger.info(
                     f"Role of current node: '{node_role[0]}'."
                )
                cluster_id_from_setup = self._initial_check(
                                        node_role[0],
                                        cluster_id_from_pillar)

            else:
                logger.debug("This is the Primary node of the cluster.")

                if not cluster_id_from_pillar:
                    logger.debug(
                       "ClusterID not set in pillar data. "
                       "Checking setup file.."
                    )

                # double verification
                cluster_id_from_setup = self._initial_check(
                                        node_role[0],
                                        cluster_id_from_pillar)

                if cluster_id_from_setup == cluster_id_from_pillar:
                    logger.debug(
                      "A unique ClusterID is already set!"
                    )

                elif (cluster_id_from_pillar and
                            cluster_id_from_setup != cluster_id_from_pillar):
                    logger.warning(
                       "Mismatch in cluster_id value between "
                       "setup and pillar data. Setting unique value now.."
                       "\nPossible warning: Check if cluster values "
                       "have been manually tampered with."
                    )

                PillarSet().run(
                    'cluster/cluster_id',
                    f'{cluster_id_from_setup}',
                    targets=ALL_MINIONS
                )

                # Ensure cluster-id file is created in all nodes
                StatesApplier.apply(
                       ['components.provisioner.config.cluster_id',
                        'components.system.config.sync_salt'
                       ],
                       targets=ALL_MINIONS
                )

            return f"cluster_id: {cluster_id_from_setup}"

        except Exception as exc:
            raise ValueError(
                "Failed: Encountered error while setting "
                f"cluster_id to Pillar data: {str(exc)}"
            )
Example #30
from cortx_setup.commands.common_utils import (get_machine_id, get_pillar_data,
                                               encrypt_secret)
from cortx_setup.validate import ipv4
from collections import OrderedDict
from pathlib import Path
from provisioner.commands import PillarSet
from provisioner.salt import cmd_run, local_minion_id
from provisioner.values import MISSED
from .enclosure_info import EnclosureInfo

# TODO: Add this path in the global config
prvsnr_cluster_path = Path(
    '/opt/seagate/cortx_configs/provisioner_cluster.json')

enc_file_path = Path('/etc/enclosure-id')

node_id = local_minion_id()
enc_num = "enclosure-" + ((node_id).split('-'))[1]

conf_key_map = {}
pillar_key_map = {}
"""Cortx Setup API for configuring the storage enclosure """


class StorageEnclosureConfig(Command):
    """
        Cortx Setup API for configuring Storage Enclosure
    """
    '''
    $ cortx_setup storage config --name <enclosure-name> --type {RBOD|JBOD|EBOD|virtual}
    $ cortx_setup storage config --controller {gallium|indium|virtual} --mode {primary|secondary}
    $ cortx_setup storage config --cvg <disk-group name> --metadata-devices <device list> --data-devices <device list>