Example #1
    def _import_gpg_public_key(gpg_pub_key: str):
        """
        Import GPG public key.

        Parameters
        ----------
        gpg_pub_key: str
            path to the GPG public key file

        Returns
        -------
        None
        """
        cmd = f'gpg --import {gpg_pub_key}'

        cmd_run(cmd,
                targets=local_minion_id(),
                fun_kwargs=dict(python_shell=True))
Example #2
    def _convert_key_to_open_pgp_format(pub_key_path: Path) -> Path:
        """
        Check if the GPG public key is in ASCII Armor format. If so,
        convert it to OpenPGP format.

        Parameters
        ----------
        pub_key_path: Path
            Path to GPG public key

        Returns
        -------
        Path:
            path to the file with GPG public key in OpenPGP format

        """
        # NOTE: for the ASCII Armor format, please, refer to RFC4880
        #  https://datatracker.ietf.org/doc/html/rfc4880#section-6.2

        # NOTE: return given path itself if it is in OpenPGP format already
        res = pub_key_path
        with open(pub_key_path, "rb") as fh:
            # NOTE: read file as binary file since OpenPGP is binary format
            content = fh.readlines()
            armor_header = content[0]
            armor_tail = content[-1]

        # NOTE: we check that the armor header and armor tail in binary
        #  representation exist in the first and the last line of
        #  the pub key file.
        if (ARMOR_HEADER.encode() in armor_header
                and ARMOR_TAIL.encode() in armor_tail):
            # NOTE: it means that provided public key is in ASCII Armor format
            cmd = f"gpg --yes --dearmor {pub_key_path.resolve()}"
            try:
                # NOTE: by default gpg tool converts the given file to the file
                #  with the same name + '.gpg' extension at the end.
                #  Directory is the same
                cmd_run(cmd, targets=local_minion_id())
            except Exception as e:
                logger.error("Can't convert ASCII Armor GPG public key "
                             f"'{pub_key_path.resolve()}' "
                             f"to OpenPGP format: '{e}'")
                raise ValidationError(
                    f'Public key conversion error: "{e}"') from e
            else:
                # NOTE: because .with_suffix method replaces the last suffix
                suffix = pub_key_path.suffix + ".gpg"
                res = pub_key_path.with_suffix(suffix)

        return res
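The snippet above relies on module-level `ARMOR_HEADER` and `ARMOR_TAIL` constants that are not shown. A minimal sketch of what they would contain, assuming the standard framing lines for public key blocks from RFC 4880 section 6.2:

    # Assumed values; RFC 4880 §6.2 defines these ASCII Armor framing
    # lines for public key blocks.
    ARMOR_HEADER = "-----BEGIN PGP PUBLIC KEY BLOCK-----"
    ARMOR_TAIL = "-----END PGP PUBLIC KEY BLOCK-----"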
Example #3
    def self_upgrade(self, flow):
        # here we can use python API (SWUpgradeNode) since
        # old provisioner version would be called anyway
        logger.info('Upgrading Provisioner on all the nodes')
        # FIXME what about backup hook for provisioner
        SWUpgradeNode().run(flow=flow, sw=['provisioner'], no_hooks=True)

        # support for a separate orchestrator module
        # todo: can be a python API call if no changes for provisioner
        # logger.info('Upgrading Orchestrator on all the nodes')
        # cmd_run('provisioner sw_upgrade_node --sw orchestrator')

        logger.info('Syncing salt minions')
        cmd_run('salt-call saltutil.sync_all')
Example #4
    def _validate_python_index(self, index_path: Path, dry_run: bool = False):
        """
        Perform the dynamic validation for SW upgrade Python index by
        the given index path `index_path`

        Parameters
        ----------
        index_path: Path
            Path to the SW upgrade Python index
        dry_run: bool
            If `True`, collect validation exceptions instead of raising them

        Returns
        -------
        None

        Raises
        ------
        SWUpdateRepoSourceError
            If Python index validation fails

        """
        logger.debug("Start Python index validation")
        if not index_path.exists():
            return

        pkgs = (p for p in index_path.iterdir() if p.is_dir())
        try:
            test_package_name = next(pkgs).name
        except StopIteration:
            logger.debug("Python index is empty, skip the validation")
            return

        with tempfile.TemporaryDirectory() as tmp_dir:
            cmd = (f"pip3 download {test_package_name} --dest={tmp_dir}/ "
                   f"--index-url file://{index_path.resolve()}")
            try:
                cmd_run(cmd,
                        targets=local_minion_id(),
                        fun_kwargs=dict(python_shell=True))
            except Exception as e:
                exc = SWUpdateRepoSourceError(
                    index_path, "Python index validation failed: "
                    f"{e}")
                if dry_run:
                    self._exceptions.append(exc)
                else:
                    raise exc

        logger.debug("Python index validation succeeded")
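Since the check downloads the first package directory it finds via `--index-url file://...`, the index is expected to follow a PEP 503-style "simple" layout: one directory per package name holding the distribution files. A hypothetical tree that would pass this validation:

    # index_path/
    #     some-package/
    #         some_package-1.0.0-py3-none-any.whl
    #     other-package/
    #         other_package-2.1.0.tar.gz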
Example #5
 def run(self):
     res = cmd_run('salt-key -L --out=json')
     res = json.loads(res[local_minion_id()])
     result = {}
     result['cluster_nodes'] = res.get('minions')
     result['non_cluster_nodes'] = res.get('minions_pre')
     return result
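`cmd_run` returns a mapping of minion id to command output; the JSON printed by `salt-key -L --out=json` carries the standard salt-key key sets, sketched here with hypothetical minion ids:

    # {
    #     "minions": ["srvnode-1", "srvnode-2"],
    #     "minions_pre": ["srvnode-3"],
    #     "minions_rejected": [],
    #     "minions_denied": []
    # }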
Example #6
 def run():
     cmd_out = cmd_run('cortx cluster start', targets=local_minion_id())
     result = {
         key.replace("\"", ""): item.replace("\"", "")
         for key, item in cmd_out.items()
     }
     return result
Example #7
def get_cluster_nodes():
    """
    Get current nodes in cluster
    """
    res = cmd_run('salt-key -L --out=json')
    res = json.loads(res[local_minion_id()])
    return res.get('minions')
Example #8
 def installed_rpms(self) -> List:
     if self._installed_rpms is None:
         exclude_rpms = config.EXCLUDE_RPMS_RELEASE_VERSION
         res = cmd_run(f"rpm -qa|grep '^cortx-'|grep -Ev '{exclude_rpms}'",
                       targets=local_minion_id())
         rpms = res[next(iter(res))].split("\n")
         self._installed_rpms = [f'{rpm}.rpm' for rpm in rpms if rpm]
     return self._installed_rpms
Example #9
 def run():
     cmd_out = cmd_run('hctl status --json', targets=local_minion_id())
     cmd_out = json.loads(cmd_out[local_minion_id()])
     result = {}
     result['pools'] = cmd_out.get('pools')
     result['profiles'] = cmd_out.get('profiles')
     result['filesystem'] = cmd_out.get('filesystem')
     result['nodes'] = cmd_out.get('nodes')
     return result
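Only the four top-level keys read above are assumed about the `hctl status --json` payload; a rough sketch of its shape (value types are illustrative):

    # {
    #     "pools": [...],
    #     "profiles": [...],
    #     "filesystem": {...},
    #     "nodes": [...]
    # }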
Example #10
    def delegate(self, flow=config.CortxFlows.UPGRADE):
        logger.info("SW Upgrade: delegating remaining phases"
                    " to upgraded orchestrator logic")

        cmd = "provisioner sw_upgrade --noprepare"
        if flow == config.CortxFlows.UPGRADE_OFFLINE:
            cmd += ' --offline'

        return cmd_run(cmd, targets=config.LOCAL_MINION)
Example #11
 def run(self):
     node_id = local_minion_id()
     cortx_components = get_cortx_states()
     cmd_run(f"salt {node_id} saltutil.sync_all")
     for component in cortx_components:
         states = cortx_components[component]
         for state in states:
             try:
                 self.logger.debug(
                     f"Executing post_install command for {state} component"
                 )
                 deploy.Deploy()._apply_state(
                     f"components.{state}",
                     targets=node_id,
                     stages=['config.post_install'])
             except Exception:
                 # preserve the original traceback
                 raise
     self.logger.debug("Done")
Example #12
 def _validate_config_devices(self, config):
     try:
         for section in config.sections():
             if ("srvnode" in section):
                 if (config.has_option(section,
                                       'storage.cvg.0.data_devices')):
                     devices = config[section][
                         'storage.cvg.0.data_devices'].split(",")
                     devices.extend(
                         config[section]
                         ['storage.cvg.0.metadata_devices'].split(","))
                     for device in devices:
                         if 'default' in section:
                             target = ALL_MINIONS
                         else:
                             target = local_minion_id()
                         cmd_run(f"ls {device}", targets=target)
     except Exception as exc:
         raise ValueError(f"Failed to apply config: {str(exc)}")
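A hypothetical config that this validator would accept, shown with `configparser` so the flattened option names are concrete (section and device names are made up):

    import configparser
    import textwrap

    sample = textwrap.dedent("""
        [srvnode_default]
        storage.cvg.0.data_devices = /dev/sdb,/dev/sdc
        storage.cvg.0.metadata_devices = /dev/sdd

        [srvnode-1]
        storage.cvg.0.data_devices = /dev/disk/by-id/dm-name-mpathe
        storage.cvg.0.metadata_devices = /dev/disk/by-id/dm-name-mpathf
        """)
    config = configparser.ConfigParser()
    config.read_string(sample)
    # 'srvnode_default' would be checked on ALL_MINIONS, 'srvnode-1'
    # only on the local minion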
Example #13
def disk_devices(device_type, devices):
    local_devices = None
    if device_type == HW_TYPE:
        local_devices = cmd_run("multipath -ll|grep mpath|sort -k2|cut -d' ' -f1|sed 's|mpath|/dev/disk/by-id/dm-name-mpath|g'|paste -s -d, -")  # noqa: E501
        local_devices = local_devices[local_minion_id()]
        if not local_devices:
            raise CortxSetupError("Devices are not present on system")
        local_devices = local_devices.split(',')
    if device_type == VM_TYPE:
        local_devices = cmd_run("lsblk -o name -lpn | awk '/dev\/sd/{print}'")  # noqa: W605, E501
        local_devices = local_devices[local_minion_id()]
        if not local_devices:
            raise CortxSetupError("Devices are not present on system")
        local_devices = local_devices.split('\n')
    local_devices = set(local_devices)
    devices = set(devices)

    if not devices.issubset(local_devices):
        raise CortxSetupError(f"Invalid device list provided {devices}")
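For context, each pipeline returns a single string per minion; hypothetical raw outputs and the lists they split into:

    # HW_TYPE (multipath pipeline): one comma-joined line, e.g.
    #   "/dev/disk/by-id/dm-name-mpatha,/dev/disk/by-id/dm-name-mpathb"
    #   -> .split(',') -> ['/dev/disk/by-id/dm-name-mpatha', ...]
    # VM_TYPE (lsblk | awk): one device per line, e.g.
    #   "/dev/sda\n/dev/sdb"
    #   -> .split('\n') -> ['/dev/sda', '/dev/sdb']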
Example #14
def test_salt_cmd_run(monkeypatch):
    function_run_args = []

    def function_run(*args, **kwargs):
        nonlocal function_run_args
        function_run_args.append((args, kwargs))

    monkeypatch.setattr(salt, 'function_run', function_run)

    targets = 'some-targets'
    cmd = 'some_command'

    function_run_args = []
    salt.cmd_run(
        cmd,
        targets=targets,
    )
    assert function_run_args == [(('cmd.run', ),
                                  dict(fun_args=[cmd],
                                       targets=targets,
                                       fun_kwargs=dict(bg=False),
                                       timeout=None))]
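The assertion pins down the wrapper's calling convention. A minimal sketch of a `cmd_run` that would satisfy this test; the real provisioner implementation may differ, and the `background` parameter is an assumption:

    def cmd_run(cmd, targets=None, background=False,
                fun_kwargs=None, timeout=None):
        # function_run is the salt executor patched in the test above
        if fun_kwargs is None:
            fun_kwargs = {}
        fun_kwargs.setdefault('bg', background)  # 'bg' = run in background
        return function_run('cmd.run', fun_args=[cmd], targets=targets,
                            fun_kwargs=fun_kwargs, timeout=timeout)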
Example #15
    def upgrade(self, flow, from_ver, to_ver):
        # TODO: can skip that if no changes for Orchestrator
        ret = cmd_run('provisioner --version', targets=config.LOCAL_MINION)
        new_prvsnr_version = next(iter(ret.values()))
        # Note. assumption: case new version < old version is validated
        #       as part of validate stage
        if new_prvsnr_version != __version__:
            return self.delegate(flow=flow)
        else:
            logger.info("SW Upgrade upgraded logic is the same as the"
                        " current one, proceeding without delegation")

        # noprepare is True or new logic is the same
        planned_node_groups = self.plan_upgrade(flow=flow)

        logger.info("Moving the Cortx cluster into standby mode")
        cluster_standby()

        self.upgrade_cluster(planned_node_groups, flow, from_ver, to_ver)

        logger.info("Starting the Cortx cluster")
        cluster_start()
        # cluster_start(unstandby=False)

        # TODO make the following a part of migration
        #      routine on a node level
        # # re-apply provisioner configuration to ensure
        # # that updated pillar is taken into account
        # _apply_provisioner_config(targets)
        # config_salt_master()
        # minion_conf_changes = config_salt_minions()
        # ...
        # TODO make the following a part of migration
        #      routine on a node level
        # # NOTE that should be the very final step of the logic
        # #      since salt client will be restarted so the current
        # #      process might start to wait itself
        # if minion_conf_changes:
        #     # TODO: Improve salt minion restart logic
        #     # please refer to task EOS-14114.
        #     try:
        #         _restart_salt_minions()
        #     except Exception:
        #         logger.exception('failed to restart salt minions')

        SetRelease(to_ver).run()
Example #16
    def validate(self, path: Path) -> str:
        """
        Validate that the file at the given path has a correct signature

        Parameters
        ----------
        path: Path
            path for the file authenticity validation

        Returns
        -------
        str:
            Comment message about GPG verification

        Raises
        ------
        ValidationError
            If validation fails.

        """
        logger.debug(f"Start '{path}' file authenticity validation")

        if self.gpg_public_key is not None:
            # NOTE: for validation signature with the custom GPG pub key
            #  it is required to use pub key in OpenPGP format, not in
            #  ASCII Armor format (--armor option of gpg tool)
            open_pgp_key = self._convert_key_to_open_pgp_format(
                self.gpg_public_key)
            cmd = (f"gpg --no-default-keyring --keyring {open_pgp_key} "
                   f"--verify {self.signature} {path}")
        else:
            cmd = f"gpg --verify {self.signature} {path}"

        try:
            res = cmd_run(cmd, targets=local_minion_id())
        except Exception as e:
            logger.debug(f'Authenticity check failed: "{e}"')
            raise ValidationError(
                f'Authenticity check failed: "{e}"') from e

        return res
Example #17
def test_yum_rollback_manager():
    minion_id = os.environ['TEST_MINION_ID']

    some_minion_id = 'some_minion_id'
    with pytest.raises(errors.SaltNoReturnError) as excinfo:
        with YumRollbackManager(some_minion_id) as rb_manager:
            pass

    with pytest.raises(ValueError) as excinfo:
        with YumRollbackManager(minion_id) as rb_manager:
            assert minion_id in rb_manager.last_txn_ids
            cmd_run('yum install -y vim', minion_id)
            cmd_run('rpm -qi vim-enhanced', minion_id)
            raise ValueError('some error')

    assert str(excinfo.value) == 'some error'

    with pytest.raises(errors.SaltError):
        cmd_run('rpm -qi vim-enhanced', minion_id)
Example #18
    def run(self, **kwargs):

        self.provisioner = provisioner
        if 'username' in kwargs:
            self.provisioner.auth_init(kwargs['username'], kwargs['password'])

        self.logger.debug("Updating pillar data")
        for pillar in config.local_pillars:
            res_pillar = {}
            res = cmd_run(f"salt-call --local pillar.get {pillar} --out=json",
                          **kwargs)
            for key, value in res.items():
                value = json.loads(value)
                value = value['local']
                if pillar == 'cluster' and value.get('srvnode-0'):
                    value[key] = value.pop('srvnode-0')
                if pillar == 'storage' and value.get('enclosure-0'):
                    enc_num = key.split('-')
                    value[f'enclosure-{enc_num[1]}'] = value.pop('enclosure-0')
                res_pillar.update(value)
            self.logger.info(f"Updating {pillar} pillar data")
            self.provisioner.pillar_set(f'{pillar}', res_pillar)
        conf_path = str(PRVSNR_FACTORY_PROFILE_DIR / 'confstore')
        # backup local confstore data
        self.logger.debug(f"Copy local confstore file to {conf_path}")
        conf_create = 'components.provisioner.confstore_create'
        StatesApplier.apply([conf_create], targets=local_minion_id(), **kwargs)

        conf_copy = 'components.provisioner.confstore_copy'
        StatesApplier.apply([conf_copy])
        # backup local pillar data
        cmd_run(f"rm -rf {PRVSNR_DATA_ROOT_DIR}/.backup ", **kwargs)
        cmd_run(f"mkdir -p {PRVSNR_DATA_ROOT_DIR}/.backup", **kwargs)
        cmd_run(
            f"mv {PRVSNR_USER_LOCAL_PILLAR_DIR}/* "
            f"{PRVSNR_DATA_ROOT_DIR}/.backup/", **kwargs)
Example #19
 def cluster_stop():
     res = cmd_run('cortx cluster stop --all || true ',
                   targets=local_minion_id())
     return next(iter(res.values()))
Example #20
def interfaces(interface):
    for iface in interface:
        try:
            cmd_run(f"ip a | grep {iface}")
        except Exception as exc:
            raise CortxSetupError(f"Invalid interface {iface}\n {exc}")
Example #21
    def get_packages_version(self,
                             release: str,
                             dry_run: bool = False) -> dict:
        """
        Return information about CORTX packages and their versions.

        Parameters
        ----------
        release: str
            SW upgrade repository version
        dry_run: bool
            If this parameter is set to `True` this method doesn't raise
            Exceptions, just collects them

        Returns
        -------
        dict:
            return dictionary with CORTX packages and their versions

        """
        if self._source_version == ISOVersion.VERSION1:
            repo_name = CORTX_ISO_DIR
        elif self._source_version == ISOVersion.VERSION2:
            repo_name = UpgradeReposVer2.CORTX.value
        else:
            exc = ValueError(f"Unsupported source version: "
                             f"{self._source_version}")
            if dry_run:
                self._exceptions.append(exc)
                # NOTE: repo_name is unbound past this point, so stop here
                return dict()
            raise exc

        repo = f"sw_upgrade_{repo_name}_{release}"

        # NOTE: use --enablerepo to suspend message
        #  Repo sw_upgrade_cortx_iso_candidate has been automatically enabled.
        #  in case of candidate repo
        cmd = (f"yum --enablerepo={repo} --showduplicates repo-pkgs {repo} "
               f"list 2>/dev/null | grep '{repo}' | awk '{{print $1\" \"$2}}'")

        res = cmd_run(cmd,
                      targets=local_minion_id(),
                      fun_kwargs=dict(python_shell=True))

        packages = res[local_minion_id()].strip()

        if packages:
            logger.debug(f"List of packages in repository '{repo}':"
                         f" {packages}")
        else:
            logger.debug(f"There are no packages in repository '{repo}'")

            return dict()

        packages = packages.split('\n')
        res = dict()
        # NOTE: Format is following
        # ```
        #  {
        #      'cortx-motr': {
        #             'version': '2.0.0-277',
        #          },
        #  }
        # ```
        #
        # TODO: EOS-20507: Along with the 'version' field we need to add a
        #  'constraint version' field to provide necessary information about
        #  compatibility with old versions
        for entry in packages:
            pkg, ver = entry.split(" ")
            # package architecture (.noarch, .x86_64) is not needed in
            # the name of the package
            pkg = pkg.split('.')[0]
            ver = normalize_rpm_version(ver)
            res[pkg] = {SWUpgradeInfoFields.VERSION.value: ver}

        return res
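To make the parsing step concrete, a hypothetical line emitted by the yum/awk pipeline and how the loop handles it:

    # "cortx-motr.x86_64 2.0.0-277"
    #   entry.split(" ")   -> ("cortx-motr.x86_64", "2.0.0-277")
    #   pkg.split('.')[0]  -> "cortx-motr"
    #   normalize_rpm_version(ver)  # project helper that maps the RPM
    #                               # version-release to a comparable form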
Example #22
    def validate(iso_info):
        """
        Validate installed CORTX packages against the version constraints
        declared for the SW upgrade ISO.

        Parameters
        ----------
        iso_info: CortxISOInfo
            CortxISOInfo instance with all necessary information about
            SW upgrade ISO

        Returns
        -------
        None

        Raises
        ------
        ValidationError
            If validation fails.
        """
        packages = list(iso_info.packages.keys())

        if not packages:
            return  # nothing to validate

        # NOTE: the first line of `yum -q list installed` command is
        #  'Installed Packages' skip it via `tail -n +2`
        cmd = (f"yum -q list installed {' '.join(packages)} 2>/dev/null |"
               f" tail -n +2 | awk '{{print $1\" \"$2}}'")

        try:
            res = cmd_run(cmd, targets=local_minion_id())
        except Exception as e:
            logger.debug(f'Package compatibility check failed: "{e}"')
            raise ValidationError(
                f'Package compatibility check failed: "{e}"') from e

        res = res[local_minion_id()].strip()

        if res:
            logger.debug(f"List of installed CORTX packages: {res}")
        else:
            logger.warning("There are no installed CORTX packages")
            return  # nothing to validate since there are no CORTX packages

        res = res.split('\n')

        packages = dict()
        for pkg in res:
            # Aggregate version information of installed CORTX packages
            pkg_name, pkg_version = pkg.split(" ")
            # remove architecture post-fix from the package name
            pkg_name = pkg_name.split(".")[0]
            packages[pkg_name] = utils.normalize_rpm_version(pkg_version)

        error_msg = list()

        compatibility = iso_info.packages.get(CORTX_VERSION, {}).get(
            SWUpgradeInfoFields.VERSION_COMPATIBILITY.value, None)
        if compatibility:
            cortx_version = GetRelease.cortx_version()
            if Version(cortx_version) in SpecifierSet(compatibility):
                logger.info(
                    f"The CORTX release version '{cortx_version}' "
                    f"satisfies the constraint version '{compatibility}'")
            else:
                msg = (f"The CORTX release version '{cortx_version}' does not "
                       f"satisfy the constraint version '{compatibility}'")
                logger.error(msg)
                error_msg.append(msg)

        for pkg in iso_info.packages:
            if (SWUpgradeInfoFields.VERSION_COMPATIBILITY.value
                    in iso_info.packages[pkg]):
                compatibility = iso_info.packages[pkg][
                    SWUpgradeInfoFields.VERSION_COMPATIBILITY.value]

                installed_ver = packages.get(pkg, None)
                if installed_ver is None:
                    msg = (f"There is a version constraint '{compatibility}' for"
                           f" the CORTX package '{pkg}', which is not installed")
                    logger.debug(msg)
                    continue

                # NOTE: we used for comparison normalized values of RPM version
                #  For more details, please, review
                #  `provisioner.utils.normalize_rpm_version`
                #  There is some interesting behavior of packaging API for
                #  versions comparison:
                #  >>> Version('2.0.0-275') in SpecifierSet('> 2.0.0')
                #  False
                #  >>> Version('2.0.0-275') in SpecifierSet('>= 2.0.0')
                #  True
                #  >>> Version('2.0.0-275') in SpecifierSet('== 2.0.0')
                #  False
                #  >>> Version('2.0.0-275') in SpecifierSet('> 2.0.0-0')
                #  True
                # >>> version.parse('2.0.0-275') > version.parse('2.0.0')
                # True

                if Version(installed_ver) in SpecifierSet(compatibility):
                    logger.info(f"The CORTX package '{pkg}' of version "
                                f"'{installed_ver}' satisfies the constraint "
                                f"version '{compatibility}'")
                else:
                    msg = (f"The CORTX package '{pkg}' of version "
                           f"'{installed_ver}' does not satisfy the "
                           f"constraint version '{compatibility}'")
                    logger.error(msg)
                    error_msg.append(msg)

        if error_msg:
            raise ValidationError("During validation some compatibility "
                                  "errors were found:\n"
                                  + "\n".join(error_msg))
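A runnable illustration of the `packaging` behavior quoted in the inline notes above (requires the `packaging` package; versions are assumed to be normalized as described):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    # '2.0.0-275' is parsed as the post-release 2.0.0.post275
    assert Version('2.0.0-275') not in SpecifierSet('> 2.0.0')
    assert Version('2.0.0-275') in SpecifierSet('>= 2.0.0')
    assert Version('2.0.0-275') not in SpecifierSet('== 2.0.0')
    assert Version('2.0.0-275') in SpecifierSet('> 2.0.0-0')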
Example #23
    def run(self, **kwargs):
        """
        cortx cluster config command

        Bootstrap system, deploy cortx components

        Execution:
        `cortx_setup cluster create [nodes_fqdn] --name <cluster_name>`

        """
        try:
            self.provisioner = provisioner
            username = os.getenv('SUDO_USER') or os.getenv('USER')
            if username != 'root':
                password = getpass(
                    prompt=f"Enter {username} user password for current node:")
                auth_args = {'username': username, 'password': password}
                self.provisioner.auth_init(username, password)
            else:
                auth_args = {}

            index = 'cluster_info_index'
            local_minion = None
            local_fqdn = socket.gethostname()
            cluster_args = [
                'name', 'site_count', 'storageset_count', 'virtual_host'
            ]

            # Ref: `nodes` will be removed from this args list.
            # Read more on https://github.com/Seagate/cortx-prvsnr/tree/pre-cortx-1.0/docs/design_updates.md#field-api-design-changes
            nodes = kwargs['nodes']
            target_build = kwargs['target_build']
            source_type = kwargs['source']

            self.logger.debug("Checking for basic details in place.")
            # Parsing nodes
            for idx, node in enumerate(nodes):
                if node == local_fqdn:
                    nodes[idx] = f"srvnode-1:{username}@{node}"
                    local_minion = 'srvnode-1'
                else:
                    nodes[idx] = f"srvnode-{idx+1}:{username}@{node}"

            # HA validation
            if len(nodes) > 1:
                kwargs['ha'] = True

            if target_build:
                if not target_build.startswith('http'):
                    raise ValueError(
                        f"Invalid target build provided: {target_build}"
                        " Please provide the valid http or https URL.")
                # target_build and source type iso are mutually exclusive
                if source_type == 'iso':
                    raise TypeError(
                        "The target_build option and the 'source' type "
                        "'iso' are not supported together."
                        " Please run the command with correct options.")
            else:
                # read target build from a file created during factory setup
                tbuild_path = "/opt/seagate/cortx_configs/provisioner_generated/target_build"
                self.logger.info("Fetching the Cortx build source")
                if not os.path.isfile(tbuild_path):
                    raise ValueError(
                        f"The file with Cortx build source"
                        f" doesn't exist: '{tbuild_path}'"
                        f" Please use the --target_build option to"
                        f" provide the correct build URL.")
                with open(tbuild_path, "r") as fh:
                    target_build = fh.readline().strip()

                if not target_build:
                    raise ValueError("Could not find the Cortx build source."
                                     " Please use the --target_build option to"
                                     " provide the build url")

                kwargs['target_build'] = target_build
                # The target build could be a file uri or http url
                # If it's file uri set the source to iso and target_build
                # to None.
                if target_build.startswith('file'):
                    # ISO-based deployment
                    kwargs['source'] = 'iso'
                    kwargs['target_build'] = None
                elif not target_build.startswith('http'):
                    raise ValueError(
                        f"Invalid build source found: {target_build}"
                        " Please use --target_build or iso options"
                        " to provide the correct build source.")

            # ISO files validation
            if kwargs['source'] == 'iso':
                if kwargs['iso_cortx'] and kwargs['iso_os']:
                    ISO_SINGLE_FILE = kwargs['iso_cortx']
                    ISO_OS_FILE = kwargs['iso_os']
                else:
                    self.logger.info("Checking the Cortx ISO files")
                    iso_files = [
                        fn for fn in os.listdir(CORTX_ISO_PATH)
                        if fn.endswith('.iso')
                    ]
                    for name in iso_files:
                        if "single" in name:
                            ISO_SINGLE_FILE = str(CORTX_ISO_PATH) + "/" + name
                        elif "os" in name:
                            ISO_OS_FILE = str(CORTX_ISO_PATH) + "/" + name
                    kwargs['iso_cortx'] = ISO_SINGLE_FILE
                    kwargs['iso_os'] = ISO_OS_FILE

                self.logger.info("Validating the Cortx ISO files")
                if not (os.path.isfile(ISO_SINGLE_FILE)
                        and os.path.isfile(ISO_OS_FILE)):
                    raise ValueError(
                        f"No Cortx ISOs found: "
                        f"{ISO_SINGLE_FILE} & {ISO_OS_FILE}, please"
                        " keep the ISOs at /opt/isos and try again.")

            cluster_dict = {
                key: kwargs[key]
                for key in kwargs if key in cluster_args
            }

            for arg in cluster_args:
                kwargs.pop(arg)

            self.logger.info(
                "Initial checks done. \n"
                "This step will take several minutes. Follow logs for progress.\n"
                f"Starting bootstrap process now with args: {kwargs}")
            self.provisioner.bootstrap_provisioner(**kwargs)
            salt._local_minion_id = local_minion
            if SOURCE_PATH.exists():
                self.logger.debug(
                    "Cleanup existing storage config on all nodes")
                cmd_run(f"mv {SOURCE_PATH} {DEST_PATH}", **auth_args)
                self.logger.debug("Refreshing config")
                cmd_run("salt-call saltutil.refresh_pillar", **auth_args)

            self.logger.info(
                "Bootstrap Done. Starting with preparing environment. "
                "Syncing config data now..")
            PillarSync().run(**auth_args)

            self.logger.debug("Generating cluster")
            GenerateCluster().run(**auth_args)

            self.logger.debug("Creating service user")
            self.provisioner.create_service_user(user="******")

            node_id = 'srvnode-1'
            self.logger.debug("Setting up Cluster ID on the system")
            self.provisioner.cluster_id(targets=node_id)

            self.logger.debug("Encrypting config data")
            EncryptSecrets().run(**auth_args)

            self.logger.debug("Refreshing enclosure id on the system")
            RefreshEnclosureId().run(**auth_args)

            # NTP workaround.
            # TODO: move this to time.py after encryption issue
            self.logger.debug("Setting time on node with server & timezone")

            StatesApplier.apply([
                "components.system.chrony.install",
                "components.system.chrony.config",
                "components.system.chrony.stop",
                "components.system.chrony.start"
            ],
                                targets=ALL_MINIONS,
                                **auth_args)

            machine_id = self.provisioner.grains_get(
                "machine_id")[node_id]["machine_id"]
            enclosure_id = self.provisioner.grains_get(
                "enclosure_id")[node_id]["enclosure_id"]
            if enclosure_id:
                if machine_id not in enclosure_id:  # check if the system is VM or HW
                    self.logger.debug(
                        "Setting time on enclosure with server & timezone")
                    StatesApplier.apply(["components.controller.ntp"],
                                        targets=ALL_MINIONS,
                                        **auth_args)
            StatesApplier.apply(['components.system.config.sync_salt'],
                                targets=ALL_MINIONS,
                                **auth_args)

            self.logger.info(
                "Environment set up! Proceeding to create a cluster..")

            if 'username' in auth_args:
                cmd_run(
                    f"chown -R {auth_args['username']}:{auth_args['username']} {CONFSTORE_CLUSTER_FILE}",
                    **auth_args)
            self.load_conf_store(index, f'json://{CONFSTORE_CLUSTER_FILE}')
            clust_id = self.provisioner.grains_get(
                "cluster_id")[node_id]["cluster_id"]

            for key, value in cluster_dict.items():
                if value and 'virtual_host' not in key:
                    self.logger.debug(
                        f"Updating {key} to {value} in confstore")
                    self.provisioner.pillar_set(f'cluster/{key}', value)
                    if 'storageset_count' in key:
                        conf_key = f'cluster>{clust_id}>site>storage_set_count'
                    else:
                        conf_key = f'cluster>{clust_id}>{key}'
                    Conf.set(index, conf_key, value)
                if value and 'virtual_host' in key:
                    self.logger.debug(
                        f"Updating virtual_host to {value} in confstore")
                    self.provisioner.pillar_set('cluster/mgmt_vip', value)
                    Conf.set(
                        index,
                        f'cluster>{clust_id}>network>management>virtual_host',
                        value)
            Conf.save(index)

            self.logger.debug("Exporting to Confstore")
            self.provisioner.confstore_export()

            self.logger.debug("Success: Cluster created")
            return f"Cluster created with node(s): {nodes}"

        except ValueError as exc:
            raise ValueError(
                f"Cluster Create Failed. Reason: {str(exc)}") from exc
Example #24
 def cluster_start():
     res = cmd_run('cortx cluster start', targets=local_minion_id())
     return next(iter(res.values()))
Example #25
    def run(self, **kwargs):
        # valid combinations for cortx_setup storage config
        # Hardware
        # 1.  --controller galium --mode primary --ip <> --port <> --user <> --password <>
        # 2.  --name enc_rack1 --type RBOD
        # 3.  --mode primary --ip <> --port <>
        # 4.  --user <> --password
        # 5.  --controller galium
        # 6.  --cvg dg01 --data-devices /dev/sdb,/dev/sdc --metadata-devices /dev/sdd
        # VM
        # 1.  --controller virtual --mode primary --ip <> --port <> --user <> --password <>
        # 2.  --name virtual_rack1 --type virtual
        # 3.  --cvg dg02 --data-devices /dev/sdb,/dev/sdc --metadata-devices /dev/sdd

        user = kwargs.get('user')
        password = kwargs.get('password')
        ip = kwargs.get('ip')
        port = kwargs.get('port')

        name = kwargs.get('name')
        storage_type = kwargs.get('type')
        controller_type = kwargs.get('controller')
        self.mode = kwargs.get('mode')
        cred_validation = False
        cvg_name = kwargs.get('cvg')
        data_devices = []
        input_data_devices = kwargs.get('data_devices')
        if input_data_devices:
            data_devices = [
                device for device in input_data_devices.split(",")
                if device and len(device) > 1
            ]
        metadata_devices = []
        input_metadata_devices = kwargs.get('metadata_devices')
        if input_metadata_devices:
            metadata_devices = [
                device for device in input_metadata_devices.split(",")
                if device and len(device) > 1
            ]

        if (data_devices or metadata_devices) and not cvg_name:
            self.logger.exception(
                "argument cvg is must to set data and metadata devices")
            raise RuntimeError('Please provide cvg using --cvg option')

        self.machine_id = get_machine_id(node_id)
        self.refresh_key_map()

        Conf.load('node_info_index', f'json://{prvsnr_cluster_path}')

        setup_type = Conf.get('node_info_index',
                              f'server_node>{self.machine_id}>type')

        if setup_type is None:
            self.logger.error("Setup type is not set, please set the"
                              " setup type and try again")
            self.logger.error("Run following command to set the setup type"
                              ": 'cortx_setup server config type <VM|HW>'")
            raise RuntimeError("Could not find the setup type in conf store")

        if self.enclosure_id is None:
            self.enclosure_id = get_pillar_data(pillar_key_map['enclosure_id'])
            if self.enclosure_id is MISSED:
                self.enclosure_id = None
            self.refresh_key_map()
            self.logger.debug(f"enclosure id: {self.enclosure_id}")
            if self.enclosure_id is None and setup_type == "VM":
                self.enclosure_id = "enc_" + self.machine_id
                self.refresh_key_map()
                self.store_in_file()
                self.update_pillar_and_conf('enclosure_id', self.enclosure_id)

        ### THE "mode" SHOULD ALWAYS BE THE FIRST CHECK, DO NOT CHANGE THIS SEQ ###
        if self.mode is not None:
            if ip is None or port is None:
                # mandatory sub-options for mode (ip and port) are missing
                self.logger.exception(
                    "Mandatory sub options for mode - ip & port - are missing"
                )
                raise RuntimeError('Please provide ip and port')
            # Algorithm to update ip and port the enclosure id is needed.
            # if enclosure_id and user and password
            #    reset the enclosure id fetched from confstore
            #    this is to force fetch the enclosure if with
            #    current set of input parameters - user, passwd, ip, port.
            # if not self.enclosure_id:
            #     if hw:
            #         if user and password:
            #             #fetch enclosure id and store in confstore
            #         else:
            #             #error
            # if self.enclosure_id:
            #     # store ip and port in confstore
            #     if user and password:
            #         # store in confstore
            if (self.enclosure_id is not None and user is not None
                    and password is not None and setup_type == "HW"):
                # user has provided all the parameters that fetches the
                # enclosure id, so reset the enclosure id read from the
                # confstore and fetch it again with the current set of
                # parameters
                self.enclosure_id = None

            if self.enclosure_id is None:
                if setup_type == "HW":
                    # Fetch enclosure id if the user & password are also provided
                    if user is not None and password is not None:
                        self.enclosure_id = EnclosureInfo(
                            ip, user, password, port).fetch_enclosure_serial()
                        if self.enclosure_id:
                            # store enclosure_id in /etc/enclosure-id
                            self.store_in_file()
                            self.refresh_key_map()
                            self.update_pillar_and_conf(
                                'enclosure_id', self.enclosure_id)
                            cred_validation = True
                        else:
                            self.logger.exception(
                                "Could not fetch the enclosure id")
                            raise RuntimeError(
                                'Please check if credentials, ip & port provided'
                                ' are correct.')
                    else:
                        self.logger.exception(
                            "Could not update ip and port in Cortx configuration"
                            " without enclosure id. Please provide user, password,"
                            " ip and port together to fetch the enclosure id from"
                            " attached enclosure.")
                        raise RuntimeError(
                            'Incomplete set of arguments provided')
            if self.enclosure_id is not None:
                if setup_type == "VM":
                    self.logger.warning("WARNING: This is VM")
                    self.logger.warning(
                        "WARNING: Adding ip and port in confstore without validation"
                    )
                if setup_type == "HW" and not cred_validation:
                    self.logger.warning(
                        "WARNING:  Adding ip and port in confstore without"
                        " validation To force the validation, please run:"
                        " cortx_setup storage config --controller <type>"
                        " --mode primary --ip <ip> --port <port>"
                        " --user <user> --password <password>")
                self.update_pillar_and_conf('ip', ip)
                self.update_pillar_and_conf('port', port)
            else:
                self.logger.exception(
                    "Could not update ip and port without enclosure id."
                    " Please provide user, password, ip & port together")
                raise RuntimeError('Incomplete set of arguments provided')

        if user is not None or password is not None:
            if (user is None) or (password is None):
                self.logger.error(
                    "Please provide 'user' and 'password' together")
                raise RuntimeError("Incomplete arguments provided")
            if self.enclosure_id is not None and setup_type == "VM":
                self.logger.warning("WARNING: This is VM")
                self.logger.warning(
                    "WARNING: Adding user and password in confstore"
                    " without validation")
                self.update_pillar_and_conf('user', user)
                self.update_pillar_and_conf('password', password)
            elif self.enclosure_id is not None and setup_type == "HW":
                # Store user and password only after validation
                # Skip the validation if enclosure id was fetched
                #  using the same credentials
                if not cred_validation:
                    # Read ip & port from Pillar and validate by logging
                    # in to enclosure with user, passwd, ip and port
                    self.logger.debug(
                        "Validating the user and password provided")
                    host_in_pillar = get_pillar_data(
                        f"storage/{enc_num}/controller/primary/ip")
                    port_in_pillar = get_pillar_data(
                        f"storage/{enc_num}/controller/primary/port")
                    if not host_in_pillar or not port_in_pillar:
                        self.logger.error(
                            "Could not read controller ip and port from pillar"
                        )
                        raise RuntimeError(
                            "Could not validate user and password")
                    valid_connection_check = EnclosureInfo(
                        host_in_pillar, user, password,
                        port_in_pillar).connection_status()
                    if not valid_connection_check:
                        self.logger.error(
                            "Could not establish connection with"
                            " controller with provided credentials")
                        raise ValueError("Invalid credentials provided")
                self.update_pillar_and_conf('user', user)
                self.update_pillar_and_conf('password', password)
            else:
                self.logger.error(
                    "Enclosure ID is not set\n"
                    "Run following command to set the enclosure id:"
                    " cortx_setup storage config --user <user>"
                    " --password <passwd> --ip <ip> --port <port>")
                raise RuntimeError(
                    "Cannot set user and password without enclosure id")

        if ip is not None or port is not None:
            if self.mode is None:
                self.logger.exception(
                    "mode is missing, please provide --mode argument")
                raise RuntimeError("Incomplete arguments provided")
            else:
                # This is already handled in 'mode' case
                pass

        if controller_type is not None:
            valid_ctrl_type = ['gallium', 'indium']
            if setup_type == "HW" and controller_type not in valid_ctrl_type:
                self.logger.error(
                    "Invalid controller provided, please provide the"
                    " supported controller type")
                raise ValueError("Incorrect argument value provided")
            if setup_type == "VM" and controller_type != "virtual":
                self.logger.error("Controller must be 'virtual' for VM")
                raise ValueError("Incorrect argument value provided")
            if self.enclosure_id is None:
                self.logger.error(
                    "Enclosure ID is not set\n"
                    "Run following command to set the enclosure id:"
                    "cortx_setup storage config --controller primary --user"
                    " <user> --password <passwd> --ip <ip> --port <port>")
                raise RuntimeError(
                    "Cannot set controller type without enclosure id")
            # all checks are good, update confstore and pillar
            self.update_pillar_and_conf('controller_type', controller_type)

        if name is not None or storage_type is not None:
            if (name and not storage_type) or (storage_type and not name):
                self.logger.error("Please provide 'name' and 'type' together")
                raise RuntimeError("Incomplete arguments provided")
            if self.enclosure_id is not None:
                supported_type = ['RBOD', 'JBOD', 'EBOD']
                if setup_type == "HW" and storage_type not in supported_type:
                    self.logger.error(
                        "Invalid type provided, please provide the"
                        " supported storage type")
                    raise ValueError("Incorrect argument value provided")
                if setup_type == "VM" and storage_type != "virtual":
                    self.logger.error("Storage type must be 'virtual' for VM")
                    raise ValueError("Incorrect argument value provided")
            else:
                self.logger.error(
                    "Enclosure ID is not set\n"
                    "Run following command to set the enclosure id:"
                    "cortx_setup storage config --user <user>"
                    " --password <passwd> --ip <ip> --port <port>")
                raise RuntimeError(
                    "Cannot set storage name and type without enclosure id")
            # all clear, update name and type in confstore and pillar
            self.update_pillar_and_conf('name', name)
            self.update_pillar_and_conf('storage_type', storage_type)

        if cvg_name:
            self.logger.debug(
                f"cvg_name:{cvg_name}, data_devices:{data_devices}, metadata_devices:{metadata_devices}"
            )
            if not data_devices or not metadata_devices:
                self.logger.error(
                    "ERROR: The parameters data-devices and metadata-devices"
                    " are missing")
                raise RuntimeError("ERROR: Incomplete arguments provided")

            current_cvg_count = Conf.get(
                'node_info_index',
                f'server_node>{self.machine_id}>storage>cvg_count')
            if not current_cvg_count:
                current_cvg_count = 0
            else:
                current_cvg_count = int(current_cvg_count)

            cvg_list = get_pillar_data('cluster/srvnode-0/storage/cvg')
            if not cvg_list or cvg_list is MISSED:
                cvg_list = []
            elif isinstance(cvg_list[0], OrderedDict):
                for i, key in enumerate(cvg_list):
                    cvg_list[i] = dict(key)
            if data_devices:
                self.logger.debug(f"data_devices: {data_devices}")
                for device in data_devices:
                    try:
                        cmd_run(f"ls {device}", targets=node_id)
                    except Exception:
                        raise ValueError(
                            f"Validation for data device {device} failed\n"
                            "Please provide the correct device")
            if metadata_devices:
                self.logger.debug(f"metadata_devices: {metadata_devices}")
                for device in metadata_devices:
                    try:
                        cmd_run(f"ls {device}", targets=node_id)
                    except Exception:
                        raise ValueError(
                            f"Validation for metadata device {device} failed\n"
                            "Please provide the correct device")
            cvg_list.insert(
                current_cvg_count, {
                    'name': cvg_name,
                    'data_devices': data_devices,
                    'metadata_devices': metadata_devices
                })
            cvg_count = current_cvg_count + 1
            self.update_pillar_and_conf('cvg', str(cvg_count))
            self.update_pillar_and_conf('cvg_devices', cvg_list)

        Conf.save('node_info_index')
        self.logger.debug("Done")