Example #1
    def set_server_time(self):
        """Sets time on the server"""
        StatesApplier.apply(
            [
                "components.system.chrony.config",
                "components.system.chrony.stop",
                "components.system.chrony.start"
            ],
            targets=ALL_MINIONS
        )
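
Every snippet in this listing funnels through the same StatesApplier.apply(states, targets, **kwargs) call. As a minimal standalone sketch of that shared pattern (the import paths and the state name below are assumptions for illustration, not taken from the examples themselves):

    # Minimal sketch of the call pattern shared by these examples.
    # Assumption: StatesApplier and ALL_MINIONS are the cortx-prvsnr salt/config
    # wrappers; the import paths here are illustrative only.
    from provisioner.salt import StatesApplier
    from provisioner.config import ALL_MINIONS

    # Apply one or more Salt state modules to the selected minions.
    StatesApplier.apply(
        ["components.system.chrony.config"],  # ordered list of state names
        targets=ALL_MINIONS                   # or a single minion id string
    )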
Example #2
    def run(self, targets=ALL_MINIONS, **kwargs):
        try:
            self.logger.debug("Refresh enclosure ID")

            for state in [
                'components.system.storage.enclosure_id',
                'components.system.config.sync_salt'
            ]:
                StatesApplier.apply([state], targets, **kwargs)

        except Exception as exc:
            self.logger.error(
               f"Error in refreshing enclosure ID. Reason: '{str(exc)}'"
            )
Example #3
    def get_enc_id(self, targets=None, force=False):

        self.logger.info("Getting enclosure ID")
        _enc_id_file = Path(self._enclosure_id_file_path)

        if _enc_id_file.exists() and force is False:
            self.logger.info(
                f"Enclosure ID is already generated at {self._enclosure_id_file_path}"
            )
            return self.fetch_enc_id(targets)
        else:
            self.logger.info("Generating the enclosure ID")
            try:
                _set_enclosure_id_state = "components.system.storage.enclosure_id.config.set"
                StatesApplier.apply([_set_enclosure_id_state], targets)
                return self.fetch_enc_id(targets)
            except Exception as exc:
                raise Exception(
                    "Error generating the enclosure ID") from exc
Example #4
    def run(self, targets=ALL_MINIONS, **kwargs):

        self.provisioner = provisioner
        if 'username' in kwargs:
            self.provisioner.auth_init(kwargs['username'], kwargs['password'])

        try:
            self.logger.debug("Encrypting config data")

            for state in [
                'components.system.config.pillar_encrypt',
                'components.system.config.sync_salt'
            ]:
                StatesApplier.apply([state], targets, **kwargs)

        except Exception as exc:
            self.logger.error(
               f"Error in data encryption. Reason: '{str(exc)}'"
            )
Example #5
    def run(self, **kwargs):

        self.provisioner = provisioner
        if 'username' in kwargs:
            self.provisioner.auth_init(kwargs['username'], kwargs['password'])

        try:
            self.logger.debug("Generating cluster pillar")

            StatesApplier.apply([
                'components.provisioner.config.generate_cluster_pillar',
                'components.system.config.sync_salt'
            ],
                                targets=ALL_MINIONS,
                                **kwargs)

            self.logger.debug("Refreshing config")
            pillar_refresh(targets=ALL_MINIONS, **kwargs)

        except Exception as exc:
            self.logger.error(
                f"Error in cluster generation. Reason: '{str(exc)}'")
Example #6
    def run(self, **kwargs):

        self.provisioner = provisioner
        if 'username' in kwargs:
            self.provisioner.auth_init(kwargs['username'], kwargs['password'])

        self.logger.debug("Updating pillar data")
        for pillar in config.local_pillars:
            res_pillar = {}
            res = cmd_run(f"salt-call --local pillar.get {pillar} --out=json",
                          **kwargs)
            for key, value in res.items():
                value = json.loads(value)
                value = value['local']
                if pillar == 'cluster' and value.get('srvnode-0'):
                    value[key] = value.pop('srvnode-0')
                if pillar == 'storage' and value.get('enclosure-0'):
                    enc_num = key.split('-')
                    value[f'enclosure-{enc_num[1]}'] = value.pop('enclosure-0')
                res_pillar.update(value)
            self.logger.info(f"Updating {pillar} pillar data")
            self.provisioner.pillar_set(f'{pillar}', res_pillar)
        conf_path = str(PRVSNR_FACTORY_PROFILE_DIR / 'confstore')
        # backup local confstore data
        self.logger.debug(f"Copy local confstore file to {conf_path}")
        conf_create = 'components.provisioner.confstore_create'
        StatesApplier.apply([conf_create], targets=local_minion_id(), **kwargs)

        conf_copy = 'components.provisioner.confstore_copy'
        StatesApplier.apply([conf_copy])
        # backup local pillar data
        cmd_run(f"rm -rf {PRVSNR_DATA_ROOT_DIR}/.backup ", **kwargs)
        cmd_run(f"mkdir -p {PRVSNR_DATA_ROOT_DIR}/.backup", **kwargs)
        cmd_run(
            f"mv {PRVSNR_USER_LOCAL_PILLAR_DIR}/* "
            f"{PRVSNR_DATA_ROOT_DIR}/.backup/", **kwargs)
Example #7
    def run(self, **kwargs):

        node_id = local_minion_id()
        firewall_pillar_sls = Path(f"{PRVSNR_PILLAR_DIR}/components/firewall.sls")

        self.logger.debug(f"updating firewall config on {node_id}")

        try:
            self.logger.debug(f"loading firewall configuration")
            firewall_config_arg = kwargs.get('config')
            self.load_conf_store('index1', firewall_config_arg)
            firewall_conf = {'firewall': Conf.get('index1', 'firewall')}
            dump_yaml(firewall_pillar_sls, dict(firewall_conf))

            function_run('saltutil.refresh_pillar', targets=node_id)

            self.logger.debug(f"Applying 'components.system.firewall' on {node_id}")
            StatesApplier.apply(
                ["components.system.firewall"],
                local_minion_id()
            )
        except Exception as ex:
            raise ex
        self.logger.debug("Done")
Example #8
    def upgrade_sw(self, sw, sw_data, targets):
        logger.info(f"Upgrading/Installing '{sw}' on '{targets}'")
        # FIXME hard-coded
        StatesApplier.apply([f"{sw_data['base_sls']}.install"], targets)
Example #9
def _ensure_upgrade_repos_configuration():
    StatesApplier.apply(['repos.upgrade'], local_minion_id())
Example #10
    def run(self, **kwargs):
        """
        cortx cluster config command

        Bootstrap system, deploy cortx components

        Execution:
        `cortx_setup cluster create [nodes_fqdn] --name <cluster_name>`

        """
        try:
            self.provisioner = provisioner
            try:
                username = os.getenv('SUDO_USER') if os.getenv(
                    'SUDO_USER') else os.getenv('USER')
            except Exception as ex:
                raise ex
            if username != 'root':
                password = getpass(
                    prompt=f"Enter {username} user password for current node:")
                auth_args = {'username': username, 'password': password}
                self.provisioner.auth_init(username, password)
            else:
                auth_args = {}

            index = 'cluster_info_index'
            local_minion = None
            local_fqdn = socket.gethostname()
            cluster_args = [
                'name', 'site_count', 'storageset_count', 'virtual_host'
            ]

            # Ref: `nodes` will be removed from this args list.
            # Read more on https://github.com/Seagate/cortx-prvsnr/tree/pre-cortx-1.0/docs/design_updates.md#field-api-design-changes
            nodes = kwargs['nodes']
            target_build = kwargs['target_build']
            source_type = kwargs['source']

            self.logger.debug("Checking for basic details in place.")
            # Parsing nodes
            for idx, node in enumerate(nodes):
                if node == local_fqdn:
                    nodes[idx] = f"srvnode-1:{username}@{node}"
                    local_minion = 'srvnode-1'
                else:
                    nodes[idx] = f"srvnode-{idx+1}:{username}@{node}"

            # HA validation
            if len(nodes) > 1:
                kwargs['ha'] = True

            if target_build:
                if not target_build.startswith('http'):
                    raise ValueError(
                        f"Invalid target build provided: {target_build}"
                        " Please provide the valid http or https URL.")
                # target_build and source type iso are mutually exclusive
                if source_type == 'iso':
                    raise TypeError(
                        "The target_build option and the 'source' type "
                        "'iso' are not supported together."
                        " Please run the command with correct options.")
            else:
                # read target build from a file created during factory setup
                tbuild_path = "/opt/seagate/cortx_configs/provisioner_generated/target_build"
                self.logger.info("Fetching the Cortx build source")
                if not os.path.isfile(tbuild_path):
                    raise ValueError(
                        f"The file with Cortx build source"
                        f" doesn't exist: '{tbuild_path}'"
                        f" Please use the --target_build option to"
                        f" provide the correct build URL.")
                with open(tbuild_path, "r") as fh:
                    target_build = fh.readline().strip()

                if not target_build:
                    raise ValueError("Could not find the Cortx build source."
                                     " Please use the --target_build option to"
                                     " provide the build url")

                kwargs['target_build'] = target_build
                # The target build could be a file uri or http url
                # If it's file uri set the source to iso and target_build
                # to None.
                if target_build.startswith('file'):
                    # ISO based deployment
                    kwargs['source'] = 'iso'
                    kwargs['target_build'] = None
                elif not target_build.startswith('http'):
                    raise ValueError(
                        f"Invalid build source found: {target_build}"
                        " Please use --target_build or iso options"
                        " to provide the correct build source.")

            # ISO files validation
            if kwargs['source'] == 'iso':
                if kwargs['iso_cortx'] and kwargs['iso_os']:
                    ISO_SINGLE_FILE = kwargs['iso_cortx']
                    ISO_OS_FILE = kwargs['iso_os']
                else:
                    self.logger.info("Checking the Cortx ISO files")
                    iso_files = [
                        fn for fn in os.listdir(CORTX_ISO_PATH)
                        if fn.endswith('.iso')
                    ]
                    for name in iso_files:
                        if "single" in name:
                            ISO_SINGLE_FILE = str(CORTX_ISO_PATH) + "/" + name
                        elif "os" in name:
                            ISO_OS_FILE = str(CORTX_ISO_PATH) + "/" + name
                    kwargs['iso_cortx'] = ISO_SINGLE_FILE
                    kwargs['iso_os'] = ISO_OS_FILE

                self.logger.info("Validating the Cortx ISO files")
                if not (os.path.isfile(ISO_SINGLE_FILE)
                        or os.path.isfile(ISO_OS_FILE)):
                    raise ValueError(
                        f"No Cortx ISOs found: "
                        f"{ISO_SINGLE_FILE} & {ISO_OS_FILE}, please"
                        " keep the ISOs at /opt/isos and try again.")

            cluster_dict = {
                key: kwargs[key]
                for key in kwargs if key in cluster_args
            }

            for arg in cluster_args:
                kwargs.pop(arg)

            self.logger.info(
                "Initial checks done. \n"
                "This step will take several minutes.. Follow logs for progress.\n"
                f"Starting bootstrap process now with args: {kwargs}")
            self.provisioner.bootstrap_provisioner(**kwargs)
            salt._local_minion_id = local_minion
            if SOURCE_PATH.exists():
                self.logger.debug(
                    "Cleanup existing storage config on all nodes")
                cmd_run(f"mv {SOURCE_PATH} {DEST_PATH}", **auth_args)
                self.logger.debug("Refreshing config")
                cmd_run("salt-call saltutil.refresh_pillar", **auth_args)

            self.logger.info(
                "Bootstrap Done. Starting with preparing environment. "
                "Syncing config data now..")
            PillarSync().run(**auth_args)

            self.logger.debug("Generating cluster")
            GenerateCluster().run(**auth_args)

            self.logger.debug("Creating service user")
            self.provisioner.create_service_user(user="******")

            node_id = 'srvnode-1'
            self.logger.debug("Setting up Cluster ID on the system")
            self.provisioner.cluster_id(targets=node_id)

            self.logger.debug("Encrypting config data")
            EncryptSecrets().run(**auth_args)

            self.logger.debug("Refreshing enclosure id on the system")
            RefreshEnclosureId().run(**auth_args)

            # NTP workaround.
            # TODO: move this to time.py after encryption issue
            self.logger.debug("Setting time on node with server & timezone")

            StatesApplier.apply([
                "components.system.chrony.install",
                "components.system.chrony.config",
                "components.system.chrony.stop",
                "components.system.chrony.start"
            ],
                                targets=ALL_MINIONS,
                                **auth_args)

            machine_id = self.provisioner.grains_get(
                "machine_id")[node_id]["machine_id"]
            enclosure_id = self.provisioner.grains_get(
                "enclosure_id")[node_id]["enclosure_id"]
            if enclosure_id:
                if machine_id not in enclosure_id:  # check if the system is VM or HW
                    self.logger.debug(
                        "Setting time on enclosure with server & timezone")
                    StatesApplier.apply(["components.controller.ntp"],
                                        targets=ALL_MINIONS,
                                        **auth_args)
            StatesApplier.apply(['components.system.config.sync_salt'],
                                targets=ALL_MINIONS,
                                **auth_args)

            self.logger.info(
                "Environment set up! Proceeding to create a cluster..")

            if 'username' in auth_args:
                cmd_run(
                    f"chown -R {auth_args['username']}:{auth_args['username']} {CONFSTORE_CLUSTER_FILE}",
                    **auth_args)
            self.load_conf_store(index, f'json://{CONFSTORE_CLUSTER_FILE}')
            clust_id = self.provisioner.grains_get(
                "cluster_id")[node_id]["cluster_id"]

            for key, value in cluster_dict.items():
                if value and 'virtual_host' not in key:
                    self.logger.debug(
                        f"Updating {key} to {value} in confstore")
                    self.provisioner.pillar_set(f'cluster/{key}', value)
                    if 'storageset_count' in key:
                        conf_key = f'cluster>{clust_id}>site>storage_set_count'
                    else:
                        conf_key = f'cluster>{clust_id}>{key}'
                    Conf.set(index, conf_key, value)
                if value and 'virtual_host' in key:
                    self.logger.debug(
                        f"Updating virtual_host to {value} in confstore")
                    self.provisioner.pillar_set('cluster/mgmt_vip', value)
                    Conf.set(
                        index,
                        f'cluster>{clust_id}>network>management>virtual_host',
                        value)
            Conf.save(index)

            self.logger.debug("Exporting to Confstore")
            self.provisioner.confstore_export()

            self.logger.debug("Success: Cluster created")
            return f"Cluster created with node(s): {nodes}"

        except ValueError as exc:
            raise ValueError(f"Cluster Create Failed. Reason: {str(exc)}")
Example #11
    def run(self, targets=ALL_MINIONS):
        """cluster_id assignment

        Execution:
        `provisioner cluster_id`
        Takes no mandatory argument as input.
        Executed only on primary node.

        """
        try:
            node_role = grains_get(
                "roles",
                local_minion_id()
            )[local_minion_id()]["roles"]            # displays as a list

            cluster_id_from_pillar = self._get_cluster_id()

            if node_role[0] != "primary":
                logger.info(
                     f"Role of current node: '{node_role[0]}'."
                )
                cluster_id_from_setup = self._initial_check(
                                        node_role[0],
                                        cluster_id_from_pillar)

            else:
                logger.debug("This is the Primary node of the cluster.")

                if not cluster_id_from_pillar:
                    logger.debug(
                       "ClusterID not set in pillar data. "
                       "Checking setup file.."
                    )

                # double verification
                cluster_id_from_setup = self._initial_check(
                                        node_role[0],
                                        cluster_id_from_pillar)

                if cluster_id_from_setup == cluster_id_from_pillar:
                    logger.debug(
                      "A unique ClusterID is already set!"
                    )

                elif (cluster_id_from_pillar and
                            cluster_id_from_setup != cluster_id_from_pillar):
                    logger.warning(
                       "Mismatch in cluster_id value between "
                       "setup and pillar data. Setting unique value now.."
                       "\nPossible warning: Check if cluster values "
                       "have been manually tampered with."
                    )

                PillarSet().run(
                    'cluster/cluster_id',
                    f'{cluster_id_from_setup}',
                    targets=ALL_MINIONS
                )

                # Ensure cluster-id file is created in all nodes
                StatesApplier.apply(
                       ['components.provisioner.config.cluster_id',
                        'components.system.config.sync_salt'
                       ],
                       targets=ALL_MINIONS
                )

            return f"cluster_id: {cluster_id_from_setup}"

        except Exception as exc:
            raise ValueError(
                "Failed: Encountered error while setting "
                f"cluster_id to Pillar data: {str(exc)}"
            )
Example #12
    def run(self,
            hostname=None,
            network_type=None,
            gateway=None,
            netmask=None,
            ip_address=None,
            dns_servers=None,
            search_domains=None):
        """Network prepare execution method.

        Execution:
        `cortx_setup node prepare network --hostname <hostname>`
        `cortx_setup node prepare network --type <type> --ip_address <ip_address>
                --netmask <netmask> --gateway <gateway> --dns_servers <dns server>
                --search_domains <search domains>`
        """

        node_id = local_minion_id()
        machine_id = get_machine_id(node_id)
        self.load_conf_store('node_prepare_index',
                             f'json://{CONFSTORE_CLUSTER_FILE}')

        if hostname is not None:
            self.logger.debug(f"Setting up system hostname to {hostname}")
            try:
                set_hostname(hostname=hostname, local=True)
                Conf.set('node_prepare_index',
                         f'server_node>{machine_id}>hostname', hostname)
            except Exception as ex:
                raise ex

        if network_type is not None:

            server_type = function_run('grains.get',
                                       fun_args=['virtual'],
                                       targets=node_id)[f'{node_id}']
            if not server_type:
                raise Exception("server_type missing in grains")
            mtu = '1500' if server_type == 'virtual' or network_type == 'management' else '9000'

            config_method = 'Static' if ip_address else 'DHCP'
            self.logger.debug(
                f"Configuring {network_type} network using {config_method} method"
            )

            try:
                if network_type == 'management':
                    set_mgmt_network(mgmt_public_ip=ip_address,
                                     mgmt_netmask=netmask,
                                     mgmt_gateway=gateway,
                                     mgmt_mtu=mtu,
                                     local=True)
                elif network_type == 'data':
                    set_public_data_network(data_public_ip=ip_address,
                                            data_netmask=netmask,
                                            data_gateway=gateway,
                                            data_mtu=mtu,
                                            local=True)
                elif network_type == 'private':
                    set_private_data_network(data_private_ip=ip_address,
                                             data_mtu=mtu,
                                             local=True)
            except Exception as ex:
                raise ex

            if config_method == 'Static':
                self.update_network_confstore(network_type=network_type,
                                              key='private_ip' if network_type
                                              == 'private' else 'public_ip',
                                              value=ip_address,
                                              target=node_id)
                self.update_network_confstore(network_type=network_type,
                                              key='netmask',
                                              value=netmask,
                                              target=node_id)
                self.update_network_confstore(network_type=network_type,
                                              key='gateway',
                                              value=gateway,
                                              target=node_id)
        for key, val in {
                'dns_servers': dns_servers,
                'search_domains': search_domains
        }.items():
            if val:
                self.logger.debug(f"Setting up system {key} to {val}")
                PillarSet().run(f'cluster/{key}', val, local=True)

        function_run('saltutil.refresh_pillar', targets=node_id)
        if dns_servers and search_domains:
            StatesApplier.apply(["components.system.network.resolv_conf"],
                                local_minion_id())

        Conf.save('node_prepare_index')
        # call state applier if dns and search domains are provided

        self.logger.debug("Done")
Example #13
    def run(self, **kwargs):
        reset_type = kwargs.get('type')
        cortx_components = get_reset_states()
        non_cortx_components = get_pillar_data('reset/non_cortx_components')
        system_components = get_pillar_data('reset/system_components')
        self.node_list = get_cluster_nodes()

        self.logger.debug(f"Reset to be done for type is {reset_type}")

        self.logger.debug("Stopping the cluster")
        self.cluster_stop()

        self._create_saltssh_client()

        if reset_type == 'data':
            self.logger.info("Calling reset for cortx components")
            cortx_components = get_reset_states()
            self._destroy(cortx_components, stage=["teardown.reset"])

            self.logger.info("Cleaning up provisioner logs and metadata")
            StatesApplier.apply(["components.provisioner.teardown.reset"],
                                targets=ALL_MINIONS)

            self.logger.info("starting the cluster")
            self.cluster_start()
            self.logger.info("Done")

        elif reset_type == 'all':
            self.logger.debug(
                "Performing post Factory reset for Cortx components.")
            self._destroy(cortx_components,
                          stage=["teardown.reset", "teardown.cleanup"])

            self.logger.debug("Preparing Reset for non-Cortx components")

            self._destroy(non_cortx_components, stage=["reset"])
            self.logger.debug("Preparing Reset for system components")

            self._destroy(system_components, stage=["teardown"])
            self.logger.debug("Preparing Reset for Provisioner commands")

            provisioner_components = [
                "system.reset", "provisioner.salt.stop",
                "system.storage.glusterfs.teardown.volume_remove",
                "system.storage.glusterfs.teardown.stop",
                "system.storage.glusterfs.teardown.remove_bricks",
                "system.storage.glusterfs.teardown.cache_remove"
            ]

            self._apply_states(provisioner_components, self.node_list)

            self.logger.debug("Removing cluster id file")
            self._run_cmd(
                ['chattr -i /etc/cluster-id', 'rm -rf /etc/cluster-id'],
                self.node_list)
            self._run_cmd([
                'mkdir -p /var/lib/seagate/cortx/provisioner/local/srv/pillar/groups/all',
                'mkdir -p /var/lib/seagate/cortx/provisioner/shared/locks'
            ], self.node_list)

            self.logger.debug("Performing provisioner cleanup")
            self._run_cmd(
                list(map(lambda el: 'rm -rf ' + str(el), CLEANUP_FILE_LIST)),
                self.node_list)

            self.logger.debug("Restoring provisioner backed-up files")
            for key, val in RESTORE_FILE_DICT.items():
                self._run_cmd([f'yes | cp -rf {str(key)} {str(val)}'],
                              self.node_list)

            self.logger.debug("Configuring salt at node level")
            self._run_cmd([f'sh {salt_basic_config_script}'], self.node_list)

            # self._run_cmd(
            #     ['systemctl restart salt-minion salt-master'],
            #     self.node_list)

            # # This is bit risky
            # self._run_cmd(
            #     [f'yes | cp -rf {BACKUP_FILE_DICT}/hosts /etc/hosts'])
            # self._run_cmd(['rm -rf /root/.ssh'], self.node_list)
            self.logger.debug("Done")
Example #14
    def set_enclosure_time(self):
        """Sets time on the enclosure"""
        StatesApplier.apply(
            ["components.controller.ntp"],
            targets=ALL_MINIONS
        )