Beispiel #1
0
    def _validate_interfaces(self, node_id):
        """Validate that mgmt and data network interfaces are configured
        in the pillar and that no interface is assigned to more than one
        role, then run the interface-level checks.
        """
        self.logger.debug("Validating network interfaces check")
        mgmt = get_pillar_data(f'cluster/{node_id}/network/mgmt/interfaces')
        private_data = get_pillar_data(
            f'cluster/{node_id}/network/data/private_interfaces')
        public_data = get_pillar_data(
            f'cluster/{node_id}/network/data/public_interfaces')

        # Every interface group must be present and non-empty.
        if mgmt is MISSED or not mgmt:
            raise CortxSetupError("Mgmt interfaces are not provided")
        if private_data is MISSED or not private_data:
            raise CortxSetupError("Private data interfaces are not provided")
        if public_data is MISSED or not public_data:
            raise CortxSetupError("Public data interfaces are not provided")

        # Management interfaces must be disjoint from both data sets.
        if any(iface in private_data or iface in public_data
               for iface in mgmt):
            raise CortxSetupError(
                "Same interface provided for mgmt and data")

        # Private and public data interfaces must not overlap either.
        if any(iface in public_data for iface in private_data):
            raise CortxSetupError(
                f"Same interface provided for public_data "
                f"{public_data}& private_data: {private_data}")

        interfaces(mgmt + private_data + public_data)
        self.logger.debug("Network interfaces check: Success")
Beispiel #2
0
    def run(self, **kwargs):
        """Show resource health or HW/SW manifest information, optionally
        filtered by resource type and (health only) resource state.
        """
        if kwargs["health"]:
            # get resource_map
            # resource_map_path =
            # ResourceDiscover.get_resource_map(resource_type =
            # kwargs['resource_type'])
            health_file = get_pillar_data(HEALTH_PATH)
            result = ResourceShow.parse_resource_file(health_file)

            # Narrow the parsed map by the optional filters.
            if kwargs['resource_type']:
                result = self.filter_resource_type(
                    kwargs['resource_type'], result)
            if kwargs['resource_state']:
                result = self.resource_filter_status(
                    kwargs['resource_state'], result)

            self.logger.info(json.dumps(result, indent=4))
            # return json.dumps(result, indent=4)

        elif kwargs["manifest"]:
            manifest_file = get_pillar_data(MANIFEST_PATH)
            result = ResourceShow.parse_resource_file(manifest_file)

            if kwargs['resource_type']:
                result = self.filter_resource_type(
                    kwargs['resource_type'], result)

            self.logger.info(json.dumps(result, indent=4))
            # return json.dumps(result, indent=4)

            self.logger.debug("discover HW/SW Manifest for all resources")
        self.logger.debug("Done")
    def validate_credentials(self, user=None, passwd=None):
        """Check the given controller credentials against the pillar.

        Compares the user and password values provided as input with the
        user and (decrypted) password values stored in the pillar.

        :param user: controller user name to validate
        :param passwd: controller password to validate
        :return: True when both user and password match, False otherwise
        :raises ValueError: when the enclosure id is not set on the node
        """
        # NOTE(security): never write the password to the logs — log the
        # user name only. The original logged both in clear text.
        self.logger.debug(
            f"Validating the provided credentials for user '{user}'"
        )
        _ctrl_user = get_pillar_data(
            f'storage/{enc_num}/controller/user'
        )
        _ctrl_secret_enc = get_pillar_data(
            f'storage/{enc_num}/controller/secret'
        )

        # The enclosure id is required to decrypt the secret read
        # from the storage pillar.
        if not enc_id_on_node:
            self.logger.error(
                "Could not fetch enclosure_id id from pillar"
            )
            raise ValueError("Enclosure ID is not set on node")

        _ctrl_secret = self.decrypt_passwd(_ctrl_secret_enc)

        user_ok = user == _ctrl_user
        passwd_ok = passwd == _ctrl_secret

        if user_ok:
            self.logger.debug(
                f"Username provided {user} matches with"
                " the user name in configuration"
            )
        else:
            self.logger.warning(
                f"Username provided {user} does not match"
                " with the user name in configuration"
            )

        if passwd_ok:
            self.logger.debug(
                "Password provided matches with"
                " the password in configuration"
            )
        else:
            self.logger.warning(
                "The password provided does not match"
                " with the password in configuration"
            )

        return user_ok and passwd_ok
Beispiel #4
0
    def run(self, force=False):
        """Run the factory node validations and create the field users.

        :param force: when True, create field users even if one or more
            node validations fail (the error is logged, not raised).
        :raises CortxSetupError: when a validation fails and ``force``
            is not set.
        """
        # Resolve the node id outside the try block: the original code
        # did it inside, which could leave `node_id` unbound in the
        # except branch.
        node_id = local_minion_id()
        try:
            self._validate_cert_installation()
            server_type = get_pillar_data(f'cluster/{node_id}/type')
            if server_type == 'HW':
                self._validate_health_map()
            self._validate_interfaces(node_id)
            server_type = self._validate_server_type(node_id)
            self._validate_devices(node_id, server_type)

            self.logger.info(
                "Node validations complete. Creating Field users..")
            self._create_field_users(node_id)
            self.logger.debug("Field users created.")
            self._finalize_field_setup()

        except CortxSetupError as exc:
            if not force:
                raise
            self.logger.info(
                f"One or more node validation(s) failed: {str(exc)}.\n"
                "Forcibly creating users..")
            self._create_field_users(node_id)
            self.logger.info(
                "Field users created. Check logs for more details on the validations error.."
            )
            self._finalize_field_setup()

    def _finalize_field_setup(self):
        """Install the support cron job and take the factory backup
        (shared by the normal and the forced path)."""
        self.logger.debug("Setting up Cron job")
        self.create_cron_job(SUPPORT_USER_NAME, SUPPORT_CRON_TIME)
        self._factory_backup()
Beispiel #5
0
 def _validate_server_type(self, node_id):
     """Return the server type read from the pillar, failing when it
     is missing or empty."""
     self.logger.debug("Validating server type check")
     stype = get_pillar_data(f'cluster/{node_id}/type')
     if stype is MISSED or not stype:
         raise CortxSetupError("Server type is not provided")
     self.logger.debug("Server type check: Success")
     return stype
Beispiel #6
0
 def _validate_health_map(self):
     """Validate that the health and manifest resource files are
     configured in the pillar and exist on disk.

     :raises CortxSetupError: when a resource path is missing,
         malformed, or points to a non-existent file.
     """
     self.logger.debug("Validating node health check")
     resource_paths = [HEALTH_PATH, MANIFEST_PATH]
     for resource in resource_paths:
         path = get_pillar_data(resource)
         if not path or path is MISSED:
             raise CortxSetupError(f"{resource} resource is not configured")
         # Pillar stores the path as a URL (e.g. json:///a/b/c); keep
         # only the filesystem part after the scheme separator. The
         # original indexed split()[1] which raised a bare IndexError
         # on a malformed value.
         _, sep, fs_path = path.partition('://')
         if not sep:
             raise CortxSetupError(
                 f"Validation failed: malformed resource path {path}")
         if not Path(fs_path).is_file():
             raise CortxSetupError(f"Validation failed: "
                                   f"File not present {fs_path}")
     self.logger.debug("Node health check: Success")
Beispiel #7
0
    def _validate_devices(self, node_id, s_type):
        """Verify that every CVG provides non-overlapping metadata and
        data device lists, then run the per-CVG device checks."""
        self.logger.debug("Validating cvg devices check")
        cvgs = get_pillar_data(f'cluster/{node_id}/storage/cvg')
        if cvgs is MISSED or not cvgs:
            raise CortxSetupError("Devices are not provided")
        for cvg in cvgs:
            meta_data = cvg.get('metadata_devices')
            data = cvg.get('data_devices')
            if not (meta_data and data):
                raise CortxSetupError("metadata or data devices are missing")

            # A device may appear in only one of the two lists.
            shared = [dev for dev in meta_data if dev in data]
            if shared:
                meta = shared[0]
                raise CortxSetupError(
                    f"{meta} is common in metadata and data device list")
            disk_devices(s_type, data + meta_data)
        self.logger.debug("Cvg devices check: Success")
Beispiel #8
0
    def run(self, storage_set_name=None, server_node=None):
        """Add server nodes to an existing storage-set.

        :param storage_set_name: name of the storage-set to update
        :param server_node: list of server node names to add
        :raises ValueError: when the storage-set does not exist or the
            node count exceeds the configured maximum.
        """
        try:
            index = 'storage_add_index'
            cluster_id = get_cluster_id()
            machine_ids = []

            self.load_conf_store(index, f'json://{CONFSTORE_CLUSTER_FILE}')

            try:
                storageset = Conf.get(index,
                                      f'cluster>{cluster_id}>storage_set')
                storage_set_len = len(storageset)
            except Exception:
                self.logger.debug(
                    "No storage-set found, setting storage_set_len to 0")
                storage_set_len = 0

            if storage_set_len == 0:
                self.logger.debug("storage-set object is empty")
                raise Exception(
                    f"Error: Storage-set {storage_set_name} does not exist."
                    " Use command - cortx_setup storageset create")

            # Locate the storage-set by name and remember its index.
            # BUGFIX: the original loop never broke on a match, so the
            # loop variable used later always pointed at the LAST
            # storage-set instead of the matching one.
            ss_index = None
            for idx in range(storage_set_len):
                ss_name = Conf.get(
                    index,
                    f'cluster>{cluster_id}>storage_set[{idx}]>name')
                if ss_name == storage_set_name:
                    ss_index = idx
                    break

            if ss_index is None:
                self.logger.debug(
                    f"Cannot find storage-set: {storage_set_name}")
                raise Exception(
                    f"Error: Storage-set {storage_set_name} does not exist."
                    " Use command - cortx_setup storageset create")

            node_count = get_pillar_data("cluster/storage_set/count")

            # TODO: Addnl validation needed. Change server_node from list
            # to string and allow only one node to be updated at a time?

            input_nodes_count = len(server_node)

            if input_nodes_count > node_count:
                raise ValueError(
                    f"Invalid count: {input_nodes_count} number of nodes received. "
                    f"Given Storageset can accept a maximum of {node_count} nodes. "
                    "Update it with `cortx_setup storageset create` command.")

            # Get corresponding machine-id of each node
            for node in server_node:
                machine_ids.append(get_machine_id(node))

            self.logger.debug(
                f"Adding machine_id '{machine_ids}' to storage-set "
                f"'{storage_set_name}' in ConfStore.")

            PillarSet().run('cluster/storage_set/server_nodes', machine_ids)
            Conf.set(
                index,
                f'cluster>{cluster_id}>storage_set[{ss_index}]>server_nodes',
                machine_ids)

            # Record the storage-set id against each server node.
            for node in server_node:
                node_machine_id = get_machine_id(node)
                self.logger.debug(
                    f"Adding storage set ID:{storage_set_name} to "
                    f"server {node} with machine id: {node_machine_id}")
                Conf.set(index,
                         f'server_node>{node_machine_id}>storage_set_id',
                         storage_set_name)

            Conf.save(index)
            self.logger.debug(f"Server nodes {server_node} with correspoding "
                              f"machine_ids added to Storageset")

        except ValueError as exc:
            # Chain the original exception for easier debugging.
            raise ValueError(
                f"Failed to add node to storageset. Reason: {str(exc)}"
            ) from exc
Beispiel #9
0
    def run(self, **kwargs):
        """Configure storage enclosure settings in pillar and confstore.

        Handles (in a strict order) the enclosure mode/ip/port, the
        controller credentials, the controller type, the enclosure
        name/type, and the CVG device lists. Only the option
        combinations listed below are supported.
        """
        # valid combinations for cortx_setup storage config
        # Hardware
        # 1.  --controller galium --mode primary --ip <> --port <> --user <> --password <>
        # 2.  --name enc_rack1 --type RBOD
        # 3.  --mode primary --ip <> --port <>
        # 4.  --user <> --password
        # 5.  --controller galium
        # 6.  --cvg dg01 --data-devices /dev/sdb,/dev/sdc --metadata-devices /dev/sdd
        # VM
        # 1.  --controller virtual --mode primary --ip <> --port <> --user <> --password <>
        # 2.  --name virtual_rack1 --type virtual
        # 3.  --cvg dg02 --data-devices /dev/sdb,/dev/sdc --metadata-devices /dev/sdd

        user = kwargs.get('user')
        password = kwargs.get('password')
        ip = kwargs.get('ip')
        port = kwargs.get('port')

        name = kwargs.get('name')
        storage_type = kwargs.get('type')
        controller_type = kwargs.get('controller')
        self.mode = kwargs.get('mode')
        # Set to True once the credentials have been proven valid by a
        # successful enclosure-serial fetch during this invocation.
        cred_validation = False
        cvg_name = kwargs.get('cvg')
        data_devices = []
        input_data_devices = kwargs.get('data_devices')
        if input_data_devices:
            # Split the comma-separated device list, dropping empty and
            # single-character fragments.
            data_devices = [
                device for device in input_data_devices.split(",")
                if device and len(device) > 1
            ]
        metadata_devices = []
        input_metadata_devices = kwargs.get('metadata_devices')
        if input_metadata_devices:
            metadata_devices = [
                device for device in input_metadata_devices.split(",")
                if device and len(device) > 1
            ]

        # Device lists are only meaningful within a CVG.
        if (data_devices or metadata_devices) and not cvg_name:
            self.logger.exception(
                "argument cvg is must to set data and metadata devices")
            raise RuntimeError('Please provide cvg using --cvg option')

        self.machine_id = get_machine_id(node_id)
        self.refresh_key_map()

        Conf.load('node_info_index', f'json://{prvsnr_cluster_path}')

        # Setup type (VM/HW) drives most validation branches below.
        setup_type = Conf.get('node_info_index',
                              f'server_node>{self.machine_id}>type')

        if setup_type == None:
            self.logger.error("Setup type is not set, please set the"
                              " setup type and try again")
            self.logger.error("Run following command to set the setup type"
                              ": 'cortx_setup server config type <VM|HW>'")
            raise RuntimeError("Could not find the setup type in conf store")

        # Bootstrap the enclosure id: read it from the pillar, and for a
        # VM synthesize one from the machine id when none exists yet.
        if self.enclosure_id is None:
            self.enclosure_id = get_pillar_data(pillar_key_map['enclosure_id'])
            if self.enclosure_id is MISSED:
                self.enclosure_id = None
            self.refresh_key_map()
            self.logger.debug(f"enclosure id: {self.enclosure_id}")
            if self.enclosure_id is None and setup_type == "VM":
                self.enclosure_id = "enc_" + self.machine_id
                self.refresh_key_map()
                self.store_in_file()
                self.update_pillar_and_conf('enclosure_id', self.enclosure_id)

        ### THE "mode" SHOULD ALWAYS BE THE FIRST CHECK, DO NOT CHANGE THIS SEQ ###
        if self.mode is not None:
            if ip is None or port is None:
                # mandetory sub options for mode (ip and port) are missing.
                self.logger.exception(
                    f"Mandatory sub options for mode- ip & password are missing"
                )
                raise RuntimeError('Please provide ip and port')
            # Algorithm to update ip and port the enclosure id is needed.
            # if enclosure_id and user and password
            #    reset the enclosure id fetched from confstore
            #    this is to force fetch the enclosure if with
            #    current set of input parameters - user, passwd, ip, port.
            # if not self.enclosure_id:
            #     if hw:
            #         if user and password:
            #             #fetch enclosure id and store in confstore
            #         else:
            #             #error
            # if self.enclosure_id:
            #     # store ip and port in confstore
            #     if user and password:
            #         # store in confstore
            if (self.enclosure_id is not None and user is not None
                    and password is not None and setup_type == "HW"):
                # user has provided all the parameters that fetches the
                # enclosure id, so reset the enclosure id read from the
                # confstore and fetch it again with the current set of
                # parameters
                self.enclosure_id = None

            if self.enclosure_id is None:
                if setup_type == "HW":
                    # Fetch enclosure id if the user & password are also provided
                    if (user != None and password != None):
                        self.enclosure_id = EnclosureInfo(
                            ip, user, password, port).fetch_enclosure_serial()
                        if self.enclosure_id:
                            # store enclosure_id in /etc/enclosure-id
                            self.store_in_file()
                            self.refresh_key_map()
                            self.update_pillar_and_conf(
                                'enclosure_id', self.enclosure_id)
                            # Successful serial fetch implicitly proves
                            # the credentials; skip re-validation later.
                            cred_validation = True
                        else:
                            self.logger.exception(
                                "Could not fetch the enclosure id")
                            raise RuntimeError(
                                'Please check if credentials, ip & port provided'
                                ' are correct.')
                    else:
                        self.logger.exception(
                            "Could not update ip and port in Cortx configuration"
                            " without enclosure id. Please provide user, password,"
                            " ip and port together to fetch the enclosure id from"
                            " attached enclosure.")
                        raise RuntimeError(
                            'Incomplete set of arguments provided')
            if self.enclosure_id is not None:
                if setup_type == "VM":
                    self.logger.warning("WARNING: This is VM")
                    self.logger.warning(
                        "WARNING: Adding ip and port in confstore without validation"
                    )
                if setup_type == "HW" and not cred_validation:
                    self.logger.warning(
                        "WARNING:  Adding ip and port in confstore without"
                        " validation To force the validation, please run:"
                        " cortx_setup storage config --controller <type>"
                        " --mode primary --ip <ip> --port <port>"
                        " --user <user> --password <password>")
                self.update_pillar_and_conf('ip', ip)
                self.update_pillar_and_conf('port', port)
            else:
                self.logger.exception(
                    "Could not update ip and port without enclosure id"
                    "Please provide user, password, ip & port together")
                raise RuntimeError('Incomplete set of arguments provided')

        # Credentials handling: 'user' and 'password' must come together.
        if user is not None or password is not None:
            if (user is None) or (password is None):
                self.logger.error(
                    f"Please provide 'user' and 'passowrd' together")
                raise RuntimeError("Imcomplete arguments provided")
            if self.enclosure_id is not None and setup_type == "VM":
                self.logger.warning("WARNING: This is VM")
                self.logger.warning(
                    "WARNING: Adding user and password in confstore"
                    " without validation")
                self.update_pillar_and_conf('user', user)
                self.update_pillar_and_conf('password', password)
            elif self.enclosure_id is not None and setup_type == "HW":
                # Store user and password only after validation
                # Skip the validation if enclosure id was fetched
                #  using the same credentials
                if not cred_validation:
                    # Read ip & port from Pillar and validate by logging
                    # in to enclosure with user, passwd, ip and port
                    self.logger.debug(
                        "Validating the user and password provided")
                    host_in_pillar = get_pillar_data(
                        f"storage/{enc_num}/controller/primary/ip")
                    port_in_pillar = get_pillar_data(
                        f"storage/{enc_num}/controller/primary/port")
                    if not host_in_pillar or not port_in_pillar:
                        self.logger.error(
                            f"Could not read controller ip and secret from pillar"
                        )
                        raise RuntimeError(
                            "Could not validate user and password")
                    valid_connection_check = EnclosureInfo(
                        host_in_pillar, user, password,
                        port_in_pillar).connection_status()
                    if not valid_connection_check:
                        self.logger.error(
                            f"Could not establish connection with"
                            " controller with provided credentials")
                        raise ValueError("Invalid credentials provided")
                self.update_pillar_and_conf('user', user)
                self.update_pillar_and_conf('password', password)
            else:
                self.logger.error(
                    f"Enclosure ID is not set\n"
                    "Run following command to set the enclosure id:"
                    "cortx_setup storage config --user <user>"
                    " --password <passwd> --ip <ip> --port <port>")
                raise RuntimeError(
                    "Cannot set mode, ip and port without enclosure id")

        # ip/port are only valid together with --mode (handled above).
        if ip is not None or port is not None:
            if self.mode is None:
                self.logger.exception(
                    f"mode is missing, please provide --mode argument")
                raise RuntimeError("Incomplete arguments provided")
            else:
                # This is already handled in 'mode' case
                pass

        if controller_type is not None:
            valid_ctrl_type = ['gallium', 'indium']
            if setup_type == "HW" and controller_type not in valid_ctrl_type:
                self.logger.error(
                    "Invalid controller provided, please provide the"
                    " supported controller type")
                raise ValueError("Incorrect argument value provided")
            if setup_type == "VM" and controller_type != "virtual":
                self.logger.error("Controller must be 'virtual' for VM")
                raise ValueError("Incorrect argument value provided")
            if self.enclosure_id is None:
                self.logger.error(
                    f"Enclosure ID is not set\n"
                    "Run following command to set the enclosure id:"
                    "cortx_setup storage config --controller primary --user"
                    " <user> --password <passwd> --ip <ip> --port <port>")
                raise RuntimeError(
                    "Cannot set controller type without enclosure id")
            # all checks are good, update confstore and pillar
            self.update_pillar_and_conf('controller_type', controller_type)

        # Enclosure name and storage type must come together.
        if name is not None or storage_type is not None:
            if (name and not storage_type) or (storage_type and not name):
                self.logger.error(f"Please provide 'name' and 'type' together")
                raise RuntimeError("Imcomplete arguments provided")
            if self.enclosure_id is not None:
                self.update_pillar_and_conf('name', name)
                supported_type = ['RBOD', 'JBOD', 'EBOD']
                if setup_type == "HW" and storage_type not in supported_type:
                    self.logger.error(
                        "Invalid type provided, please provide the"
                        " supported storage type")
                    raise ValueError("Incorrect argument value provided")
                if setup_type == "VM" and storage_type != "virtual":
                    self.logger.error("Storage type must be 'virtual' for VM")
                    raise ValueError("Incorrect argument value provided")
            else:
                self.logger.error(
                    f"Enclosure ID is not set\n"
                    "Run following command to set the enclosure id:"
                    "cortx_setup storage config --user <user>"
                    " --password <passwd> --ip <ip> --port <port>")
                raise RuntimeError(
                    "Cannot set enclosure type without enclosure id")
            # all clear, update name and type in confstore and pillar
            self.update_pillar_and_conf('name', name)
            self.update_pillar_and_conf('storage_type', storage_type)

        # CVG handling: validate the device nodes on the target node and
        # append the new CVG entry to the pillar list.
        if cvg_name:
            self.logger.debug(
                f"cvg_name:{cvg_name}, data_devices:{data_devices}, metadata_devices:{metadata_devices}"
            )
            if not data_devices or not metadata_devices:
                self.logger.error(
                    "ERROR: The parameters data-devices and metadata-devices"
                    " are missing")
                raise RuntimeError("ERROR: Incomplete arguments provided")

            current_cvg_count = Conf.get(
                'node_info_index',
                f'server_node>{self.machine_id}>storage>cvg_count')
            if not current_cvg_count:
                current_cvg_count = 0
            else:
                current_cvg_count = int(current_cvg_count)

            cvg_list = get_pillar_data('cluster/srvnode-0/storage/cvg')
            if not cvg_list or cvg_list is MISSED:
                cvg_list = []
            elif isinstance(cvg_list[0], OrderedDict):
                # Normalize pillar OrderedDicts to plain dicts.
                for i, key in enumerate(cvg_list):
                    cvg_list[i] = dict(key)
            if data_devices:
                self.logger.debug(f"data_devices: {data_devices}")
                for device in data_devices:
                    try:
                        # Existence check: `ls` fails for unknown devices.
                        cmd_run(f"ls {device}", targets=node_id)
                    except:
                        raise ValueError(
                            f"Validation for data device {device} failed\n"
                            "Please provide the correct device")
            if metadata_devices:
                self.logger.debug(f"metadata_devices: {metadata_devices}")
                for device in metadata_devices:
                    try:
                        cmd_run(f"ls {device}", targets=node_id)
                    except:
                        raise ValueError(
                            f"Validation for data device {device} failed\n"
                            "Please provide the correct device")
            cvg_list.insert(
                current_cvg_count, {
                    'name': cvg_name,
                    'data_devices': data_devices,
                    'metadata_devices': metadata_devices
                })
            cvg_count = current_cvg_count + 1
            self.update_pillar_and_conf('cvg', str(cvg_count))
            self.update_pillar_and_conf('cvg_devices', cvg_list)

        Conf.save('node_info_index')
        self.logger.debug("Done")
Beispiel #10
0
    def run(self, **kwargs):
        """Reset the cluster.

        ``type == 'data'`` wipes component data only and restarts the
        cluster; ``type == 'all'`` performs a full factory-style
        teardown of Cortx, non-Cortx and system components followed by
        provisioner cleanup. The teardown stages run in a strict order.
        """
        reset_type = kwargs.get('type')
        cortx_components = get_reset_states()
        non_cortx_components = get_pillar_data('reset/non_cortx_components')
        system_components = get_pillar_data('reset/system_components')
        self.node_list = get_cluster_nodes()

        self.logger.debug(f"Reset to be done for type is {reset_type}")

        # The cluster must be stopped before any teardown stage runs.
        self.logger.debug("Stopping the cluster")
        self.cluster_stop()

        self._create_saltssh_client()

        if reset_type == 'data':
            self.logger.info("Calling reset for cortx components")
            cortx_components = get_reset_states()
            self._destroy(cortx_components, stage=["teardown.reset"])

            self.logger.info("Cleaning up provisioner logs and metadata")
            StatesApplier.apply(["components.provisioner.teardown.reset"],
                                targets=ALL_MINIONS)

            # Data-only reset leaves the cluster usable: bring it back up.
            self.logger.info("starting the cluster")
            self.cluster_start()
            self.logger.info("Done")

        elif reset_type == 'all':
            self.logger.debug(
                "Performing post Factory reset for Cortx components.")
            self._destroy(cortx_components,
                          stage=["teardown.reset", "teardown.cleanup"])

            self.logger.debug("Preparing Reset for non-Cortx components")

            self._destroy(non_cortx_components, stage=["reset"])
            self.logger.debug("Preparing Reset for system components")

            self._destroy(system_components, stage=["teardown"])
            self.logger.debug("Preparing Reset for Provisioner commands")

            # Provisioner-level salt/glusterfs teardown states, applied
            # after all component teardowns.
            provisioner_components = [
                "system.reset", "provisioner.salt.stop",
                "system.storage.glusterfs.teardown.volume_remove",
                "system.storage.glusterfs.teardown.stop",
                "system.storage.glusterfs.teardown.remove_bricks",
                "system.storage.glusterfs.teardown.cache_remove"
            ]

            self._apply_states(provisioner_components, self.node_list)

            # The cluster-id file is immutable (chattr +i); clear the
            # flag before removing it.
            self.logger.debug("Removing cluster id file")
            self._run_cmd(
                ['chattr -i /etc/cluster-id', 'rm -rf /etc/cluster-id'],
                self.node_list)
            self._run_cmd([
                'mkdir -p /var/lib/seagate/cortx/provisioner/local/srv/pillar/groups/all',
                'mkdir -p /var/lib/seagate/cortx/provisioner/shared/locks'
            ], self.node_list)

            self.logger.debug("Performing provisioner cleanup")
            self._run_cmd(
                list(map(lambda el: 'rm -rf ' + str(el), CLEANUP_FILE_LIST)),
                self.node_list)

            self.logger.debug("Restoring provisioner backed-up files")
            for key, val in RESTORE_FILE_DICT.items():
                self._run_cmd([f'yes | cp -rf {str(key)} {str(val)}'],
                              self.node_list)

            self.logger.debug("Configuring salt at node level")
            self._run_cmd([f'sh {salt_basic_config_script}'], self.node_list)

            # self._run_cmd(
            #     ['systemctl restart salt-minion salt-master'],
            #     self.node_list)

            # # This is bit risky
            # self._run_cmd(
            #     [f'yes | cp -rf {BACKUP_FILE_DICT}/hosts /etc/hosts'])
            # self._run_cmd(['rm -rf /root/.ssh'], self.node_list)
            self.logger.debug("Done")
Beispiel #11
0
    def run(self, **kwargs):
        """
        Execution:
        `cortx_setup node prepare storage --user <user> --password <password>`

        Validates the given enclosure credentials, fetches the enclosure
        serial from the attached enclosure, and reconciles it with the
        enclosure id recorded on the node in factory (pillar/confstore).
        """

        _enc_user = kwargs.get('user')
        _enc_passwd = kwargs.get('password')

        try:
            self.validate_credentials(_enc_user, _enc_passwd)

            # Fetch enclosure id from the enclosure
            # This will use the user/password from pillar
            # and keep the enclosure id in /etc/enclosure-id
            # TODO: use the credentials provided by user
            _enc_ip = get_pillar_data(
                f'storage/{enc_num}/controller/primary/ip',
            )
            _enc_port = get_pillar_data(
                f'storage/{enc_num}/controller/primary/port',
            )

            _enc_id_on_enc = EnclosureInfo(_enc_ip, _enc_user, _enc_passwd, _enc_port).fetch_enclosure_serial()
        except Exception as e:
            self.logger.error(
                f"Could not fetch the enclosure id:\n"
                f"Possible reasons:\n"
                f"1. User name or password provided are not correct.\n"
                f"   Please rerun the command with correct credentials.\n"
                f"2. The storage enclosure connected to the node is\n"
                f"   different than the one that was used in factory.\n"
                "    You can either connect the correct enclosure and"
                "    try again or rerun the following command to use"
                f"   this enclosure with node henceforth:\n"
                "    cortx_setup storage config --user <user> --password <pass>"
            )
            raise e

        # Compare the enclosure id fetched from the enclosure with the
        # one generated in factory (and stored in grains).
        if _enc_id_on_enc != enc_id_on_node:
            # Mismatch: the attached enclosure differs from the factory
            # one; adopt the new id in both pillar and confstore.
            self.logger.warning(
                "The enclosure id from enclosure don't match with"
                " the enclosure id stored on the node"
            )
            self.logger.warning(
                "The storage enclosure connected to the node seems to be"
                " different than the one that was used in factory."
            )
            self.logger.info(
                "Updating the new enclosure id on the node"
            )
            self.logger.debug(
                f"Updating the enclosure id {_enc_id_on_enc} in pillar"
            )
            PillarSet().run(
                f'storage/{enc_num}/enclosure_id',
                _enc_id_on_enc,
                local=True
            )
            self.logger.debug(
                f"Updating the enclosure id {_enc_id_on_enc} in confstore"
            )
            self.load_conf_store(
                'node_info_index',
                f'json://{CONFSTORE_CLUSTER_FILE}'
            )
            Conf.set(
                'node_info_index',
                f'storage_enclosure>enclosure_id',
                _enc_id_on_enc
            )
            Conf.save('node_info_index')

        else:
            self.logger.info(
                "Enclosure ID fetched from enclosure matches with the"
                " one stored on the node"
            )

        #TODO: Configure storage by LUN identification, segregation
        #  based on priority/affinity to controller and partition for metadata.
        #configure_storage()

        self.logger.info("Done")
Beispiel #12
0
#

from cortx_setup.commands.command import Command
from cortx_setup.commands.storage.enclosure_info import EnclosureInfo
from cortx_setup.config import CONFSTORE_CLUSTER_FILE
from cortx_setup.commands.common_utils import get_pillar_data
from provisioner.commands import PillarSet
from provisioner.salt import local_minion_id
from cortx.utils.conf_store import Conf
from cortx.utils.security.cipher import Cipher

# Minion id of the node this module is loaded on, e.g. "srvnode-1".
node_id = local_minion_id()
# Derive the matching enclosure name from the node's numeric suffix:
# "srvnode-1" -> "enclosure-1".
enc_num = "enclosure-" + ((node_id).split('-'))[1]

# Enclosure id stored in pillar (recorded in factory); compared elsewhere
# against the id fetched live from the enclosure hardware.
# NOTE(review): evaluated at import time — pillar must be reachable when
# this module loads; consider moving into the command's run() path.
enc_id_on_node = get_pillar_data(
    f'storage/{enc_num}/enclosure_id'
)

class NodePrepareStorage(Command):
    """Cortx Setup API for Preparing Storage in field"""
    _args = {
        'user': {
            'type': str,
            'default': None,
            'optional': True
        },
        'password': {
            'type': str,
            'default': None,
            'optional': True
        }
Beispiel #13
0
    def run(self, storage_set_name=None, storage_enclosure=None):
        """Add the enclosures of the given nodes to an existing storageset.

        :param storage_set_name: name of an already-created storage-set.
        :param storage_enclosure: list of node names (e.g. ['srvnode-1',
            'srvnode-2']) whose corresponding enclosure ids are registered
            with the storage-set in both pillar and ConfStore.
        :raises ValueError: when the number of nodes exceeds the
            storageset's configured node count, or wrapping any underlying
            ValueError.
        :raises Exception: when the named storage-set does not exist.
        """
        try:
            index = 'storage_enclosure_index'
            cluster_id = get_cluster_id()

            # `storage_enclosure` adds enc_id of each node
            # so input is node-name: srvnode-1, srvnode-2.
            # Will/Should it be enclosure-1, enclosure-2 ?

            self.load_conf_store(index, f'json://{CONFSTORE_CLUSTER_FILE}')

            try:
                storage_set = Conf.get(index,
                                       f'cluster>{cluster_id}>storage_set')
                storage_set_len = len(storage_set)
            except Exception:
                self.logger.debug(
                    "No storage-set, setting storage_set_len to 0")
                storage_set_len = 0

            if storage_set_len == 0:
                self.logger.debug("storage_set object is empty")
                raise Exception(
                    f"Error: Storage-set {storage_set_name} does not exist."
                    " Use command - cortx_setup storageset create")

            # Locate the index of the requested storage-set.  Break on the
            # first match: the previous code kept iterating, which left the
            # loop variable pointing at the LAST storage-set and made the
            # Conf.set() below write into the wrong entry whenever the
            # match was not the final one.
            ss_index = None
            for idx in range(storage_set_len):
                ss_name = Conf.get(
                    index,
                    f'cluster>{cluster_id}>storage_set[{idx}]>name')
                if ss_name == storage_set_name:
                    ss_index = idx
                    break

            if ss_index is None:
                self.logger.debug(
                    f"Can not find storage-set: {storage_set_name}")
                raise Exception(
                    f"Error: Storage-set {storage_set_name} does not exist."
                    " Use command - cortx_setup storageset create")

            node_count = get_pillar_data("cluster/storage_set/count")

            # TODO: This is Placeholder. Exact API not provided yet.
            # Addnl validation needed. Change `storage_enclosure` from list
            # to string and allow only one enclosure to be updated at a time?

            input_nodes_count = len(storage_enclosure)
            if input_nodes_count > node_count:
                raise ValueError(
                    f"Invalid count: {input_nodes_count} number of nodes received. "
                    f"Given Storageset can accept a maximum of {node_count} enclosures. "
                    "Update node count with `cortx_setup storageset create` command."
                )

            # Resolve each node to its enclosure id once, up front.
            enclosure_id = [
                get_enclosure_id(node) for node in storage_enclosure
            ]

            self.logger.debug(
                f"Adding enclosure_id '{enclosure_id}' to storageset "
                f"'{storage_set_name}' in ConfStore.")

            PillarSet().run('cluster/storage_set/storage_enclosures',
                            enclosure_id)
            Conf.set(
                index,
                f'cluster>{cluster_id}>storage_set[{ss_index}]>storage_enclosures',
                enclosure_id)

            # Back-reference: record the storage-set name on each enclosure.
            # Reuse the ids resolved above instead of re-querying each node
            # (the previous loop also clobbered the `enclosure_id` list, so
            # the final debug message only showed the last node's id).
            for node, node_enc_id in zip(storage_enclosure, enclosure_id):
                self.logger.debug(
                    f"Adding storage set ID:{storage_set_name} to "
                    f"enclosure {node} with enclosure id: {node_enc_id}")
                Conf.set(index,
                         f'storage_enclosure>{node_enc_id}>storage_set_id',
                         storage_set_name)

            Conf.save(index)
            self.logger.debug(
                f"Storage enclosure for nodes {storage_enclosure} with corresponding "
                f"enclosure_id {enclosure_id} added to Storageset")

        except ValueError as exc:
            # Chain the original error so the root cause stays visible.
            raise ValueError(
                f"Failed to add storage enclosure to storageset. Reason: {str(exc)}"
            ) from exc