def cim_job_of_job_id(self, job_id, property_list=None):
        """
        Look up and return the CIM_ConcreteJob instance matching job_id.

        Raises LsmError(NOT_FOUND_JOB) when no enumerated job matches.
        """
        required_pros = SmisCommon.cim_job_pros()
        if property_list is None:
            property_list = required_pros
        else:
            property_list = merge_list(property_list, required_pros)

        real_job_id = SmisCommon.parse_job_id(job_id)[0]
        for cim_job in self.EnumerateInstances('CIM_ConcreteJob',
                                               PropertyList=property_list):
            # Job IDs are the md5 digest of the instance ID.
            if md5(cim_job['InstanceID']) == real_job_id:
                return cim_job

        raise LsmError(ErrorNumber.NOT_FOUND_JOB, "Job %s not found" % job_id)
Exemple #2
0
 def capabilities(self, system, flags=Client.FLAG_RSVD):
     """Return the Capabilities this plugin supports for the given system."""
     if not any(cur_sys.id == system.id for cur_sys in self.systems()):
         raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM, "System not found")
     cap = Capabilities()
     for feature in (Capabilities.DISKS,
                     Capabilities.DISK_VPD83_GET,
                     Capabilities.VOLUMES,
                     Capabilities.VOLUME_RAID_INFO,
                     Capabilities.POOL_MEMBER_INFO,
                     Capabilities.VOLUME_RAID_CREATE,
                     Capabilities.BATTERIES,
                     Capabilities.VOLUME_CACHE_INFO,
                     Capabilities.VOLUME_PHYSICAL_DISK_CACHE_UPDATE,
                     Capabilities.VOLUME_WRITE_CACHE_POLICY_UPDATE_WRITE_BACK,
                     Capabilities.VOLUME_WRITE_CACHE_POLICY_UPDATE_AUTO,
                     Capabilities.VOLUME_WRITE_CACHE_POLICY_UPDATE_WRITE_THROUGH,
                     Capabilities.VOLUME_DELETE):
         cap.set(feature)
     return cap
Exemple #3
0
 def export_fs(self,
               fs_id,
               export_path,
               root_list,
               rw_list,
               ro_list,
               anon_uid=NfsExport.ANON_UID_GID_NA,
               anon_gid=NfsExport.ANON_UID_GID_NA,
               auth_type=None,
               options=None,
               flags=Client.FLAG_RSVD):
     """Delegate NFS export creation to the loaded nfs plugin connection."""
     if self.nfs_conn is None:
         # No nfs sub-plugin connection was established at register time.
         raise LsmError(
             ErrorNumber.NO_SUPPORT,
             "NFS plugin is not loaded, please load nfsd kernel "
             "module and related services")
     return self.nfs_conn.export_fs(fs_id, export_path, root_list, rw_list,
                                    ro_list, anon_uid, anon_gid, auth_type,
                                    options, flags)
Exemple #4
0
    def volume_create(self, pool, volume_name, size_bytes, provisioning,
                      flags=0):
        """
        Create a volume on the given pool.

        Only default provisioning is supported; the requested size is
        rounded up to a multiple of _LVM_SECTOR_SIZE.
        """
        if provisioning != Volume.PROVISION_DEFAULT:
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           "Unsupported provisioning")

        # Round size_bytes up to the next _LVM_SECTOR_SIZE boundary; a zero
        # (falsy) size becomes a single sector.
        if not size_bytes:
            size_bytes = _LVM_SECTOR_SIZE
        elif size_bytes % _LVM_SECTOR_SIZE:
            size_bytes += _LVM_SECTOR_SIZE - (size_bytes % _LVM_SECTOR_SIZE)

        self._jsonrequest("vol_create",
                          dict(pool=pool.id,
                               name=volume_name,
                               size=size_bytes))

        return None, self._get_volume(pool.id, volume_name)
Exemple #5
0
def _pri_cim_ext_of_cim_disk(smis_common, cim_disk_path, property_list=None):
    """
    Find the Primordial CIM_StorageExtent of a CIM_DiskDrive.

    Per SNIA SMI-S 1.4 rev.6 Block book, section 11.1.1 'Base Model', a
    disk drive (MediaAccessDevice) shall be linked to a single
    StorageExtent representing its storage via a MediaPresent association.

    Parameters:
        cim_disk_path   # CIM_InstanceName of CIM_DiskDrive
        property_list   # list of extra properties wanted on the returned
                        # CIM_StorageExtent
    Returns:
        The CIM_Instance of the Primordial CIM_StorageExtent.
    Raises:
        LsmError(PLUGIN_BUG) when the primordial extent count is not 1.
    """
    if property_list is None:
        property_list = ['Primordial']
    else:
        property_list = merge_list(property_list, ['Primordial'])

    pri_cim_exts = [
        cim_ext for cim_ext in smis_common.Associators(
            cim_disk_path,
            AssocClass='CIM_MediaPresent',
            ResultClass='CIM_StorageExtent',
            PropertyList=property_list)
        if cim_ext["Primordial"]]

    if len(pri_cim_exts) != 1:
        raise LsmError(ErrorNumber.PLUGIN_BUG,
                       "_pri_cim_ext_of_cim_disk(): "
                       "Got unexpected count of Primordial " +
                       "CIM_StorageExtent for CIM_DiskDrive: %s, %s " %
                       (cim_disk_path, pri_cim_exts))
    # SNIA mandates exactly one Primordial CIM_StorageExtent per disk drive.
    return pri_cim_exts[0]
Exemple #6
0
def root_cim_sys(smis_common, property_list=None):
    """
    Find the root CIM_ComputerSystem via:
        CIM_RegisteredProfile       # Root Profile('Array') in interop
                 |
                 | CIM_ElementConformsToProfile
                 v
        CIM_ComputerSystem          # vendor namespace
    MegaRAID providers are handled by plain enumeration instead.
    """
    id_pros = cim_sys_id_pros()
    property_list = (id_pros if property_list is None
                     else merge_list(property_list, id_pros))

    if smis_common.is_megaraid():
        cim_syss = smis_common.EnumerateInstances(
            'CIM_ComputerSystem', PropertyList=property_list)
    else:
        cim_syss = smis_common.Associators(
            smis_common.root_blk_cim_rp.path,
            ResultClass='CIM_ComputerSystem',
            AssocClass='CIM_ElementConformsToProfile',
            PropertyList=property_list)
        if not cim_syss:
            raise LsmError(
                ErrorNumber.NO_SUPPORT,
                "Current SMI-S provider does not provide "
                "the root CIM_ComputerSystem associated "
                "to 'Array' CIM_RegisteredProfile.")

    # System URI filtering: when the user restricted the system list, only
    # return the systems they asked for.
    if smis_common.system_list:
        return [cim_sys for cim_sys in cim_syss
                if sys_id_of_cim_sys(cim_sys) in smis_common.system_list]
    return cim_syss
Exemple #7
0
    def _sacli_exec(self, sacli_cmds, flag_convert=True):
        """
        Run the hpssacli binary with the given sub-command list.

        When flag_convert is True the raw output is parsed into a dict.
        Raises LsmError(INVALID_ARGUMENT) when the binary cannot be run.
        """
        # Prepend the binary path in place, matching cmd_exec's expectation.
        sacli_cmds.insert(0, self._sacli_bin)
        try:
            output = cmd_exec(sacli_cmds)
        except OSError as os_error:
            if os_error.errno != errno.ENOENT:
                raise
            raise LsmError(
                ErrorNumber.INVALID_ARGUMENT,
                "hpssacli binary '%s' is not exist or executable." %
                self._sacli_bin)

        return _parse_hpssacli_output(output) if flag_convert else output
Exemple #8
0
    def capabilities(self, system, flags=Client.FLAG_RSVD):
        """
        Return supported capabilities; volume features are only advertised
        when the controller is not in HBA mode.
        """
        if not any(cur_sys.id == system.id for cur_sys in self.systems()):
            raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM, "System not found")
        cap = Capabilities()
        for feature in (Capabilities.DISKS,
                        Capabilities.SYS_FW_VERSION_GET,
                        Capabilities.SYS_MODE_GET,
                        Capabilities.DISK_LOCATION,
                        Capabilities.DISK_VPD83_GET):
            cap.set(feature)
        if system.mode != System.MODE_HBA:
            for feature in (Capabilities.VOLUMES,
                            Capabilities.VOLUME_RAID_CREATE,
                            Capabilities.VOLUME_DELETE,
                            Capabilities.POOL_MEMBER_INFO,
                            Capabilities.VOLUME_RAID_INFO,
                            Capabilities.VOLUME_LED,
                            Capabilities.VOLUME_ENABLE):
                cap.set(feature)

        return cap
Exemple #9
0
    def access_group_initiator_add(self, access_group, init_id, init_type,
                                   flags=0):
        """
        Add an iSCSI initiator to an access group and return the updated
        AccessGroup. Only iSCSI IQN initiators are supported.
        """
        if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "Nstor only support iSCSI Access Group")

        if init_id in self._access_group_initiators(access_group):
            # Initiator already present: return a copy of the group as-is.
            return copy.deepcopy(access_group)

        self._add_initiator(access_group.name, init_id)
        updated_init_ids = self._request("list_hostgroup_members", "stmf",
                                         [access_group.name])
        return AccessGroup(access_group.id, access_group.name,
                           updated_init_ids,
                           AccessGroup.INIT_TYPE_ISCSI_IQN,
                           self.system.id)
Exemple #10
0
    def _cim_srv_of_sys_id(self, srv_name, sys_id, raise_error):
        """
        Return the CIM service instance of class srv_name whose SystemName
        matches sys_id.

        On CIMError or when no match is found: raise when raise_error is
        set, otherwise return None.
        """
        try:
            for cim_srv in self.EnumerateInstances(
                    srv_name, PropertyList=['SystemName']):
                if cim_srv['SystemName'] == sys_id:
                    return cim_srv
        except CIMError:
            if not raise_error:
                return None
            raise

        if raise_error:
            raise LsmError(
                ErrorNumber.NO_SUPPORT,
                "Cannot find any '%s' for requested system ID" % srv_name)
        return None
Exemple #11
0
    def export_fs(self,
                  fs_id,
                  export_path,
                  root_list,
                  rw_list,
                  ro_list,
                  anon_uid,
                  anon_gid,
                  auth_type,
                  options,
                  flags=0):
        """
        Exports a filesystem as specified in the export.

        Builds the share-options dict from the host lists and asks the
        netstorsvc service to share the folder, then returns the matching
        NfsExport object.

        Raises LsmError(INVALID_ARGUMENT) when export_path is None.
        """
        if export_path is None:
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           "Export path is required")

        md5_id = md5(export_path)
        fs_dict = {'auth_type': 'sys', 'anonymous': 'false'}
        if ro_list:
            fs_dict['read_only'] = ','.join(ro_list)
        if rw_list:
            fs_dict['read_write'] = ','.join(rw_list)
        if anon_uid or anon_gid:
            fs_dict['anonymous'] = 'true'
        if root_list:
            fs_dict['root'] = ','.join(root_list)
        if auth_type:
            fs_dict['auth_type'] = str(auth_type)
        # BUG FIX: guard against rw_list being None before the membership
        # test; the original evaluated "'*' in rw_list" unconditionally,
        # raising TypeError when rw_list is None (every other use of
        # rw_list in this method is guarded).
        if rw_list and '*' in rw_list:
            fs_dict['anonymous'] = 'true'
        if options:
            fs_dict['extra_options'] = str(options)

        self._request(
            "share_folder", "netstorsvc",
            ['svc:/network/nfs/server:default', fs_id, fs_dict])
        return NfsExport(md5_id, fs_id, export_path, auth_type, root_list,
                         rw_list, ro_list, anon_uid, anon_gid, options)
Exemple #12
0
    def fs_snapshot_restore(self,
                            fs,
                            snapshot,
                            files,
                            restore_files,
                            all_files=False,
                            flags=0):
        """
        WARNING: Destructive!

        Revert a file system, or just the listed files, to a snapshot.
        When a file list is given but the array cannot restore only those
        files, the operation fails with an LsmError. With files == None and
        all_files == True, every file on the file system is restored.

        restore_files, when not None, must be the same length as files,
        pairing each source file with its restore target by index.

        Returns None on success or a job id; raises LsmError on error.
        This plugin does not implement the operation.
        """
        raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
    def volume_write_cache_policy_update(self,
                                         volume,
                                         wcp,
                                         flags=Client.FLAG_RSVD):
        """
        Depending on "storcli /c0/vX set wrcache=<wt|wb|awb>" command.
        """
        _ = flags
        vd_path = _vd_path_of_lsm_vol(volume)

        # Cache I/O mode ignores the write-cache setting and always caches
        # writes; a trailing "C" in the Cache field marks that mode.
        vol_show_output = self._storcli_exec([vd_path, "show", "all"])
        flag_cache_io = vol_show_output[vd_path][0]["Cache"].endswith("C")

        # Map each supported policy to its storcli wrcache argument.
        wcp_to_arg = {
            Volume.WRITE_CACHE_POLICY_WRITE_BACK: "wrcache=awb",
            Volume.WRITE_CACHE_POLICY_AUTO: "wrcache=wb",
            Volume.WRITE_CACHE_POLICY_WRITE_THROUGH: "wrcache=wt",
        }
        if wcp not in wcp_to_arg:
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "Got unknown wcp: {:d}".format(wcp),
            )
        # For AUTO and WRITE_THROUGH, cache I/O mode must be switched to
        # direct I/O first so the new wrcache setting takes effect.
        if flag_cache_io and wcp in (Volume.WRITE_CACHE_POLICY_AUTO,
                                     Volume.WRITE_CACHE_POLICY_WRITE_THROUGH):
            self._storcli_exec([vd_path, "set", "iopolicy=Direct"])
        self._storcli_exec([vd_path, "set", wcp_to_arg[wcp]])
Exemple #14
0
def mask_type(smis_common, raise_error=False):
    """
    Detect which masking profile the SMI-S provider supports.

    Returns MASK_TYPE_GROUP when the 'Group Masking and Mapping' profile is
    supported, MASK_TYPE_MASK for the plain masking profile, otherwise
    MASK_TYPE_NO_SUPPORT — or raises NO_SUPPORT when raise_error is True.
    """
    # Check the richer group-masking profile first.
    profile_checks = (
        (SmisCommon.SNIA_GROUP_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_5,
         MASK_TYPE_GROUP),
        (SmisCommon.SNIA_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4,
         MASK_TYPE_MASK),
    )
    for profile, spec_ver, detected_type in profile_checks:
        if smis_common.profile_check(profile, spec_ver, raise_error=False):
            return detected_type
    if raise_error:
        raise LsmError(
            ErrorNumber.NO_SUPPORT, "Target SMI-S provider does not support "
            "%s version %s or %s version %s" %
            (SmisCommon.SNIA_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4,
             SmisCommon.SNIA_GROUP_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_5))
    return MASK_TYPE_NO_SUPPORT
Exemple #15
0
    def run(self):
        """
        Main request/response loop for the plug-in transport.

        Reads requests from self.tp, dispatches each to self.plugin by
        method name, and sends back responses or protocol errors. The loop
        ends on a graceful plugin_unregister or on transport failure.
        """
        # Don't need to invoke this when running stand alone as a cmdline
        if self.cmdline:
            return

        # need_shutdown tracks whether the client registered but then went
        # away without a graceful plugin_unregister.
        need_shutdown = False
        msg_id = 0

        try:
            while True:
                try:
                    # result = None

                    msg = self.tp.read_req()

                    method = msg['method']
                    msg_id = msg['id']
                    params = msg['params']

                    # Check to see if this plug-in implements this operation
                    # if not return the expected error.
                    if hasattr(self.plugin, method):
                        if params is None:
                            result = getattr(self.plugin, method)()
                        else:
                            result = getattr(self.plugin,
                                             method)(**msg['params'])
                    else:
                        raise LsmError(ErrorNumber.NO_SUPPORT,
                                       "Unsupported operation")

                    self.tp.send_resp(result)

                    if method == 'plugin_register':
                        need_shutdown = True

                    if method == 'plugin_unregister':
                        # This is a graceful plugin_unregister
                        need_shutdown = False
                        self.tp.close()
                        break

                except ValueError as ve:
                    # -32700 is the JSON-RPC parse-error code.
                    error(traceback.format_exc())
                    self.tp.send_error(msg_id, -32700, str(ve))
                except AttributeError as ae:
                    # -32601 is the JSON-RPC method-not-found code.
                    error(traceback.format_exc())
                    self.tp.send_error(msg_id, -32601, str(ae))
                except LsmError as lsm_err:
                    self.tp.send_error(msg_id, lsm_err.code, lsm_err.msg,
                                       lsm_err.data)
        except _SocketEOF:
            # Client went away and didn't meet our expectations for protocol,
            # this error message should not be seen as it shouldn't be
            # occurring.
            if need_shutdown:
                error('Client went away, exiting plug-in')
        except socket.error as se:
            if se.errno == errno.EPIPE:
                error('Client went away, exiting plug-in')
            else:
                error("Unhandled exception in plug-in!\n" +
                      traceback.format_exc())
        except Exception:
            error("Unhandled exception in plug-in!\n" + traceback.format_exc())

            try:
                self.tp.send_error(msg_id, ErrorNumber.PLUGIN_BUG,
                                   "Unhandled exception in plug-in",
                                   str(traceback.format_exc()))
            except Exception:
                # Transport may already be broken; nothing more we can do.
                pass

        finally:
            if need_shutdown:
                # Client wasn't nice, we will allow plug-in to cleanup
                self.plugin.plugin_unregister()
                sys.exit(2)
Exemple #16
0
    def plugin_register(self, uri, password, timeout, flags=Client.FLAG_RSVD):
        """
        Detect local hardware and open a Client connection to each matching
        local plugin.

        Supported URI parameters:
            only=<plugin>            load just the named plugin
            ignore_init_error=true   skip plugins that fail to initialize
            <plugin>_<key>=<value>   forwarded to that plugin's URI as
                                     key=value

        Raises LsmError(INVALID_ARGUMENT) when not running as root or when
        only= names an unsupported plugin, and LsmError(NO_SUPPORT) when no
        supported hardware or system is found.
        """
        self._tmo_ms = timeout

        supported_plugins = set(LocalPlugin._KMOD_PLUGIN_MAP.values())
        if os.geteuid() != 0:
            raise LsmError(
                ErrorNumber.INVALID_ARGUMENT,
                "This plugin requires root privilege both daemon and client")
        uri_parsed = uri_parse(uri)
        uri_vars = uri_parsed.get("parameters", {})
        # BUG FIX: bool("false") is True, so the original treated every
        # value of ignore_init_error -- including the default "false" -- as
        # True. Compare against the literal string "true" instead.
        ignore_init_error = \
            str(uri_vars.get("ignore_init_error", "false")).lower() == "true"

        sub_uri_paras = {}
        for plugin_name in supported_plugins:
            sub_uri_paras[plugin_name] = []

        # Split "<plugin>_<key>=<value>" URI parameters out per plugin.
        for key in uri_vars.keys():
            for plugin_name in supported_plugins:
                if key.startswith("%s_" % plugin_name):
                    sub_uri_paras[plugin_name].append(
                        "%s=%s" %
                        (key[len("%s_" % plugin_name):], uri_vars[key]))

        only_plugin = uri_vars.get("only", "")
        if only_plugin and only_plugin not in supported_plugins:
            raise LsmError(
                ErrorNumber.INVALID_ARGUMENT,
                "Plugin defined in only=%s is not supported" % only_plugin)
        if only_plugin:
            requested_plugins = [only_plugin]
        else:
            # Check kernel module to determine which plugin to load
            requested_plugins = []
            cur_kmods = os.listdir("/sys/module/")
            for kmod_name, plugin_name in LocalPlugin._KMOD_PLUGIN_MAP.items():
                if kmod_name in cur_kmods:
                    requested_plugins.append(plugin_name)
            # smartpqi could be managed both by hpsa and arcconf plugin, hence
            # need extra care here: if arcconf binary tool is installed, we use
            # it, if not, we try hpsa binary tool. If none was installed, we
            # raise error generated by arcconf plugin.
            if "smartpqi" in cur_kmods:
                if Arcconf.find_arcconf():
                    requested_plugins.append("arcconf")
                elif SmartArray.find_sacli():
                    requested_plugins.append("hpsa")
                else:
                    # None was found, still use arcconf plugin which will
                    # generate proper error to user if ignore_init_error=false.
                    requested_plugins.append("arcconf")

            # De-duplicate; the smartpqi handling above may add a repeat.
            requested_plugins = set(requested_plugins)

        if not requested_plugins:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "No supported hardware found")

        for plugin_name in requested_plugins:
            plugin_uri = "%s://" % plugin_name
            if sub_uri_paras[plugin_name]:
                plugin_uri += "?%s" % "&".join(sub_uri_paras[plugin_name])
            try:
                conn = Client(plugin_uri, None, timeout, flags)
                # So far, no local plugins require password
                self.conns.append(conn)
                if plugin_name == 'nfs':
                    self.nfs_conn = conn
            except LsmError as lsm_err:
                if not ignore_init_error:
                    raise lsm_err
        # Build the system-id -> connection map used to dispatch later calls.
        for conn in self.conns:
            for sys in conn.systems():
                self.sys_con_map[sys.id] = conn
                self.syss.append(sys)
        if not self.sys_con_map:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "No supported systems found")
Exemple #17
0
 def _exec(self, sys_id, func_name, parameters):
     """
     Dispatch func_name(**parameters) on the connection owning sys_id.

     Raises LsmError(NOT_FOUND_SYSTEM) when no connection manages sys_id.
     """
     # Idiom: membership test directly on the dict; no .keys() view needed.
     if sys_id not in self.sys_con_map:
         raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM, "System not found")
     return getattr(self.sys_con_map[sys_id], func_name)(**parameters)
Exemple #18
0
 def job_free(self, job_id, flags=0):
     """Freeing async jobs is not supported; always raises NO_SUPPORT."""
     raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
Exemple #19
0
    def export_fs(self,
                  fs_id,
                  export_path,
                  root_list,
                  rw_list,
                  ro_list,
                  anon_uid=NfsExport.ANON_UID_GID_NA,
                  anon_gid=NfsExport.ANON_UID_GID_NA,
                  auth_type=None,
                  options=None,
                  flags=0):
        """
        Export a file system over NFS via per-host nfs_export_add requests.

        export_path must be None: the daemon chooses the export path. The
        resulting NfsExport is located by re-reading the current exports.
        """
        if export_path is not None:
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           'export_path required to be None')

        base_opts = []
        if anon_uid is not None:
            base_opts.append('anonuid=%s' % str(anon_uid))
        if anon_gid is not None:
            base_opts.append('anongid=%s' % str(anon_gid))
        if auth_type is not None:
            base_opts.append('sec=%s' % str(auth_type))

        fs_path = self._get_fs_path(fs_id)
        if fs_path is None:
            raise LsmError(ErrorNumber.NOT_FOUND_FS, "File system not found")

        # Issue one nfs_export_add per host: rw hosts first, then ro hosts.
        for access_opt, host_list in (('rw', rw_list), ('ro', ro_list)):
            for host in host_list:
                host_opts = copy.copy(base_opts)
                if host in root_list:
                    host_opts.append('no_root_squash')
                host_opts.append(access_opt)
                self._jsonrequest(
                    "nfs_export_add",
                    dict(host=host,
                         path=fs_path,
                         export_path=None,
                         options=host_opts))

        # Kind of a pain to determine which export was newly created as it
        # could get merged into an existing record, doh!
        # Make sure fs_id's match and that one of the hosts is in the
        # record.
        requested_hosts = []
        requested_hosts.extend(rw_list)
        requested_hosts.extend(ro_list)
        for export in self.exports():
            if export.fs_id != fs_id:
                continue
            export_hosts = []
            export_hosts.extend(export.ro)
            export_hosts.extend(export.rw)
            if any(host in export_hosts for host in requested_hosts):
                return export

        raise LsmError(ErrorNumber.PLUGIN_BUG, "Failed to create export")
def _use_c_lib_function(func_ref, arg):
    """Call a C-library binding and translate its error tuple to LsmError."""
    data, err_no, err_msg = func_ref(arg)
    if err_no == ErrorNumber.OK:
        return data
    raise LsmError(err_no, err_msg)
    def invoke_method_wait(self, cmd, cim_path, in_params,
                           out_key=None, expect_class=None,
                           flag_out_array=False):
        """
        InvokeMethod and wait it until done.
        Return a CIMInstanceName from out[out_key] or from cim_job:
            CIM_ConcreteJob
                |
                | CIM_AffectedJobElement
                v
            CIMInstanceName # expect_class
        If flag_out_array is True, return the first element of out[out_key].
        """
        cim_job = dict()
        (rc, out) = self._wbem_conn.InvokeMethod(cmd, cim_path, **in_params)

        try:
            # Synchronous completion: the result (if any) is already in out.
            if rc == SmisCommon.SNIA_INVOKE_OK:
                if out_key is None:
                    return None
                if out_key in out:
                    if flag_out_array:
                        if len(out[out_key]) == 1:
                            return out[out_key][0]
                        else:
                            raise LsmError(
                                ErrorNumber.PLUGIN_BUG,
                                "invoke_method_wait(), output contains %d " %
                                len(out[out_key]) +
                                "elements: %s" % out[out_key])
                    return out[out_key]
                else:
                    raise LsmError(ErrorNumber.PLUGIN_BUG,
                                   "invoke_method_wait(), %s not exist "
                                   "in out %s" % (out_key, list(out.items())))

            elif rc == SmisCommon.SNIA_INVOKE_ASYNC:
                # Asynchronous: poll the returned CIM_ConcreteJob until it
                # leaves the new/starting/running states or the loop budget
                # (_INVOKE_MAX_LOOP_COUNT polls) is exhausted.
                cim_job = {}
                cim_job_path = out['Job']
                loop_counter = 0
                job_pros = ['JobState', 'ErrorDescription',
                            'OperationalStatus']
                cim_xxxs_path = []
                while loop_counter <= SmisCommon._INVOKE_MAX_LOOP_COUNT:
                    cim_job = self.GetInstance(cim_job_path,
                                               PropertyList=job_pros)
                    job_state = cim_job['JobState']
                    if job_state in (dmtf.JOB_STATE_NEW,
                                     dmtf.JOB_STATE_STARTING,
                                     dmtf.JOB_STATE_RUNNING):
                        loop_counter += 1
                        time.sleep(SmisCommon._INVOKE_CHECK_INTERVAL)
                        continue
                    elif job_state == dmtf.JOB_STATE_COMPLETED:
                        if not SmisCommon.cim_job_completed_ok(cim_job):
                            raise LsmError(
                                ErrorNumber.PLUGIN_BUG,
                                str(cim_job['ErrorDescription']))
                        if expect_class is None:
                            return None
                        # Job finished OK; the created/affected element is
                        # linked to the job via CIM_AffectedJobElement.
                        cim_xxxs_path = self.AssociatorNames(
                            cim_job.path,
                            AssocClass='CIM_AffectedJobElement',
                            ResultClass=expect_class)
                        break
                    else:
                        raise LsmError(
                            ErrorNumber.PLUGIN_BUG,
                            "invoke_method_wait(): Got unknown job state "
                            "%d: %s" % (job_state, list(cim_job.items())))

                if loop_counter > SmisCommon._INVOKE_MAX_LOOP_COUNT:
                    raise LsmError(
                        ErrorNumber.TIMEOUT,
                        "The job generated by %s() failed to finish in %ds" %
                        (cmd,
                         SmisCommon._INVOKE_CHECK_INTERVAL *
                         SmisCommon._INVOKE_MAX_LOOP_COUNT))

                if len(cim_xxxs_path) == 1:
                    return cim_xxxs_path[0]
                else:
                    raise LsmError(
                        ErrorNumber.PLUGIN_BUG,
                        "invoke_method_wait(): got unexpected(not 1) "
                        "return from CIM_AffectedJobElement: "
                        "%s, out: %s, job: %s" %
                        (cim_xxxs_path, list(out.items()),
                         list(cim_job.items())))
            else:
                self._dump_wbem_xml(cmd)
                raise LsmError(
                    ErrorNumber.PLUGIN_BUG,
                    "invoke_method_wait(): Got unexpected rc code "
                    "%d, out: %s" % (rc, list(out.items())))
        except Exception:
            exc_info = sys.exc_info()
            # Make sure to save off current exception as we could cause
            # another when trying to dump debug data.
            self._dump_wbem_xml(cmd)
            six.reraise(*exc_info)
Exemple #22
0
    def volume_raid_create(self, name, raid_type, disks, strip_size,
                           flags=Client.FLAG_RSVD):
        """
        Create a logical drive on the given disks via arcconf and return
        the resulting Volume.

        Raises LsmError(INVALID_ARGUMENT) when a disk lacks plugin_data or
        the disks span multiple controllers, and LsmError(DISK_NOT_FREE)
        when a requested disk is in use.
        """
        arcconf_raid_level = _lsm_raid_type_to_arcconf(raid_type)
        arcconf_disk_ids = []
        ctrl_num = None
        disk_channel = ''
        disk_device = ''
        disk_dict = {'Channel': '0', 'Device': '1'}
        lsm_vols = []

        for disk in disks:
            if not disk.plugin_data:
                raise LsmError(
                    ErrorNumber.INVALID_ARGUMENT,
                    "Illegal input disks argument: missing plugin_data "
                    "property")
            # plugin_data layout appears to be "<ctrl>,<channel>,<device>..."
            # -- TODO confirm against the disk-listing code.
            (cur_ctrl_num, disk_channel, disk_device) = \
                disk.plugin_data.split(',')[:3]

            # NOTE(review): this inner loop compares every requested disk
            # name with the current disk's own name, so the body runs at
            # least once per outer disk (and again for duplicate names,
            # appending copies of the by-then-emptied disk_dict). It looks
            # like it was meant to be a plain append -- confirm intent
            # before simplifying.
            requested_disks = [d.name for d in disks]
            for disk_name in requested_disks:
                if str(disk_name) == str(disk.name):
                    disk_channel = str(disk_channel.strip())
                    disk_device = str(disk_device.strip())
                    disk_dict.update(
                        {'Channel': disk_channel, 'Device': disk_device})
                    arcconf_disk_ids.append(disk_dict.copy())
                    disk_dict = {}
                    # All disks must belong to one controller.
                    if ctrl_num is None:
                        ctrl_num = cur_ctrl_num
                    elif ctrl_num != cur_ctrl_num:
                        raise LsmError(
                            ErrorNumber.INVALID_ARGUMENT,
                            "Illegal input disks argument: disks "
                            "are not from the same controller/system.")

        # "1024" is a fixed argument to "arcconf create ... logicaldrive";
        # presumably the size value -- TODO confirm against arcconf docs.
        cmds = ["create", ctrl_num, "logicaldrive", "1024", arcconf_raid_level]
        for disk_channel_device in arcconf_disk_ids:
            cmds.append(disk_channel_device['Channel'])
            cmds.append(disk_channel_device['Device'])

        try:
            self._arcconf_exec(cmds, flag_force=True)
        except ExecError:
            # Check whether disk is free
            requested_disk_ids = [d.id for d in disks]
            for cur_disk in self.disks():
                if cur_disk.id in requested_disk_ids and \
                   not cur_disk.status & Disk.STATUS_FREE:
                    raise LsmError(
                        ErrorNumber.DISK_NOT_FREE,
                        "Disk %s is not in STATUS_FREE state" % cur_disk.id)
            raise

        # Generate pool_id from system id and array.
        decoded_json = self._get_detail_info_list()[int(ctrl_num) - 1]

        # Assumes the newly created logical drive is the last list entry --
        # TODO confirm arcconf ordering guarantees.
        latest_ld = len(decoded_json['Controller']['LogicalDrive']) - 1
        ld_info = decoded_json['Controller']['LogicalDrive'][latest_ld]
        ld_num = ld_info['logicalDriveID']
        pool_id = '%s:%s' % (ctrl_num, ld_num)

        lsm_vols = self.volumes(search_key='pool_id', search_value=pool_id)

        if len(lsm_vols) < 1:
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "volume_raid_create(): Got unexpected count(not 1) of new "
                "volumes: %s" % lsm_vols)
        return lsm_vols[0]
Exemple #23
0
 def job_status(self, job_id, flags=0):
     """Job status querying is not implemented; always raises NO_SUPPORT."""
     raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet")
Exemple #24
0
    def volume_cache_info(self, volume, flags=Client.FLAG_RSVD):
        """
        Depending on these commands:
            storcli /c0/v0 show all J

        Returns [write_cache_policy, write_cache_status, read_cache_policy,
        read_cache_status, phy_disk_cache] derived from the VD's Cache
        string, controller RAM size and battery state.
        """
        flag_has_ram = False
        flag_battery_ok = False

        vd_path = _vd_path_of_lsm_vol(volume)

        vol_show_output = self._storcli_exec([vd_path, "show", "all"])
        vd_basic_info = vol_show_output[vd_path][0]
        vd_id = int(vd_basic_info['DG/VD'].split('/')[-1])
        vd_prop_info = vol_show_output['VD%d Properties' % vd_id]

        # Query the whole controller ("/cN show all") for RAM size.
        sys_all_output = self._storcli_exec(
            ["/%s" % vd_path.split('/')[1], "show", "all"])

        ram_size = _mega_size_to_lsm(sys_all_output['HwCfg'].get(
            'On Board Memory Size', '0 KB'))
        if ram_size > 0:
            flag_has_ram = True

        # Any battery in OK state counts.
        lsm_bats = self.batteries()
        for lsm_bat in lsm_bats:
            if lsm_bat.status == Battery.STATUS_OK:
                flag_battery_ok = True

        lsi_cache_setting = vd_basic_info['Cache']
        # According to MegaRAID document, read I/O is always cached for direct
        # I/O and cache I/O.
        read_cache_policy = Volume.READ_CACHE_POLICY_ENABLED
        write_cache_status = Volume.WRITE_CACHE_STATUS_WRITE_THROUGH
        read_cache_status = Volume.READ_CACHE_STATUS_DISABLED

        if lsi_cache_setting.endswith('D'):
            # Direct I/O
            if 'AWB' in lsi_cache_setting:
                write_cache_policy = Volume.WRITE_CACHE_POLICY_WRITE_BACK
            elif 'WB' in lsi_cache_setting:
                write_cache_policy = Volume.WRITE_CACHE_POLICY_AUTO
            elif 'WT' in lsi_cache_setting:
                write_cache_policy = Volume.WRITE_CACHE_POLICY_WRITE_THROUGH
            else:
                raise LsmError(
                    ErrorNumber.PLUGIN_BUG,
                    "Unknown write cache %s for volume %s" %
                    (lsi_cache_setting, vd_path))
        elif lsi_cache_setting.endswith('C'):
            # cache I/O always caches write and read and ignore changes.
            write_cache_policy = Volume.WRITE_CACHE_POLICY_WRITE_BACK
        else:
            raise LsmError(
                ErrorNumber.PLUGIN_BUG, "Unknown I/O type %s for volume %s" %
                (lsi_cache_setting, vd_path))

        # Effective cache status depends on controller RAM; AUTO write-back
        # additionally requires a healthy battery.
        if flag_has_ram:
            read_cache_status = Volume.READ_CACHE_STATUS_ENABLED
            if write_cache_policy == Volume.WRITE_CACHE_POLICY_WRITE_BACK:
                write_cache_status = Volume.WRITE_CACHE_STATUS_WRITE_BACK
            elif write_cache_policy == Volume.WRITE_CACHE_POLICY_AUTO:
                if flag_battery_ok:
                    write_cache_status = Volume.WRITE_CACHE_STATUS_WRITE_BACK

        # TODO(Gris Ge): When 'Block SSD Write Disk Cache Change' of
        #                'Supported Adapter Operations' is 'Yes'
        lsi_disk_cache_setting = vd_prop_info['Disk Cache Policy']
        if lsi_disk_cache_setting == 'Disabled':
            phy_disk_cache = Volume.PHYSICAL_DISK_CACHE_DISABLED
        elif lsi_disk_cache_setting == 'Enabled':
            phy_disk_cache = Volume.PHYSICAL_DISK_CACHE_ENABLED
        elif lsi_disk_cache_setting == "Disk's Default":
            phy_disk_cache = Volume.PHYSICAL_DISK_CACHE_USE_DISK_SETTING
        else:
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "Unknown disk cache policy '%s' for volume %s" %
                (lsi_disk_cache_setting, vd_path))

        return [
            write_cache_policy, write_cache_status, read_cache_policy,
            read_cache_status, phy_disk_cache
        ]
Exemple #25
0
def _vd_path_of_lsm_vol(lsm_vol):
    if not lsm_vol.plugin_data:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Illegal input volume argument: missing plugin_data property")
    return str(lsm_vol.plugin_data)
Exemple #26
0
def _lsm_raid_type_to_mega(lsm_raid_type):
    """
    Map an LSM RAID type constant to its storcli RAID type string.

    Raises LsmError(NO_SUPPORT) for RAID types absent from
    _LSM_RAID_TYPE_CONV.
    """
    try:
        mega_raid_type = _LSM_RAID_TYPE_CONV[lsm_raid_type]
    except KeyError:
        raise LsmError(ErrorNumber.NO_SUPPORT,
                       "RAID type %d not supported" % lsm_raid_type)
    return mega_raid_type
Exemple #27
0
 def job_free(self, job_id, flags=Client.FLAG_RSVD):
     """
     Free an async job. This plugin never creates async jobs, so this
     always raises LsmError(NO_SUPPORT).
     """
     raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet")
Exemple #28
0
    def cim_wrapper(*args, **kwargs):
        """
        Invoke the wrapped method, translating pywbem/transport failures
        into LsmError so callers only ever see libstoragemgmt error codes.
        """
        try:
            return method(*args, **kwargs)
        except LsmError:
            # Already in plugin error form; pass through untouched.
            raise
        except pywbem.CIMError as ce:
            error_code = ce.args[0]
            desc = ce.args[1]
            if error_code == 0:
                if 'Socket error' in desc:
                    # Map well-known errno substrings to dedicated error
                    # codes; first match wins.
                    socket_err_map = (
                        ('Errno 111', ErrorNumber.NETWORK_CONNREFUSED,
                         'Connection refused'),
                        ('Errno 113', ErrorNumber.NETWORK_HOSTDOWN,
                         'Host is down'),
                        ('Errno 104', ErrorNumber.NETWORK_CONNREFUSED,
                         'Connection reset by peer'),
                    )
                    for errno_str, lsm_errno, lsm_msg in socket_err_map:
                        if errno_str in desc:
                            raise LsmError(lsm_errno, lsm_msg)
                    # Some other socket failure: report a generic network
                    # error carrying the library's own description.
                    raise LsmError(ErrorNumber.NETWORK_ERROR, str(ce))
                if ('SSL error' in desc or
                        'The web server returned a bad status line' in desc or
                        'HTTP error' in desc):
                    raise LsmError(ErrorNumber.TRANSPORT_COMMUNICATION, desc)
            raise LsmError(ErrorNumber.PLUGIN_BUG, desc)
        except AuthError:
            raise LsmError(ErrorNumber.PLUGIN_AUTH_FAILED, "Unauthorized user")
        except Error as te:
            raise LsmError(ErrorNumber.NETWORK_ERROR, str(te))
        except Exception as e:
            error("Unexpected exception:\n" + traceback.format_exc())
            raise LsmError(ErrorNumber.PLUGIN_BUG, str(e),
                           traceback.format_exc())
Exemple #29
0
    def volume_raid_create(self,
                           name,
                           raid_type,
                           disks,
                           strip_size,
                           flags=Client.FLAG_RSVD):
        """
        Create a RAID virtual drive via storcli and return the new volume.

        Work flow:
            1. Create RAID volume
                storcli /c0 add vd RAID10 drives=252:1-4 pdperarray=2 J
            2. Find out pool/DG base on one disk.
                storcli /c0/e252/s1 show J
            3. Find out the volume/VD base on pool/DG using self.volumes()

        Raises:
            LsmError(INVALID_ARGUMENT): a disk lacks plugin_data, or the
                disks span more than one controller or enclosure.
            LsmError(DISK_NOT_FREE): a requested disk is already in use.
            LsmError(NO_SUPPORT): raid_type or strip_size is rejected by
                the controller.
            LsmError(PLUGIN_BUG): storcli reported success but no DG or a
                unexpected volume count was found afterwards.
        """
        mega_raid_type = _lsm_raid_type_to_mega(raid_type)
        ctrl_num = None
        slot_nums = []
        enclosure_str = None

        for disk in disks:
            if not disk.plugin_data:
                raise LsmError(
                    ErrorNumber.INVALID_ARGUMENT,
                    "Illegal input disks argument: missing plugin_data "
                    "property")
            # Disk should from the same controller.
            # Please be informed, the enclosure_str could be a empty(space).
            (cur_ctrl_num, cur_enclosure_str, slot_num) = \
                disk.plugin_data.split(':')

            cur_ctrl_num = int(cur_ctrl_num)

            if ctrl_num is not None and cur_ctrl_num != ctrl_num:
                raise LsmError(
                    ErrorNumber.INVALID_ARGUMENT,
                    "Illegal input disks argument: disks are not from the "
                    "same controller/system.")

            if enclosure_str is not None and \
               cur_enclosure_str != enclosure_str:
                raise LsmError(
                    ErrorNumber.INVALID_ARGUMENT,
                    "Illegal input disks argument: disks are not from the "
                    "same disk enclosure.")

            ctrl_num = cur_ctrl_num
            enclosure_str = cur_enclosure_str
            slot_nums.append(slot_num)

        # Handle request volume name, LSI only allow 15 characters.
        # Raw string: '\-' in a non-raw pattern is an invalid escape
        # (SyntaxWarning on modern CPython).
        name = re.sub(r'[^0-9a-zA-Z_-]+', '', name)[:15]

        # A single-space enclosure string means "no enclosure": address
        # drives by slot only.
        if enclosure_str == ' ':
            drives_str = "drives=%s" % ','.join(slot_nums)
        else:
            drives_str = "drives=%s:%s" % (enclosure_str, ','.join(slot_nums))

        cmds = [
            "/c%s" % ctrl_num, "add", "vd", mega_raid_type, 'size=all',
            "name=%s" % name, drives_str
        ]

        # Spanned RAID levels need the disks-per-array hint.
        if raid_type == Volume.RAID_TYPE_RAID10 or \
           raid_type == Volume.RAID_TYPE_RAID50 or \
           raid_type == Volume.RAID_TYPE_RAID60:
            cmds.append("pdperarray=%d" % int(int_div(len(disks), 2)))

        if strip_size != Volume.VCR_STRIP_SIZE_DEFAULT:
            cmds.append("strip=%d" % int(int_div(strip_size, 1024)))

        try:
            self._storcli_exec(cmds)
        except ExecError:
            # Translate the opaque storcli failure into a specific error
            # where we can diagnose the cause; otherwise re-raise as-is.
            req_disk_ids = [d.id for d in disks]
            for cur_disk in self.disks():
                if cur_disk.id in req_disk_ids and \
                   not cur_disk.status & Disk.STATUS_FREE:
                    raise LsmError(
                        ErrorNumber.DISK_NOT_FREE,
                        "Disk %s is not in STATUS_FREE state" % cur_disk.id)
            # Check whether got unsupported RAID type or stripe size
            supported_raid_types, supported_strip_sizes = \
                self._vcr_cap_get("/c%s" % ctrl_num)

            if raid_type not in supported_raid_types:
                raise LsmError(ErrorNumber.NO_SUPPORT,
                               "Provided 'raid_type' is not supported")

            if strip_size != Volume.VCR_STRIP_SIZE_DEFAULT and \
               strip_size not in supported_strip_sizes:
                raise LsmError(ErrorNumber.NO_SUPPORT,
                               "Provided 'strip_size' is not supported")

            raise

        # Find out the DG ID from one disk.
        dg_show_output = self._storcli_exec(
            ["/c%s/e%s/s%s" % tuple(disks[0].plugin_data.split(":")), "show"])

        dg_id = dg_show_output['Drive Information'][0]['DG']
        if dg_id == '-':
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "volume_raid_create(): No error found in output, "
                "but RAID is not created: %s" % list(dg_show_output.items()))
        else:
            dg_id = int(dg_id)

        pool_id = _pool_id_of(dg_id, self._sys_id_of_ctrl_num(ctrl_num))

        lsm_vols = self.volumes(search_key='pool_id', search_value=pool_id)
        if len(lsm_vols) != 1:
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "volume_raid_create(): Got unexpected volume count(not 1) "
                "when creating RAID volume")

        return lsm_vols[0]
    def invoke_method(self, cmd, cim_path, in_params, out_handler=None,
                      error_handler=None, retrieve_data=None,
                      method_data=None):
        """
        Invoke a CIM extrinsic method and normalize the sync/async result.

        cmd
            A string of command, example:
                'CreateOrModifyElementFromStoragePool'
        cim_path
            the CIMInstanceName, example:
                CIM_StorageConfigurationService.path
        in_params
            A dictionary of input parameter, example:
                {'ElementName': volume_name,
                 'ElementType': dmtf_element_type,
                 'InPool': cim_pool_path,
                 'Size': wbem.Uint64(size_bytes)}
        out_handler
            A reference to a method to parse output, example:
                self._new_vol_from_name
        error_handler
            A reference to a method to handle all exceptions.
        retrieve_data
            SmisCommon.JOB_RETRIEVE_XXX, it will be used only
            when a ASYNC job has been created.
        method_data
            A string which will be stored in job_id, it could be used by
            job_status() to do error checking.

        Returns a (job_id, data) pair: (None, parsed_output) when the call
        completed synchronously, (job_id, None) when an async job was
        spawned. Raises LsmError on unsupported or failed invocations.
        """
        if retrieve_data is None:
            retrieve_data = SmisCommon.JOB_RETRIEVE_NONE
        try:
            (rc, out) = self._wbem_conn.InvokeMethod(
                cmd, cim_path, **in_params)

            # Check to see if operation is done
            if rc == SmisCommon.SNIA_INVOKE_OK:
                # Synchronous completion: no job id to report.
                if out_handler is None:
                    return None, None
                else:
                    return None, out_handler(out)

            elif rc == SmisCommon.SNIA_INVOKE_ASYNC:
                # We have an async operation
                job_id = SmisCommon._job_id_of_cim_job(
                    out['Job'], retrieve_data, method_data)
                return job_id, None
            elif rc == SmisCommon.SNIA_INVOKE_NOT_SUPPORTED:
                raise LsmError(
                    ErrorNumber.NO_SUPPORT,
                    'SMI-S error code indicates operation not supported')
            else:
                # Unknown return code: capture provider XML for debugging
                # before surfacing the failure.
                self._dump_wbem_xml(cmd)
                raise LsmError(ErrorNumber.PLUGIN_BUG,
                               "Error: %s rc= %s" % (cmd, str(rc)))

        except Exception:
            exc_info = sys.exc_info()
            # Make sure to save off current exception as we could cause
            # another when trying to dump debug data.
            self._dump_wbem_xml(cmd)
            if error_handler is not None:
                # Delegate the original exception info to the caller's
                # handler; it decides whether/what to raise.
                error_handler(self, method_data, exc_info)
            else:
                raise