def volume_ident_led_off(self, volume, flags=Client.FLAG_RSVD):
    """
    :param volume: volume id to stop identification
    :param flags: for future use
    :return:
    Depends on command:
        arcconf identify <ctrlNo> logicaldrive <ldNo> stop
    """
    if not volume.plugin_data:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Illegal input volume argument: missing plugin_data property")
    volume_info = volume.plugin_data.split(':')
    ctrl_id = str(volume_info[6])
    volume_id = str(volume_info[4])
    try:
        self._arcconf_exec(
            ['IDENTIFY', ctrl_id, 'LOGICALDRIVE', volume_id, 'STOP'],
            flag_force=True)
    except ExecError:
        raise LsmError(ErrorNumber.NO_STATE_CHANGE,
                       'Looks like none of the LEDs are blinking.')
    return None

def _find_storcli(self):
    """
    Try _DEFAULT_BIN_PATHS
    """
    working_bins = []
    for cur_path in MegaRAID._DEFAULT_BIN_PATHS:
        if os.path.lexists(cur_path) and os.access(cur_path, os.X_OK):
            self._storcli_bin = cur_path
            try:
                self._storcli_exec(['-v'], flag_json=False)
                working_bins.append(cur_path)
            except Exception:
                pass

    if len(working_bins) == 1:
        self._storcli_bin = working_bins[0]
        return
    elif len(working_bins) >= 2:
        # Server might have both storcli and perccli installed.
        for cur_path in working_bins:
            self._storcli_bin = cur_path
            try:
                if len(self.systems()) >= 1:
                    return
            except Exception:
                pass
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Both storcli and perccli are installed, but neither "
            "could find a valid MegaRAID card")

    raise LsmError(
        ErrorNumber.INVALID_ARGUMENT,
        "MegaRAID storcli or perccli is not installed correctly")

def _profile_check(profile_dict, profile_name, spec_ver, raise_error=False):
    """
    Check whether we support a certain profile at a certain SNIA
    specification version.
    A registered profile with a spec version later than or equal to the
    requested spec_ver is also considered a match.
    Requires the profile_dict provided by
    SmisCommon.profile_register_load().
    Will raise LsmError(ErrorNumber.NO_SUPPORT, 'xxx') if raise_error is
    True and nothing is found.
    """
    request_ver_num = _profile_spec_ver_to_num(spec_ver)
    if profile_name not in list(profile_dict.keys()):
        if raise_error:
            raise LsmError(
                ErrorNumber.NO_SUPPORT,
                "SNIA SMI-S %s '%s' profile is not supported by " %
                (profile_name, spec_ver) + "target SMI-S provider")
        return False

    support_ver_num = _profile_spec_ver_to_num(profile_dict[profile_name])
    if support_ver_num < request_ver_num:
        if raise_error:
            raise LsmError(
                ErrorNumber.NO_SUPPORT,
                "SNIA SMI-S %s '%s' profile is not supported by " %
                (profile_name, spec_ver) +
                "target SMI-S provider. Only version %s is supported" %
                profile_dict[profile_name])
        else:
            return False
    return True

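# _profile_check() relies on _profile_spec_ver_to_num() turning a dotted
# spec version into a comparable integer. A hypothetical sketch of such a
# conversion (the exact weighting used by the real helper is not shown in
# this excerpt):
def _example_spec_ver_to_num(spec_ver_str):
    # "1.5.1" -> 1 * 10**6 + 5 * 10**3 + 1 = 1005001; "1.4" -> 1004000
    parts = (spec_ver_str.split('.') + ['0', '0'])[:3]
    return (int(parts[0]) * 10 ** 6 + int(parts[1]) * 10 ** 3 +
            int(parts[2]))
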
def volume_ident_led_on(self, volume, flags=Client.FLAG_RSVD):
    """
    :param volume: volume id to be identified
    :param flags: for future use
    :return:
    Depends on command:
        arcconf identify <ctrlNo> logicaldrive <ldNo> time 3600
    The default LED blink time is set to 1 hour.
    """
    if not volume.plugin_data:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Illegal input volume argument: missing plugin_data property")
    volume_info = volume.plugin_data.split(':')
    ctrl_id = str(volume_info[6])
    volume_id = str(volume_info[4])
    try:
        self._arcconf_exec(
            ['IDENTIFY', ctrl_id, 'LOGICALDRIVE', volume_id,
             'TIME', '3600'],
            flag_force=True)
    except ExecError:
        raise LsmError(ErrorNumber.PLUGIN_BUG,
                       'Volume-ident-led-on failed unexpectedly')
    return None

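# Usage sketch for the identification-LED methods above
# (volume_ident_led_on here, volume_ident_led_off earlier). Illustrative
# only: the URI and the choice of the first reported volume are
# assumptions; volume_ident_led_on()/volume_ident_led_off() are public
# lsm.Client calls.
def _example_blink_volume_led(uri='arcconf://', password=None):
    import lsm

    client = lsm.Client(uri, password)
    try:
        vol = client.volumes()[0]         # assumption: at least one volume
        client.volume_ident_led_on(vol)   # arcconf blinks the LED for 3600s
        client.volume_ident_led_off(vol)  # stop the blinking again
    finally:
        client.close()
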
def access_group_delete(self, access_group, flags=0):
    if access_group.id.startswith(TargetdStorage._FAKE_AG_PREFIX):
        raise LsmError(
            ErrorNumber.NO_SUPPORT,
            "Cannot delete an old-style initiator-simulated access "
            "group; it is deleted automatically once no volume is "
            "masked to it")
    if self._flag_ag_support is False:
        raise LsmError(
            ErrorNumber.NO_SUPPORT,
            "Please upgrade your targetd package to support "
            "access_group_delete()")
    self._lsm_ag_of_id(
        access_group.id,
        LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
                 "Access group not found"))
    if list(m for m in self._tgt_masks() if m['ag_id'] == access_group.id):
        raise LsmError(
            ErrorNumber.IS_MASKED,
            "Cannot delete an access group which has a volume masked "
            "to it")
    self._jsonrequest("access_group_destroy",
                      {'ag_name': access_group.name})
    return None

def access_group_initiator_add(self, access_group, init_id, init_type,
                               flags=0):
    if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN:
        raise LsmError(ErrorNumber.NO_SUPPORT,
                       "Targetd only supports iSCSI")
    lsm_ag = self._lsm_ag_of_id(
        access_group.name,
        LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
                 "Access group not found"))
    # Pre-check for the NO_STATE_CHANGE error, as targetd silently
    # passes if the initiator is already in the requested access group.
    if init_id in lsm_ag.init_ids:
        raise LsmError(
            ErrorNumber.NO_STATE_CHANGE,
            "Requested init_id is already in the defined access group")
    self._jsonrequest(
        "access_group_init_add",
        dict(ag_name=access_group.name, init_id=init_id,
             init_type='iscsi'))
    return self._lsm_ag_of_id(
        access_group.name,
        LsmError(
            ErrorNumber.PLUGIN_BUG, "access_group_initiator_add(): "
            "Failed to find the updated access group"))

def access_group_create(self, name, init_id, init_type, system, flags=0):
    """
    Creates an access group.
    """
    if system.id != self.system.id:
        raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
                       "System %s not found" % system.id)
    if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN:
        raise LsmError(ErrorNumber.NO_SUPPORT,
                       "Nstor only supports iSCSI access groups")
    # Check that init_id is not already part of another hostgroup.
    for ag in self.access_groups():
        if init_id in ag.init_ids:
            raise LsmError(
                ErrorNumber.EXISTS_INITIATOR,
                "%s is already part of %s access group" %
                (init_id, ag.name))
        if name == ag.name:
            raise LsmError(ErrorNumber.NAME_CONFLICT,
                           "Access group with this name already exists!")
    self._request("create_hostgroup", "stmf", [name])
    self._add_initiator(name, init_id)
    return AccessGroup(name, name, [init_id], init_type, system.id)

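# Usage sketch for access_group_create() via the public lsm.Client API.
# The URI scheme and the IQN below are invented for the example; the call
# signature matches the plugin method above.
def _example_create_iscsi_access_group(uri='nstor://admin@host'):
    import lsm

    client = lsm.Client(uri)
    try:
        system = client.systems()[0]  # assumption: exactly one system
        return client.access_group_create(
            'web_servers', 'iqn.1994-05.com.example:host1',
            lsm.AccessGroup.INIT_TYPE_ISCSI_IQN, system)
    finally:
        client.close()
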
def _find_storcli(self):
    """
    Try _DEFAULT_BIN_PATHS
    """
    working_bins = []
    for cur_path in MegaRAID._DEFAULT_BIN_PATHS:
        if os.path.lexists(cur_path) and os.access(cur_path, os.X_OK):
            self._storcli_bin = cur_path
            try:
                self._storcli_exec(["-v"], flag_json=False)
            except Exception:
                pass
            else:
                working_bins.append(cur_path)

    if len(working_bins) == 1:
        self._storcli_bin = working_bins[0]
        return
    elif len(working_bins) > 1:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "More than one MegaRAID storcli or perccli found ({})".format(
                ", ".join(sorted(working_bins)),
            ),
        )
    else:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "MegaRAID storcli or perccli is not installed correctly",
        )

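# A minimal standalone sketch of the probing pattern shared by the two
# _find_storcli() variants above: try each candidate binary, keep those
# that execute, and require an unambiguous winner. The candidate paths
# and the '-v' probe flag mirror the code above; everything else here is
# an assumption for illustration.
def _example_probe_binaries(candidates=('/usr/sbin/storcli64',
                                        '/usr/sbin/perccli64')):
    import os
    import subprocess

    working = []
    for path in candidates:
        if os.path.lexists(path) and os.access(path, os.X_OK):
            try:
                subprocess.run([path, '-v'], check=True,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
            except (OSError, subprocess.CalledProcessError):
                continue
            working.append(path)
    if len(working) != 1:
        raise RuntimeError("Expected exactly one working binary, got %s" %
                           (working, ))
    return working[0]
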
def pool_member_info(self, pool, flags=Client.FLAG_RSVD):
    """
    Depends on command:
        hpssacli ctrl slot=0 show config detail
    """
    if not pool.plugin_data:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Illegal input pool argument: missing plugin_data property")
    (ctrl_num, array_num) = pool.plugin_data.split(":")
    ctrl_data = list(self._sacli_exec(
        ["ctrl", "slot=%s" % ctrl_num, "show", "config",
         "detail"]).values())[0]

    disk_ids = []
    raid_type = Volume.RAID_TYPE_UNKNOWN
    for key_name in ctrl_data.keys():
        if key_name == "Array: %s" % array_num:
            for array_key_name in ctrl_data[key_name].keys():
                if array_key_name.startswith("Logical Drive: ") and \
                        raid_type == Volume.RAID_TYPE_UNKNOWN:
                    raid_type = _hp_raid_level_to_lsm(
                        ctrl_data[key_name][array_key_name])
                elif array_key_name.startswith("physicaldrive"):
                    hp_disk = ctrl_data[key_name][array_key_name]
                    if hp_disk['Drive Type'] == 'Data Drive':
                        disk_ids.append(hp_disk['Serial Number'])
            break

    if len(disk_ids) == 0:
        raise LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")

    return raid_type, Pool.MEMBER_TYPE_DISK, disk_ids

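# Worked illustration of the selection logic above, using a hand-made
# sample of the parsed 'show config detail' tree. The key names mirror
# what the code expects; the values are invented for the example.
def _example_pool_member_selection():
    sample_array = {
        "Logical Drive: 1": {"Fault Tolerance": "RAID 5"},
        "physicaldrive 1I:1:1": {"Drive Type": "Data Drive",
                                 "Serial Number": "SN0001"},
        "physicaldrive 1I:1:2": {"Drive Type": "Spare Drive",
                                 "Serial Number": "SN0002"},
    }
    disk_ids = [
        value["Serial Number"]
        for key, value in sample_array.items()
        if key.startswith("physicaldrive") and
        value["Drive Type"] == "Data Drive"
    ]
    assert disk_ids == ["SN0001"]  # spare drives are skipped
    return disk_ids
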
def volume_unmask(self, access_group, volume, flags=0):
    self._lsm_ag_of_id(
        access_group.id,
        LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
                 "Access group not found"))
    self._lsm_vol_of_id(
        volume.id,
        LsmError(ErrorNumber.NOT_FOUND_VOLUME, "Volume not found"))

    # Pre-check if already unmasked
    if not self._is_masked(access_group.id, volume.pool_id, volume.name):
        raise LsmError(ErrorNumber.NO_STATE_CHANGE,
                       "Volume is not masked to requested access group")

    if access_group.id.startswith(TargetdStorage._FAKE_AG_PREFIX):
        self._jsonrequest(
            "export_destroy",
            dict(pool=volume.pool_id, vol=volume.name,
                 initiator_wwn=access_group.init_ids[0]))
    else:
        self._jsonrequest(
            "access_group_map_destroy", {
                'pool_name': volume.pool_id,
                'vol_name': volume.name,
                'ag_name': access_group.id,
            })
    return None

def volume_physical_disk_cache_update(self, volume, pdc,
                                      flags=Client.FLAG_RSVD):
    """
    Depends on command:
        storcli /c0/vX set pdcache=<on|off>
    """
    cmd = [_vd_path_of_lsm_vol(volume), "set"]
    if pdc == Volume.PHYSICAL_DISK_CACHE_ENABLED:
        cmd.append("pdcache=on")
    elif pdc == Volume.PHYSICAL_DISK_CACHE_DISABLED:
        cmd.append("pdcache=off")
    else:
        raise LsmError(ErrorNumber.PLUGIN_BUG,
                       "Got unknown pdc: %d" % pdc)
    try:
        self._storcli_exec(cmd)
    # On SSD disks, the command returns 0 on failure; only the JSON
    # output indicates the error.
    except LsmError as lsm_err:
        if lsm_err.code == ErrorNumber.PLUGIN_BUG and \
                "SSD Pd is present" in lsm_err.msg:
            raise LsmError(
                ErrorNumber.NO_SUPPORT,
                "Changing SSD physical disk cache is not allowed "
                "on MegaRAID")
        raise

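# Usage sketch (illustrative): disabling the physical disk cache for
# every volume through the public lsm.Client API. The URI is an
# assumption; volume_physical_disk_cache_update() and the
# PHYSICAL_DISK_CACHE_* constants are the lsm API names the method above
# already relies on.
def _example_disable_pdcache(uri='megaraid://'):
    import lsm

    client = lsm.Client(uri)
    try:
        for vol in client.volumes():
            client.volume_physical_disk_cache_update(
                vol, lsm.Volume.PHYSICAL_DISK_CACHE_DISABLED)
    finally:
        client.close()
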
def volume_delete(self, volume, flags=0):
    """
    Depends on command:
        arcconf delete <ctrlNo> logicaldrive <ld#> noprompt
    """
    if not volume.plugin_data:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Illegal input volume argument: missing plugin_data property")
    (ctrl_num, array_num) = volume.plugin_data.split(":")[:2]
    try:
        self._arcconf_exec(
            ['delete', ctrl_num, 'logicaldrive', array_num],
            flag_force=True)
    except ExecError:
        ctrl_info = self._get_detail_info_list()[int(ctrl_num) - 1]
        for ld in range(len(ctrl_info['LogicalDrive'])):
            ld_info = ctrl_info['LogicalDrive']
            # TODO (Raghavendra) Need to find the scenarios when this can
            # occur. If volume is detected correctly, but deletion of
            # volume fails due to arcconf delete command failure.
            if array_num == ld_info[ld]['logicalDriveID']:
                raise LsmError(ErrorNumber.PLUGIN_BUG,
                               "volume_delete failed unexpectedly")
        raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, "Volume not found")
    return None

def _update_exports():
    """Re-export all directories on the system (exportfs -ar)"""
    cmd = ["/usr/sbin/exportfs", "-ar"]
    try:
        NFSPlugin._run_cmd(cmd)
    except subprocess.CalledProcessError:
        raise LsmError(ErrorNumber.INVALID_ARGUMENT, 'exportfs failed')
    except OSError:
        raise LsmError(ErrorNumber.PLUGIN_BUG, 'error calling exportfs')

def plugin_register(self, uri, password, timeout, flags=0):
    self.uri = uri_parse(uri)
    self.password = password
    self.tmo = timeout

    user = self.uri.get('username', DEFAULT_USER)
    port = self.uri.get('port', DEFAULT_PORT)

    self.host_with_port = "%s:%s" % (self.uri['host'], port)
    if self.uri['scheme'].lower() == 'targetd+ssl':
        self.scheme = 'https'
    else:
        self.scheme = 'http'

    self.url = urlunsplit(
        (self.scheme, self.host_with_port, PATH, None, None))

    # HTTP Basic auth: base64 of "user:password"
    user_name_pass = '%s:%s' % (user, self.password)
    auth = base64.b64encode(user_name_pass.encode('utf-8')).decode('utf-8')
    self.headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Basic %s' % (auth, )
    }

    if "no_ssl_verify" in self.uri["parameters"] \
            and self.uri["parameters"]["no_ssl_verify"] == 'yes':
        self.no_ssl_verify = True

    if "ca_cert_file" in self.uri["parameters"]:
        # Check for file existence and throw error now if not present
        self.ca_cert_file = self.uri["parameters"]["ca_cert_file"]

        if not os.path.isfile(self.ca_cert_file):
            raise LsmError(
                ErrorNumber.INVALID_ARGUMENT,
                'ca_cert_file URI parameter does not exist %s' %
                self.ca_cert_file)

    if self.no_ssl_verify and self.ca_cert_file:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Specifying both 'no_ssl_verify' and 'ca_cert_file' "
            "is an unsupported combination.")

    if not SSL_DEFAULT_CONTEXT and (self.no_ssl_verify or
                                    self.ca_cert_file):
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Cannot specify no_ssl_verify or ca_cert_file for this "
            "version of python!")

    try:
        self._jsonrequest('access_group_list',
                          default_error_handler=False)
    except TargetdError as te:
        if te.errno == TargetdError.INVALID_METHOD:
            self._flag_ag_support = False
        else:
            raise

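# Standalone illustration of the Basic-auth header built above: base64 of
# "user:password" (RFC 7617). The credentials here are invented for the
# example.
def _example_basic_auth_header(user='admin', password='targetd'):
    import base64

    token = base64.b64encode(
        ('%s:%s' % (user, password)).encode('utf-8')).decode('utf-8')
    return {'Content-Type': 'application/json',
            'Authorization': 'Basic %s' % token}
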
def cim_wrapper(*args, **kwargs):
    try:
        return method(*args, **kwargs)
    except LsmError:
        raise
    except CIMError as ce:
        error_code, desc = ce.args[0], ce.args[1]
        if error_code == 0:
            if 'Socket error' in desc:
                if 'Errno 111' in desc:
                    raise LsmError(ErrorNumber.NETWORK_CONNREFUSED,
                                   'Connection refused')
                if 'Errno 113' in desc:
                    raise LsmError(ErrorNumber.NETWORK_HOSTDOWN,
                                   'Host is down')
            elif 'SSL error' in desc:
                raise LsmError(ErrorNumber.TRANSPORT_COMMUNICATION, desc)
            elif 'The web server returned a bad status line' in desc:
                raise LsmError(ErrorNumber.TRANSPORT_COMMUNICATION, desc)
            elif 'HTTP error' in desc:
                raise LsmError(ErrorNumber.TRANSPORT_COMMUNICATION, desc)
        raise LsmError(ErrorNumber.PLUGIN_BUG, desc)
    except pywbem.cim_http.AuthError:
        raise LsmError(ErrorNumber.PLUGIN_AUTH_FAILED,
                       "Unauthorized user")
    except pywbem.cim_http.Error as te:
        raise LsmError(ErrorNumber.NETWORK_ERROR, str(te))
    except Exception as e:
        error("Unexpected exception:\n" + traceback.format_exc())
        raise LsmError(ErrorNumber.PLUGIN_BUG, str(e),
                       traceback.format_exc())

def volume_raid_info(self, volume, flags=Client.FLAG_RSVD) -> List:
    _ = flags
    if not volume.plugin_data:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Illegal input volume argument: missing plugin_data property",
        )

    vd_path = _vd_path_of_lsm_vol(volume)
    vol_show_output = self._storcli_exec([vd_path, "show", "all"])
    vd_basic_info = vol_show_output[vd_path][0]
    vd_id = int(vd_basic_info["DG/VD"].split("/")[-1])
    vd_prop_info = vol_show_output[f"VD{vd_id:d} Properties"]

    raid_type = _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info)
    strip_size = _mega_size_to_lsm(vd_prop_info["Strip Size"])
    disk_count = (int(vd_prop_info["Number of Drives Per Span"]) *
                  int(vd_prop_info["Span Depth"]))
    if raid_type == Volume.RAID_TYPE_RAID0:
        strip_count = disk_count
    elif raid_type == Volume.RAID_TYPE_RAID1:
        strip_count = 1
    elif raid_type == Volume.RAID_TYPE_RAID5:
        strip_count = disk_count - 1
    elif raid_type == Volume.RAID_TYPE_RAID6:
        strip_count = disk_count - 2
    elif raid_type == Volume.RAID_TYPE_RAID50:
        strip_count = (
            (int(vd_prop_info["Number of Drives Per Span"]) - 1) *
            int(vd_prop_info["Span Depth"]))
    elif raid_type == Volume.RAID_TYPE_RAID60:
        strip_count = (
            (int(vd_prop_info["Number of Drives Per Span"]) - 2) *
            int(vd_prop_info["Span Depth"]))
    elif raid_type == Volume.RAID_TYPE_RAID10:
        strip_count = (
            int(vd_prop_info["Number of Drives Per Span"]) // 2 *
            int(vd_prop_info["Span Depth"]))
    else:
        # MegaRAID does not support RAID 15 or 16 yet.
        raise LsmError(
            ErrorNumber.PLUGIN_BUG,
            "volume_raid_info(): Got unexpected RAID type: {}".format(
                vd_basic_info["TYPE"],
            ),
        )

    return [
        raid_type,
        strip_size,
        disk_count,
        strip_size,
        strip_size * strip_count,
    ]

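# Worked example of the strip_count arithmetic above (numbers invented):
# a RAID50 volume with 5 drives per span and 2 spans has
# strip_count = (5 - 1) * 2 = 8, so with a 64 KiB strip size the optimal
# I/O size is 64 KiB * 8 = 512 KiB, while min_io_size stays at the strip
# size.
def _example_raid50_io_sizes(strip_size=64 * 1024, drives_per_span=5,
                             span_depth=2):
    strip_count = (drives_per_span - 1) * span_depth
    min_io_size = strip_size
    opt_io_size = strip_size * strip_count
    assert (min_io_size, opt_io_size) == (65536, 524288)
    return min_io_size, opt_io_size
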
def lsm_pool_to_cim_pool_path(smis_common, lsm_pool):
    """
    Convert lsm.Pool to CIMInstanceName of CIM_StoragePool using
    lsm.Pool.plugin_data
    """
    if not lsm_pool.plugin_data:
        raise LsmError(ErrorNumber.PLUGIN_BUG,
                       "Got lsm.Pool instance with empty plugin_data")
    if smis_common.system_list and \
            lsm_pool.system_id not in smis_common.system_list:
        raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
                       "System filtered in URI")

    return path_str_to_cim_path(lsm_pool.plugin_data)

def volume_mask(self, access_group, volume, flags=0):
    self._lsm_ag_of_id(
        access_group.id,
        LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
                 "Access group not found"))
    self._lsm_vol_of_id(
        volume.id,
        LsmError(ErrorNumber.NOT_FOUND_VOLUME, "Volume not found"))

    tgt_masks = self._tgt_masks()

    if self._is_masked(access_group.id, volume.pool_id, volume.name,
                       tgt_masks):
        raise LsmError(
            ErrorNumber.NO_STATE_CHANGE,
            "Volume is already masked to requested access group")

    if access_group.id.startswith(TargetdStorage._FAKE_AG_PREFIX):
        free_h_lun_ids = (set(range(TargetdStorage._MAX_H_LUN_ID + 1)) -
                          set([m['h_lun_id'] for m in tgt_masks]))

        if len(free_h_lun_ids) == 0:
            # TODO(Gris Ge): Add SYSTEM_LIMIT error into API
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "System limit: targetd only allows %s masked LUNs" %
                TargetdStorage._MAX_H_LUN_ID)

        h_lun_id = free_h_lun_ids.pop()

        self._jsonrequest(
            "export_create", {
                'pool': volume.pool_id,
                'vol': volume.name,
                'initiator_wwn': access_group.init_ids[0],
                'lun': h_lun_id
            })
    else:
        self._jsonrequest(
            'access_group_map_create', {
                'pool_name': volume.pool_id,
                'vol_name': volume.name,
                'ag_name': access_group.id,
            })
    return None

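# Standalone illustration of the free host-LUN-id selection above: take
# the set of all candidate ids, subtract the ones already in use, and pop
# an arbitrary free one. The limit of 255 mirrors what a
# _MAX_H_LUN_ID-style constant might hold; the used ids are invented.
def _example_pick_free_h_lun_id(used_ids=(0, 1, 2), max_h_lun_id=255):
    free_ids = set(range(max_h_lun_id + 1)) - set(used_ids)
    if not free_ids:
        raise RuntimeError("no free host LUN id left")
    return free_ids.pop()  # any id in 3..255 for the defaults above
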
def pool_member_info(self, pool, flags=Client.FLAG_RSVD):
    lsi_dg_path = pool.plugin_data
    # Check whether pool exists.
    try:
        dg_show_all_output = self._storcli_exec(
            [lsi_dg_path, "show", "all"])
    except ExecError as exec_error:
        try:
            json_output = json.loads(exec_error.stdout)
            detail_error = json_output[
                'Controllers'][0]['Command Status']['Detailed Status']
        except Exception:
            raise exec_error
        if detail_error and detail_error[0]['Status'] == 'Not found':
            raise LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")
        raise

    ctrl_num = lsi_dg_path.split('/')[1][1:]
    lsm_disk_map = {}
    disk_ids = []
    for lsm_disk in self.disks():
        lsm_disk_map[lsm_disk.plugin_data] = lsm_disk.id

    for dg_disk_info in dg_show_all_output['DG Drive LIST']:
        cur_lsi_disk_id = "%s:%s" % (ctrl_num, dg_disk_info['EID:Slt'])
        if cur_lsi_disk_id in lsm_disk_map:
            disk_ids.append(lsm_disk_map[cur_lsi_disk_id])
        else:
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "pool_member_info(): Failed to find disk id of %s" %
                cur_lsi_disk_id)

    raid_type = Volume.RAID_TYPE_UNKNOWN
    dg_num = lsi_dg_path.split('/')[2][1:]
    for dg_top in dg_show_all_output['TOPOLOGY']:
        if dg_top['Arr'] == '-' and \
                dg_top['Row'] == '-' and \
                int(dg_top['DG']) == int(dg_num):
            raid_type = _RAID_TYPE_MAP.get(dg_top['Type'],
                                           Volume.RAID_TYPE_UNKNOWN)
            break

    if raid_type == Volume.RAID_TYPE_RAID1 and len(disk_ids) >= 4:
        raid_type = Volume.RAID_TYPE_RAID10

    return raid_type, Pool.MEMBER_TYPE_DISK, disk_ids

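# Quick illustration of the '/cN/dM' path slicing used above: the
# controller number and disk-group number are cut out of the storcli
# disk-group path. The sample path is invented.
def _example_parse_dg_path(lsi_dg_path='/c0/d2'):
    ctrl_num = lsi_dg_path.split('/')[1][1:]  # '/c0/d2' -> '0'
    dg_num = lsi_dg_path.split('/')[2][1:]    # '/c0/d2' -> '2'
    assert (ctrl_num, dg_num) == ('0', '2')
    return ctrl_num, dg_num
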
def plugin_register(self, uri, password, timeout, flags=Client.FLAG_RSVD):
    if os.geteuid() != 0:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "This plugin requires root privilege for both daemon and "
            "client")
    uri_parsed = uri_parse(uri)
    self._arcconf_bin = uri_parsed.get('parameters', {}).get('arcconf')
    if not self._arcconf_bin:
        self._arcconf_bin = Arcconf.find_arcconf()
        if not self._arcconf_bin:
            raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                           "arcconf is not installed correctly")

    self._arcconf_exec(['list'])

def volume_raid_info(self, volume, flags=Client.FLAG_RSVD):
    """
    :param volume: volume id - Volume to query
    :param flags: optional. Reserved for future use.
    :return: [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
    Depends on command:
        arcconf getconfigjson <ctrlNo> array <arrayNo>
    """
    if not volume.plugin_data:
        raise LsmError(
            ErrorNumber.INVALID_ARGUMENT,
            "Illegal input volume argument: missing plugin_data property")

    volume_info = volume.plugin_data.split(':')
    ctrl_id = str(volume_info[6])
    array_id = str(volume_info[5])
    volume_raid_level = str(volume_info[1])
    # Values are reported in KiB; multiply by 1024 for bytes.
    stripe_size = int(volume_info[2]) * 1024
    full_stripe_size = int(volume_info[3]) * 1024
    device_count = 0

    array_info = self._arcconf_exec(
        ['GETCONFIGJSON', ctrl_id, 'ARRAY', array_id], flag_force=True)
    array_json_info = self._filter_cmd_output(array_info)['Array']
    for chunk in array_json_info['Chunk']:
        if 'deviceID' in chunk:
            device_count += 1

    raid_level = _arcconf_raid_level_to_lsm(volume_raid_level)

    if device_count == 0:
        if stripe_size == Volume.STRIP_SIZE_UNKNOWN:
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "volume_raid_info(): Got logical drive %s entry, "
                "but no physicaldrive entry" % volume.id)
        raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, "Volume not found")

    return [raid_level, stripe_size, device_count, stripe_size,
            full_stripe_size]

def lsm_ag_to_cim_spc_path(smis_common, lsm_ag):
    """
    Convert lsm.AccessGroup to CIMInstanceName of
    CIM_SCSIProtocolController using lsm.AccessGroup.plugin_data.
    This method does not check whether plugin_data is cim_spc or
    cim_init_mg; the caller should make sure of that.
    """
    if not lsm_ag.plugin_data:
        raise LsmError(ErrorNumber.PLUGIN_BUG,
                       "Got lsm.AccessGroup instance with empty "
                       "plugin_data")
    if smis_common.system_list and \
            lsm_ag.system_id not in smis_common.system_list:
        raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
                       "System filtered in URI")

    return path_str_to_cim_path(lsm_ag.plugin_data)

def _wrapper(*args, **kwargs):
    try:
        return method(*args, **kwargs)
    except LsmError:
        raise
    except KeyError as key_error:
        raise LsmError(
            ErrorNumber.PLUGIN_BUG,
            "Expected key missing from MegaRAID storcli output: %s" %
            key_error)
    except ExecError as exec_error:
        raise LsmError(ErrorNumber.PLUGIN_BUG, str(exec_error))
    except Exception as common_error:
        raise LsmError(ErrorNumber.PLUGIN_BUG,
                       "Got unexpected error %s" % common_error)

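# The _wrapper above is the inner function of an error-translating
# decorator; a minimal sketch of the enclosing factory is below.
# _example_handle_errors is a hypothetical name for illustration;
# functools.wraps keeps the wrapped method's name and docstring.
def _example_handle_errors(method):
    import functools

    from lsm import ErrorNumber, LsmError

    @functools.wraps(method)
    def _wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except LsmError:
            raise
        except Exception as common_error:
            raise LsmError(ErrorNumber.PLUGIN_BUG,
                           "Got unexpected error %s" % common_error)

    return _wrapper
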
def volume_write_cache_policy_update(self, volume, wcp,
                                     flags=Client.FLAG_RSVD):
    """
    Depends on command:
        storcli /c0/vX set wrcache=<wt|wb|awb>
    """
    vd_path = _vd_path_of_lsm_vol(volume)

    # Check whether we are working on cache I/O, which ignores the write
    # cache setting and always caches writes.
    vol_show_output = self._storcli_exec([vd_path, "show", "all"])
    vd_basic_info = vol_show_output[vd_path][0]
    lsi_cache_setting = vd_basic_info['Cache']
    if lsi_cache_setting.endswith('C'):
        flag_cache_io = True
    else:
        flag_cache_io = False

    cmd = [vd_path, "set"]
    if wcp == Volume.WRITE_CACHE_POLICY_WRITE_BACK:
        cmd.append("wrcache=awb")
    elif wcp == Volume.WRITE_CACHE_POLICY_AUTO:
        if flag_cache_io:
            self._storcli_exec([vd_path, "set", "iopolicy=Direct"])
        cmd.append("wrcache=wb")
    elif wcp == Volume.WRITE_CACHE_POLICY_WRITE_THROUGH:
        if flag_cache_io:
            self._storcli_exec([vd_path, "set", "iopolicy=Direct"])
        cmd.append("wrcache=wt")
    else:
        raise LsmError(ErrorNumber.PLUGIN_BUG,
                       "Got unknown wcp: %d" % wcp)
    self._storcli_exec(cmd)

def cim_init_path_check_or_create(smis_common, system_id, init_id,
                                  init_type):
    """
    Check whether CIM_StorageHardwareID exists, if not, create new one.
    """
    cim_inits = smis_common.EnumerateInstances(
        'CIM_StorageHardwareID', PropertyList=_CIM_INIT_PROS)
    if len(cim_inits):
        for cim_init in cim_inits:
            if init_id_of_cim_init(cim_init) == init_id:
                return cim_init.path

    # Create new one
    dmtf_id_type = None
    if init_type == AccessGroup.INIT_TYPE_WWPN:
        dmtf_id_type = dmtf.ID_TYPE_WWPN
    elif init_type == AccessGroup.INIT_TYPE_ISCSI_IQN:
        dmtf_id_type = dmtf.ID_TYPE_ISCSI
    else:
        raise LsmError(
            ErrorNumber.PLUGIN_BUG,
            "cim_init_path_check_or_create(): Got invalid init_type: %d" %
            init_type)

    cim_hwms = smis_common.cim_hwms_of_sys_id(system_id)
    in_params = {
        'StorageID': init_id,
        'IDType': dmtf_id_type,
    }
    return smis_common.invoke_method_wait(
        'CreateStorageHardwareID', cim_hwms.path, in_params,
        out_key='HardwareID', expect_class='CIM_StorageHardwareID')

def export_auth(self, flags=Client.FLAG_RSVD):
    if self.nfs_conn is not None:
        return self.nfs_conn.export_auth(flags=flags)
    raise LsmError(
        ErrorNumber.NO_SUPPORT,
        "NFS plugin is not loaded, please load nfsd kernel "
        "module and related services")

def _default_error_handler(error_code, msg):
    if error_code in TargetdStorage._ERROR_MAPPING:
        ec = TargetdStorage._ERROR_MAPPING[error_code]['ec']
        msg_d = TargetdStorage._ERROR_MAPPING[error_code]['msg']
        if not msg_d:
            msg_d = msg
        raise LsmError(ec, msg_d)

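# The handler above expects _ERROR_MAPPING to map a targetd error code to
# an lsm error code plus an optional message override. A hypothetical
# entry (the -53/NOT_FOUND_VOLUME pairing is invented for illustration;
# ErrorNumber comes from the lsm package as elsewhere in this file):
_EXAMPLE_ERROR_MAPPING = {
    -53: {
        'ec': ErrorNumber.NOT_FOUND_VOLUME,
        'msg': None,  # None: fall back to the message sent by targetd
    },
}
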
def sys_id_of_cim_disk(cim_disk):
    if 'SystemName' not in cim_disk:
        raise LsmError(
            ErrorNumber.PLUGIN_BUG,
            "sys_id_of_cim_disk(): Got cim_disk with no "
            "SystemName property: %s, %s" %
            (cim_disk.path, cim_disk.items()))
    return cim_disk['SystemName']

def list():
    """
    Version:
        1.3
    Usage:
        Query local disk paths. Currently, only SCSI, ATA and NVMe
        disks will be included.
    Parameters:
        N/A
    Returns:
        [disk_path]
            List of string. Empty list if no disk is found.
            The disk_path string format is '/dev/sd[a-z]+' for SCSI
            and ATA disks, '/dev/nvme[0-9]+n[0-9]+' for NVMe disks.
    SpecialExceptions:
        LsmError
            ErrorNumber.LIB_BUG
                Internal bug.
    Capability:
        N/A
            No capability required as this is a library level method.
    """
    (disk_paths, err_no, err_msg) = _local_disk_list()
    if err_no != ErrorNumber.OK:
        raise LsmError(err_no, err_msg)
    return disk_paths

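# Usage sketch: lsm.LocalDisk.list() is the public entry point for the
# helper above. The error handling shown is the caller's choice, not
# mandated by the library.
def _example_print_local_disks():
    import lsm

    try:
        for disk_path in lsm.LocalDisk.list():
            print(disk_path)  # e.g. /dev/sda, /dev/nvme0n1
    except lsm.LsmError as err:
        print("query failed: %s" % err.msg)
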
def _str_to_enum(type_str, conv_dict):
    keys = [k for k, v in list(conv_dict.items())
            if v.lower() == type_str.lower()]
    if len(keys) > 0:
        return keys[0]
    raise LsmError(ErrorNumber.INVALID_ARGUMENT,
                   "Failed to convert %s to lsm type" % type_str)

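# Tiny demonstration of the reverse lookup above, with an invented
# enum-to-string table; the match is case-insensitive.
def _example_str_to_enum():
    conv_dict = {1: 'thin', 2: 'thick'}  # enum value -> display string
    assert _str_to_enum('Thick', conv_dict) == 2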