def _create_lun_helper(self, lun, pool=None, find_new_pid=True):
    """Create a LUN on the backend, retrying on other LSSes when full.

    :param lun: the Lun object describing the volume to create.
    :param pool: optional pool to pick the LSS from.
    :param find_new_pid: whether a new pool id may be selected.
    :raises restclient.APIException: when an ECKD backend cannot host
        a thin (ESE) volume.
    """
    # DS8K supports ECKD ESE volume from 8.1
    conn_type = self._helper.get_connection_type()
    if conn_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
        thin_provision = self._helper.get_thin_provision()
        if lun.type_thin and thin_provision:
            if lun.type_replication:
                msg = _("The primary or the secondary storage "
                        "can not support ECKD ESE volume.")
            else:
                msg = _("Backend can not support ECKD ESE volume.")
            LOG.error(msg)
            raise restclient.APIException(message=msg)
    # There is a time gap between find available LSS slot and
    # lun actually occupies it.
    tried_lss = []
    while True:
        try:
            if lun.type_replication and not lun.is_snapshot:
                lun.lss_pair = self._replication.find_available_lss_pair(
                    tried_lss)
            else:
                lun.lss_pair['source'] = self._helper.find_available_lss(
                    pool, find_new_pid, tried_lss)
            return self._helper.create_lun(lun)
        except restclient.LssFullException:
            full_lss = lun.lss_pair['source'][1]
            LOG.warning("LSS %s is full, find another one.", full_lss)
            tried_lss.append(full_lss)
def wait_flashcopy_finished(self, src_luns, tgt_luns):
    """Poll until every FlashCopy to ``tgt_luns`` completes.

    On failure, every FlashCopy relationship between the source and
    target LUNs is deleted so nothing is left half-copied.

    :returns: True once all copies have finished.
    :raises restclient.APIException: when a copy enters a bad state.
    """
    finished = False
    try:
        done = [False] * len(tgt_luns)
        while True:
            eventlet.sleep(5)
            for idx, tgt_lun in enumerate(tgt_luns):
                if done[idx]:
                    continue
                fcs = self.get_flashcopy(tgt_lun.ds_id)
                if not fcs:
                    # No FlashCopy relationship left: this copy is done.
                    done[idx] = True
                elif fcs[0]['state'] not in ('valid',
                                             'validation_required'):
                    msg = (_('Flashcopy ended up in bad state %s. '
                             'Rolling back.') % fcs[0]['state'])
                    raise restclient.APIException(data=msg)
            if all(done):
                break
        finished = True
    finally:
        if not finished:
            # Roll back: remove every FlashCopy relationship.
            for src_lun, tgt_lun in zip(src_luns, tgt_luns):
                self.delete_flashcopy(src_lun.ds_id, tgt_lun.ds_id)
    return finished
def terminate_connection(self, vol_id, connector, force, **kwargs):
    """Detach volume ``vol_id`` from the host described by ``connector``.

    Unmaps the volume, and when the host has no remaining mappings,
    deletes its ports and the host definition itself.

    :param vol_id: DS8K volume ID to unmap.
    :param connector: connector dict from nova; may lack 'wwpns' when
        nova generates a fake connector for a down host.
    :param force: unused, kept for driver interface compatibility.
    :returns: FC driver info dict, or None when no host was found.
    :raises restclient.APIException: when more than one host matches.
    """
    # If a fake connector is generated by nova when the host
    # is down, then the connector will not have a wwpns property.
    if 'wwpns' in connector:
        host = self._get_host(connector)
        host_wwpn_set = set(wwpn.upper() for wwpn in connector['wwpns'])
        host_ports = self._get_host_ports(host_wwpn_set)
        defined_hosts = set(
            hp['host_id'] for hp in host_ports if hp['host_id'])
        delete_ports = set(
            hp['wwpn'] for hp in host_ports if not hp['host_id'])
    else:
        host_ports = None
        delete_ports = None
        defined_hosts = self._find_host(vol_id)
    msg = ("terminate_connection: host_ports: %(host)s, defined_hosts: "
           "%(defined)s, delete_ports: %(delete)s.")
    LOG.debug(msg, {
        "host": host_ports,
        "defined": defined_hosts,
        "delete": delete_ports
    })
    if not defined_hosts:
        LOG.info(_LI('Could not find host.'))
        return None
    elif len(defined_hosts) > 1:
        raise restclient.APIException(_('More than one host found.'))
    else:
        host_id = defined_hosts.pop()
        mappings = self._get_mappings(host_id)
        lun_ids = [
            m['lunid'] for m in mappings if m['volume']['id'] == vol_id
        ]
        msg = _LI('Volumes attached to host %(host)s are %(vols)s.')
        LOG.info(msg, {'host': host_id, 'vols': ','.join(lun_ids)})
        for lun_id in lun_ids:
            self._delete_mappings(host_id, lun_id)
        if not lun_ids:
            msg = _LW("Volume %(vol)s is already not mapped to "
                      "host %(host)s.")
            # BUG FIX: log host_id instead of host.name -- 'host' is
            # never bound on the fake-connector path (no 'wwpns'), so
            # referencing it here raised NameError.
            LOG.warning(msg, {'vol': vol_id, 'host': host_id})
        # if this host only has volumes that have been detached,
        # remove the host and its ports
        ret_info = {'driver_volume_type': 'fibre_channel', 'data': {}}
        if len(mappings) == len(lun_ids):
            if delete_ports:
                self._delete_host_ports(",".join(delete_ports))
            self._delete_host(host_id)
            if 'wwpns' in connector:
                target_ports = [p['wwpn'] for p in self._get_ioports()]
                target_map = {
                    initiator.upper(): target_ports
                    for initiator in connector['wwpns']
                }
                ret_info['data']['initiator_target_map'] = target_map
                return ret_info
        return ret_info
def initialize_connection(self, vol_id, connector, **kwargs):
    """Map volume ``vol_id`` to the FC host described by ``connector``.

    Creates the host and any missing host ports when needed, then maps
    the volume and returns the FC connection info dict.

    :raises restclient.APIException: when more than one host is already
        defined for the requested ports.
    """
    host = self._get_host(connector)
    # Find defined host and undefined host ports
    host_wwpn_set = {wwpn.upper() for wwpn in connector['wwpns']}
    host_ports = self._get_host_ports(host_wwpn_set)
    LOG.debug("host_ports: %s", host_ports)
    defined_hosts = {hp['host_id'] for hp in host_ports if hp['host_id']}
    known_wwpns = {hp['wwpn'] for hp in host_ports}
    unknown_ports = host_wwpn_set - known_wwpns
    unconfigured_ports = {hp['wwpn'] for hp in host_ports
                          if not hp['host_id']}
    msg = ("initialize_connection: defined_hosts: %(defined)s, "
           "unknown_ports: %(unknown)s, unconfigured_ports: "
           "%(unconfigured)s.")
    LOG.debug(msg, {"defined": defined_hosts,
                    "unknown": unknown_ports,
                    "unconfigured": unconfigured_ports})
    # Create host if it is not defined
    if not defined_hosts:
        host_id = self._create_host(host)['id']
    elif len(defined_hosts) == 1:
        host_id = defined_hosts.pop()
    else:
        msg = _('More than one host defined for requested ports.')
        raise restclient.APIException(message=msg)
    LOG.info(_LI('Volume will be attached to host %s.'), host_id)
    # Create missing host ports
    missing_ports = unknown_ports | unconfigured_ports
    if missing_ports:
        self._assign_host_port(host_id, list(missing_ports))
    # Map the volume to host
    lun_id = self._map_volume_to_host(host_id, vol_id)
    target_ports = [p['wwpn'] for p in self._get_ioports()]
    return {
        'driver_volume_type': 'fibre_channel',
        'data': {
            'target_discovered': False,
            'target_lun': int(lun_id, 16),
            'target_wwn': target_ports,
            'initiator_target_map': {initiator: target_ports
                                     for initiator in host_wwpn_set}
        }
    }
def create_pprc_path(self, lun, is_group=False):
    """Ensure a healthy PPRC path exists for the LUN's pool/LSS pair.

    When the volume group has failed over (``is_group``), source and
    target helpers are swapped so the path is built in the reverse
    direction.

    :raises restclient.APIException: when the new path never becomes
        healthy; the half-created path is deleted first.
    """
    switch = lun.failed_over if is_group else False
    if switch:
        src_helper, tgt_helper = self._target_helper, self._source_helper
    else:
        src_helper, tgt_helper = self._source_helper, self._target_helper
    src_lss = lun.pool_lss_pair['source'][1]
    tgt_lss = lun.pool_lss_pair['target'][1]
    # check whether the pprc path exists and is healthy or not.
    pid = (src_helper.backend['storage_wwnn'] + '_' + src_lss + ':' +
           tgt_helper.backend['storage_wwnn'] + '_' + tgt_lss)
    state = self._is_pprc_paths_healthy(pid, switch)
    LOG.info("The state of PPRC path %(path)s is %(state)s.",
             {'path': pid, 'state': state})
    if state == PPRC_PATH_HEALTHY:
        return
    # create the pprc path
    path_data = {
        'target_system_wwnn': tgt_helper.backend['storage_wwnn'],
        'source_lss_id': src_lss,
        'target_lss_id': tgt_lss,
        'port_pairs': tgt_helper.backend['port_pairs']
    }
    if lun.group and lun.group.consisgroup_replication_enabled:
        path_data['pprc_consistency_group'] = 'enable'
    LOG.info("PPRC path %(src)s:%(tgt)s will be created.",
             {'src': src_lss, 'tgt': tgt_lss})
    src_helper.create_pprc_path(path_data)
    # check the state of the pprc path
    LOG.debug("Checking the state of the new PPRC path.")
    for attempt in range(4):
        eventlet.sleep(2)
        if self._is_pprc_paths_healthy(pid, switch) == PPRC_PATH_HEALTHY:
            break
        if attempt == 3:
            # Give up: remove the unhealthy path before raising.
            src_helper.delete_pprc_path(pid)
            raise restclient.APIException(
                data=(_("Failed to create PPRC path %(src)s:%(tgt)s.")
                      % {'src': src_lss, 'tgt': tgt_lss}))
    LOG.debug("Create the new PPRC path successfully.")
def enable_replication(self, lun, delete_source=False):
    """Enable replication for ``lun``, reusing an existing PPRC path.

    Looks up PPRC paths starting from the LUN's source LSS; when no
    path exists yet a new pool/LSS pair is selected for the target.

    :returns: the Lun updated by create_replica.
    :raises restclient.APIException: when the existing path is unhealthy.
    """
    src_lss = lun.ds_id[0:2]
    state, lun.pool_lss_pair = self._mm_manager.find_from_pprc_paths(
        src_lss)
    LOG.debug("enable_replication: pool_lss_pair is %s.",
              lun.pool_lss_pair)
    if state == PPRC_PATH_UNHEALTHY:
        raise restclient.APIException(
            data=(_("The path(s) for volume %(name)s isn't available "
                    "any more, please make sure the state of the path(s) "
                    "which source LSS is %(lss)s is success.")
                  % {'name': lun.cinder_name, 'lss': src_lss}))
    if state == PPRC_PATH_NOT_EXIST:
        # No usable path: pick the pool behind the source LSS and find
        # a fresh LSS on the target backend.
        pid = self._source_helper.get_pool(src_lss)
        lun.pool_lss_pair = {'source': (pid, src_lss)}
        lun.pool_lss_pair.update(self.find_new_lss_for_target())
    return self.create_replica(lun, delete_source)
def create_pprc_path(self, pool_lss_pair):
    """Ensure a healthy PPRC path between the pair's source/target LSS.

    :param pool_lss_pair: dict with 'source' and 'target' (pool, lss)
        tuples.
    :raises restclient.APIException: when the new path never becomes
        healthy; the half-created path is deleted first.
    """
    src_lss = pool_lss_pair['source'][1]
    tgt_lss = pool_lss_pair['target'][1]
    src_wwnn = self._source.backend['storage_wwnn']
    tgt_wwnn = self._target.backend['storage_wwnn']
    # check whether the pprc path exists and is healthy or not firstly.
    pid = src_wwnn + '_' + src_lss + ':' + tgt_wwnn + '_' + tgt_lss
    state = self._is_pprc_paths_healthy(pid)
    LOG.info("The state of PPRC path %(path)s is %(state)s.",
             {'path': pid, 'state': state})
    if state == PPRC_PATH_HEALTHY:
        return
    # create the pprc path
    LOG.info("PPRC path %(src)s:%(tgt)s will be created.",
             {'src': src_lss, 'tgt': tgt_lss})
    self._source.create_pprc_path({
        'target_system_wwnn': tgt_wwnn,
        'source_lss_id': src_lss,
        'target_lss_id': tgt_lss,
        'port_pairs': self._target.backend['port_pairs']
    })
    # check the state of the pprc path
    LOG.debug("Checking the state of the new PPRC path.")
    for attempt in range(4):
        eventlet.sleep(2)
        if self._is_pprc_paths_healthy(pid) == PPRC_PATH_HEALTHY:
            break
        if attempt == 3:
            # Give up: remove the unhealthy path before raising.
            self._source.delete_pprc_path(pid)
            raise restclient.APIException(
                data=(_("Failed to create PPRC path %(src)s:%(tgt)s.")
                      % {'src': src_lss, 'tgt': tgt_lss}))
    LOG.debug("Create the new PPRC path successfully.")
def wait_pprc_copy_finished(self, vol_ids, state, delete=True):
    """Poll the PPRC pairs for ``vol_ids`` until all reach ``state``.

    :param vol_ids: DS8K volume IDs whose PPRC pairs are watched.
    :param state: desired pair state, e.g. 'full_duplex' or 'suspended'.
    :param delete: when True, delete the watched pairs if an error
        interrupts the wait.
    :raises restclient.APIException: when a pair enters a state it can
        never leave toward the desired one.
    """
    msg = _LI("Wait for PPRC pair to enter into state %s")
    LOG.info(msg, state)
    vol_ids = sorted(vol_ids)
    min_vol_id = min(vol_ids)
    max_vol_id = max(vol_ids)
    # States that can never transition into the desired state; hoisted
    # out of the polling loop since they only depend on ``state``.
    invalid_states = [
        'target_suspended', 'invalid', 'volume_inaccessible'
    ]
    if state == 'full_duplex':
        invalid_states.append('suspended')
    elif state == 'suspended':
        invalid_states.append('valid')
    # BUG FIX: pre-initialize so the finally clause cannot raise
    # NameError when get_pprc_pairs fails on the very first iteration.
    pairs = []
    finished = False
    try:
        while True:
            eventlet.sleep(2)
            pairs = self.get_pprc_pairs(min_vol_id, max_vol_id)
            pairs = [
                p for p in pairs
                if p['source_volume']['name'] in vol_ids
            ]
            finished_pairs = [p for p in pairs if p['state'] == state]
            if len(finished_pairs) == len(pairs):
                finished = True
                break
            unfinished_pairs = [p for p in pairs if p['state'] != state]
            for p in unfinished_pairs:
                if p['state'] in invalid_states:
                    msg = (_('Metro Mirror pair %(id)s enters into '
                             'state %(state)s. ') % {
                                 'id': p['id'],
                                 'state': p['state']
                             })
                    raise restclient.APIException(data=msg)
    finally:
        if not finished and delete:
            # Best-effort cleanup of the pairs we were waiting on.
            pair_ids = {'ids': ','.join([p['id'] for p in pairs])}
            self.delete_pprc_pair_by_pair_id(pair_ids)
def _create_client(self):
    """Create the REST client for the DS8K backend and record its version.

    :raises exception.InvalidParameterValue: when san_password cannot be
        decrypted.
    :raises restclient.APIException: when the storage system cannot be
        reached.
    """
    san_ip = self._get_value('san_ip')
    try:
        password = cryptish.decrypt(self._get_value('san_password'))
    except TypeError:
        raise exception.InvalidParameterValue(
            err=_('Param [san_password] is invalid.'))
    certificate = self._get_certificate(san_ip)
    try:
        client = restclient.RESTScheduler(san_ip,
                                          self._get_value('san_login'),
                                          password,
                                          self._connector_obj,
                                          certificate)
    except restclient.TimeoutException:
        raise restclient.APIException(
            data=(_("Can't connect to %(host)s") % {'host': san_ip}))
    self._client = client
    rest_version = self._get_version()['bundle_version']
    self.backend['rest_version'] = rest_version
    LOG.info("Connection to DS8K storage system %(host)s has been "
             "established successfully, the version of REST is %(rest)s.",
             {'host': self._get_value('san_ip'),
              'rest': rest_version})
def _ensure_vol_not_fc_target(self, vol_hex_id): for cp in self._helper.get_flashcopy(vol_hex_id): if cp['targetvolume']['id'] == vol_hex_id: raise restclient.APIException( data=(_('Volume %s is currently a target of another ' 'FlashCopy operation') % vol_hex_id))
def __init__(self, volume, is_snapshot=False):
    """Wrap a cinder volume or snapshot with DS8K-specific attributes.

    :param volume: the cinder volume (or snapshot) object to wrap.
    :param is_snapshot: True when ``volume`` is a snapshot object.
    :raises restclient.APIException: when 'drivers:os400' names an
        unknown OS400 volume type.
    """
    volume_type_id = volume.get('volume_type_id')
    self.specs = volume_types.get_volume_type_extra_specs(
        volume_type_id) if volume_type_id else {}
    # 'drivers:os400' extra spec selects an IBM i (OS/400) volume model;
    # validated against VALID_OS400_VOLUME_TYPES further below.
    os400 = self.specs.get(
        'drivers:os400', EXTRA_SPECS_DEFAULTS['os400']
    ).strip().upper()
    # Thin provisioning and replication flags come from extra specs;
    # comparisons are done case-insensitively.
    self.type_thin = self.specs.get(
        'drivers:thin_provision', '%s' % EXTRA_SPECS_DEFAULTS['thin']
    ).upper() == 'True'.upper()
    self.type_replication = self.specs.get(
        'replication_enabled',
        '<is> %s' % EXTRA_SPECS_DEFAULTS['replication_enabled']
    ).upper() == strings.METADATA_IS_TRUE
    if volume.provider_location:
        # provider_location holds a dict literal with the backend hex
        # ID assigned at create time; absent for brand-new volumes.
        provider_location = ast.literal_eval(volume.provider_location)
        self.ds_id = provider_location['vol_hex_id']
    else:
        self.ds_id = None
    self.cinder_name = volume.display_name
    self.pool_lss_pair = {}
    self.is_snapshot = is_snapshot
    if self.is_snapshot:
        self.group = (Group(volume.group_snapshot, True)
                      if volume.group_snapshot else None)
        self.size = volume.volume_size
        # ds8k supports at most 16 chars
        self.ds_name = (
            "OS%s:%s" % ('snap', helper.filter_alnum(self.cinder_name))
        )[:16]
    else:
        self.group = Group(volume.group) if volume.group else None
        self.size = volume.size
        self.ds_name = (
            "OS%s:%s" % ('vol', helper.filter_alnum(self.cinder_name))
        )[:16]
        # Replica name/status are only meaningful for real volumes,
        # not snapshots.
        self.replica_ds_name = (
            "OS%s:%s" % ('Replica', helper.filter_alnum(self.cinder_name))
        )[:16]
        self.replication_status = volume.replication_status
        self.replication_driver_data = (
            json.loads(volume.replication_driver_data)
            if volume.replication_driver_data else {})
        if self.replication_driver_data:
            # now only support one replication target.
            replication_target = sorted(
                self.replication_driver_data.values())[0]
            replica_id = replication_target['vol_hex_id']
            # Pool IDs are unknown here; only the LSS (first two hex
            # chars of the volume ID) is recorded for each side.
            self.pool_lss_pair = {
                'source': (None, self.ds_id[0:2]),
                'target': (None, replica_id[0:2])
            }
    if os400:
        if os400 not in VALID_OS400_VOLUME_TYPES.keys():
            raise restclient.APIException(
                data=(_("The OS400 volume type provided, %s, is not "
                        "a valid volume type.") % os400))
        self.type_os400 = os400
        if os400 not in ['050', '099']:
            # Fixed-capacity OS400 models dictate the volume size;
            # presumably '050'/'099' are the variable-size models —
            # TODO confirm against VALID_OS400_VOLUME_TYPES.
            self.size = VALID_OS400_VOLUME_TYPES[os400]
    else:
        self.type_os400 = EXTRA_SPECS_DEFAULTS['os400']

    self.data_type = self._create_datatype(self.type_os400)
    self.os_id = volume.id
    self.status = volume.status
    self.volume = volume
def _assert(self, assert_condition, exception_message=''): if not assert_condition: LOG.error(exception_message) raise restclient.APIException(data=exception_message)