def find_lunmap(self, ig_name, vol_name):
    """Return the lun-map entry matching the given IG and volume.

    Returns the lun-map content dict, or None when no mapping matches.
    """
    try:
        mappings = self.req('lun-maps')['lun-maps']
    except exception.NotFound:
        raise (exception.VolumeDriverException
               (_("can't find lun-map, ig:%(ig)s vol:%(vol)s")
                % {'ig': ig_name, 'vol': vol_name}))

    for link in mappings:
        index = link['href'].split('/')[-1]
        # NOTE(geguileo): There can be races so mapped elements retrieved
        # in the listing may no longer exist.
        try:
            content = self.req('lun-maps', idx=int(index))['content']
        except exception.NotFound:
            continue
        if (content['ig-name'], content['vol-name']) == (ig_name, vol_name):
            return content
    return None
def _clear_block_device(self, device):
    """Deletes a block device."""
    dev_path = self.local_path(device)
    # Only operate on devices this backend is configured to manage.
    if not dev_path or dev_path not in \
            self.configuration.available_devices:
        return
    if os.path.exists(dev_path) and \
            self.configuration.volume_clear != 'none':
        dev_size = self._get_devices_sizes([dev_path])
        volutils.clear_volume(
            dev_size[dev_path], dev_path,
            volume_clear=self.configuration.volume_clear,
            volume_clear_size=self.configuration.volume_clear_size)
    else:
        LOG.warning(_LW("The device %s won't be cleared."), device)

    # NOTE(review): this status check runs whether or not the device was
    # cleared above -- presumably set by an earlier failed delete; confirm.
    if device.status == "error_deleting":
        msg = _("Failed to delete device.")
        LOG.error(msg, resource=device)
        raise exception.VolumeDriverException(msg)
def create_volume(self, volume):
    """Create a LUN on the Synology backend and verify it is healthy."""
    try:
        out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
                               'create',
                               1,
                               name=volume['name'],
                               type=self.CINDER_LUN,
                               location=('/' +
                                         self.config.synology_pool_name),
                               size=volume['size'] * units.Gi)
        self.check_response(out)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Failed to create_volume. [%s]', volume['name'])

    # Guard clause: nothing more to do when the LUN came up healthy.
    if self._check_lun_status_normal(volume['name']):
        return
    message = _('Lun [%s] status is not normal') % volume['name']
    raise exception.VolumeDriverException(message=message)
def get_image_snapshots_to_date(self, date):
    """List Cinder-generated image snapshots created up to *date*."""
    # Named ``query_params`` to avoid shadowing the builtin ``filter``.
    query_params = {
        'sortedBy': 'createTime',
        'target': 'SNAPSHOT',
        'consistency': 'CRASH_CONSISTENT',
        'hasClone': 'No',
        'type': 'CINDER_GENERATED_SNAPSHOT',
        'contain': 'image-',
        'limit': '100',
        'page': '1',
        'sortOrder': 'DESC',
        'since': '1970-01-01T00:00:00',
        'until': date,
    }
    payload = '/' + self.api_version + '/snapshot'
    response = self.get_query(payload, query_params)
    if response.status_code != 200:
        raise exception.VolumeDriverException(
            _('Failed to get image snapshots.'))
    return response.json()['items']
def create_copy(self, src, tgt, src_id, config, opts, full_copy, pool=None):
    """Create a new snapshot using FlashCopy.

    Creates the target vdisk the same size as the source and starts a
    FlashCopy mapping between them; the target is deleted on failure.

    :raises VolumeDriverException: if the source vdisk does not exist.
    """
    # Use lazy %-style log arguments instead of eager ``%`` formatting so
    # the message is only rendered when debug logging is enabled.
    LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s',
              {'tgt': tgt, 'src': src})

    src_attrs = self.get_vdisk_attributes(src)
    if src_attrs is None:
        msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
                 'does not exist') % {'src': src, 'src_id': src_id})
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    src_size = src_attrs['capacity']
    # In case we need to use a specific pool
    if not pool:
        pool = config.storwize_svc_volpool_name
    self.create_vdisk(tgt, src_size, 'b', pool, opts)
    timeout = config.storwize_svc_flashcopy_timeout
    try:
        self.run_flashcopy(src, tgt, timeout, full_copy=full_copy)
    except Exception:
        # Clean up the freshly-created target if FlashCopy fails to start.
        with excutils.save_and_reraise_exception():
            self.delete_vdisk(tgt, True)

    LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
              'vdisk %(src)s',
              {'tgt': tgt, 'src': src})
def _get_iscsi_properties(self, lunmap):
    """Gets iscsi configuration.

    :target_discovered: boolean indicating whether discovery was used
    :target_iqn: the IQN of the iSCSI target
    :target_portal: the portal of the iSCSI target
    :target_lun: the lun of the iSCSI target
    :volume_id: the id of the volume (currently used by xen)
    :auth_method:, :auth_username:, :auth_password: the authentication
        details. Right now, either auth_method is not present meaning no
        authentication, or auth_method == `CHAP` meaning use CHAP with
        the specified credentials.
    :access_mode: the volume access mode allow client used ('rw' or 'ro'
        currently supported)
    multiple connection return
    :target_iqns, :target_portals, :target_luns, which contain lists of
    multiple values. The main portal information is also returned in
    :target_iqn, :target_portal, :target_lun for backward compatibility.
    """
    portals = self.client.get_iscsi_portals()
    if not portals:
        msg = _("XtremIO not configured correctly, no iscsi portals found")
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)
    # Pick the primary portal at random to spread sessions across portals.
    portal = RANDOM.choice(portals)
    # 'ip-addr' is CIDR-like ('a.b.c.d/nn'); keep only the address part.
    portal_addr = ('%(ip)s:%(port)d' %
                   {'ip': portal['ip-addr'].split('/')[0],
                    'port': portal['ip-port']})

    tg_portals = ['%(ip)s:%(port)d' % {'ip': p['ip-addr'].split('/')[0],
                                       'port': p['ip-port']}
                  for p in portals]
    # Every portal maps the same LUN, hence the repeated lun value.
    properties = {'target_discovered': False,
                  'target_iqn': portal['port-address'],
                  'target_lun': lunmap['lun'],
                  'target_portal': portal_addr,
                  'access_mode': 'rw',
                  'target_iqns': [p['port-address'] for p in portals],
                  'target_portals': tg_portals,
                  'target_luns': [lunmap['lun']] * len(portals)}
    return properties
def create_flashcopy_to_consistgrp(self, source, target, consistgrp,
                                   config, opts, full_copy=False,
                                   pool=None):
    """Create a FlashCopy mapping and add to consistent group."""
    LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
              ' from source %(source)s to target %(target)s'
              'Then add the flashcopy to %(cg)s.',
              {'source': source, 'target': target, 'cg': consistgrp})

    src_attrs = self.get_vdisk_attributes(source)
    if src_attrs is None:
        msg = (_('create_copy: Source vdisk %(src)s '
                 'does not exist.') % {'src': source})
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    # Target must be the same size as the source.
    src_size = src_attrs['capacity']
    # In case we need to use a specific pool
    if not pool:
        pool = config.storwize_svc_volpool_name
    self.create_vdisk(target, src_size, 'b', pool, opts)
    # The mapping is attached to the consistency group; it is started
    # together with the group rather than individually here.
    self.ssh.mkfcmap(source, target, full_copy, consistgrp)

    LOG.debug('Leave: create_flashcopy_to_consistgrp: '
              'FlashCopy started from %(source)s to %(target)s.',
              {'source': source, 'target': target})
def failover_volume_host(self, context, vref): LOG.debug('enter: failover_volume_host: vref=%(vref)s', {'vref': vref['name']}) # Make the aux volume writeable. try: tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + vref.name self.target_helpers.stop_relationship(tgt_volume, access=True) try: self.target_helpers.start_relationship(tgt_volume, 'aux') except exception.VolumeBackendAPIException as e: LOG.error('Error running startrcrelationship due to %(err)s.', {'err': e}) return except Exception as e: msg = (_('Unable to fail-over the volume %(id)s to the ' 'secondary back-end, error: %(error)s') % { "id": vref['id'], "error": six.text_type(e) }) LOG.exception(msg) raise exception.VolumeDriverException(message=msg)
def _check_iscsi_chap_configuration(self, iscsi_chap_enabled, targets):
    """Verify the requested CHAP setting can be applied to the targets.

    Raises when a target that already serves a logical unit has an
    authentication mode conflicting with the requested CHAP setting.
    """
    logical_units = self._api.get_logical_units()
    target_devices = self._api.get_target_devices()

    for logical_unit in logical_units:
        target_device_id = logical_unit.VirtualTargetDeviceId
        # ``key=`` default binds the loop value at definition time.
        target_device = datacore_utils.get_first(
            lambda device, key=target_device_id: device.Id == key,
            target_devices)
        target_port_id = target_device.TargetPortId
        target = datacore_utils.get_first_or_default(
            lambda target_port, key=target_port_id: target_port.Id == key,
            targets,
            None)
        # Conflict when CHAP is requested but the port has no
        # authentication, or CHAP is disabled while the port requires it.
        if (target and iscsi_chap_enabled ==
                (target.ServerPortProperties.Authentication == 'None')):
            msg = _("iSCSI CHAP authentication can't be configured for "
                    "target %s. Device exists that served through "
                    "this target.") % target.PortName
            LOG.error(msg)
            raise cinder_exception.VolumeDriverException(message=msg)
def replication_failback(self, volume):
    """Switch a failed-over volume back to its master copy.

    Returns the updated model fields when a relationship exists,
    otherwise None.
    """
    LOG.debug('enter: replication_failback: volume=%(volume)s',
              {'volume': volume['name']})
    aux_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
    # Guard clause: nothing to fail back if no relationship exists.
    if not self.target_helpers.get_relationship_info(aux_vol):
        return None
    try:
        self.target_helpers.stop_relationship(aux_vol, access=True)
        self.target_helpers.start_relationship(aux_vol, 'master')
    except Exception as e:
        msg = (_('Unable to fail-back the volume:%(vol)s to the '
                 'master back-end, error:%(error)s') %
               {"vol": volume['name'], "error": six.text_type(e)})
        LOG.exception(msg)
        raise exception.VolumeDriverException(message=msg)
    return {'replication_status': fields.ReplicationStatus.ENABLED,
            'status': 'available'}
def extend_volume(self, volume, size_gb):
    """Extend volume."""
    LOG.debug("Extend volume")
    try:
        payload = {
            'volume_guid':
                util.get_guid_with_curly_brackets(volume['id']),
            'new_size': size_gb,
        }
        # Send Extend Volume message to Data Node
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.extend',
            **payload)
    except (exception.UnableToProcessHyperScaleCmdOutput,
            exception.ErrorInSendingMsg):
        msg = _('Exception in extend volume %s') % volume['name']
        LOG.exception(msg)
        raise exception.VolumeDriverException(message=msg)
def create_volume(self, volume):
    """Creates a logical volume."""
    # Encrypted volumes are rejected up front.
    if volume.encryption_key_id:
        raise exception.VolumeDriverException(
            message=_("Encryption is not yet supported."))

    LOG.debug("creating volume '%s'", volume.name)
    vol_bytes = int(volume.size) * units.Gi
    chunk_bytes = self.configuration.rbd_store_chunk_size * units.Mi
    # RBD expects the object size as a power-of-two exponent.
    obj_order = int(math.log(chunk_bytes, 2))

    with RADOSClient(self) as client:
        self.RBDProxy().create(client.ioctx,
                               utils.convert_str(volume.name),
                               vol_bytes,
                               obj_order,
                               old_format=False,
                               features=client.features)
def _get_lunid_from_vol(self, volume_name, map_group_name):
    """Look up the LUN id of *volume_name* inside a mapping group.

    :returns: the LUN id, or None when the group does not exist or the
        volume is not mapped in it.
    :raises VolumeDriverException: on any other backend error code.
    """
    map_grp_info = {'cMapGrpName': map_group_name}
    ret = self._call_method('GetMapGrpInfo', map_grp_info)
    if ret['returncode'] == zte_pub.ZTE_SUCCESS:
        lun_num = int(ret['data']['sdwLunNum'])
        # Iterate the first ``lun_num`` entries directly instead of the
        # previous index-based ``range`` loop.
        for lun in ret['data']['tLunInfo'][:lun_num]:
            if volume_name == lun['cVolName']:
                return lun['sdwLunId']
        return None
    elif ret['returncode'] == zte_pub.ZTE_ERR_GROUP_NOT_EXIST:
        return None
    else:
        err_msg = (_('_get_lunid_from_vol:Get lunid from vol fail. '
                     'Group name:%(name)s vol:%(vol)s '
                     'with Return code: %(ret)s.') %
                   {'name': map_group_name,
                    'vol': volume_name,
                    'ret': ret['returncode']})
        raise exception.VolumeDriverException(message=err_msg)
def create_snapshot(self, volume_path, volume_name, volume_id,
                    snapshot_name, deletion_policy=None):
    """Creates a volume snapshot."""
    spec_type = ('com.tintri.api.rest.' + self.api_version +
                 '.dto.domain.beans.cinder.CinderSnapshotSpec')
    request = {
        'typeId': spec_type,
        'file': TClient._remove_prefix(volume_path, tintri_path),
        'vmName': volume_name or snapshot_name,
        'description': snapshot_name + ' (' + volume_id + ')',
        'vmTintriUuid': volume_id,
        'instanceId': volume_id,
        'snapshotCreator': 'Cinder',
        'deletionPolicy': deletion_policy,
    }
    payload = '/' + self.api_version + '/cinder/snapshot'
    response = self.post(payload, request)
    if response.status_code != 200:
        raise exception.VolumeDriverException(
            _('Failed to create snapshot for volume %s.') % volume_path)
    return response.json()[0]
def login(self, username, password):
    """Authenticate against the REST API and return the session cookie."""
    # Payload, header and URL for login
    headers = {
        'content-type': 'application/json',
        'Tintri-Api-Client':
            'Tintri-Cinder-Driver-%s' % TintriDriver.VERSION,
    }
    payload = {
        'username': username,
        'password': password,
        'typeId': 'com.tintri.api.rest.vcommon.dto.rbac.'
                  'RestApiCredentials',
    }
    url = self.api_url + '/' + self.api_version + '/session/login'

    response = requests.post(url,
                             data=json.dumps(payload),
                             headers=headers,
                             verify=self.verify_ssl,
                             cert=self.ssl_cert_path)
    if response.status_code != 200:
        raise exception.VolumeDriverException(
            _('Failed to login for user %s.') % username)
    return response.cookies['JSESSIONID']
def manage_existing_get_size(self, volume, existing_ref):
    """Returns size of volume to be managed by manage_existing.

    When calculating the size, round up to the next GB.

    :param volume: Cinder volume to manage
    :param existing_ref: Driver-specific information used to identify a
    volume
    """
    nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref)
    volume_path = os.path.join(nfs_mount, volume_name)
    try:
        file_size = float(utils.get_file_size(volume_path))
    except OSError:
        raise exception.VolumeDriverException(
            _('Failed to get size of volume %s')
            % existing_ref['source-name'])
    # Round up to the next whole GB.
    return int(math.ceil(file_size / units.Gi))
def _get_node_uuid(self):
    """Return the UUID of the first iSCSI node reported by the backend.

    :raises VolumeDriverException: when the API response lacks a usable
        node list or the first node has no ``uuid`` field.
    """
    try:
        out = self.exec_webapi('SYNO.Core.ISCSI.Node', 'list', 1)
        self.check_response(out)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to _get_node_uuid.'))

    # Validate the response shape before indexing into it.
    if (not self.check_value_valid(out, ['data', 'nodes'], list)
            or 0 >= len(out['data']['nodes'])
            or not self.check_value_valid(out['data']['nodes'][0],
                                          ['uuid'], string_types)):
        msg = _('Failed to _get_node_uuid.')
        raise exception.VolumeDriverException(message=msg)

    return out['data']['nodes'][0]['uuid']
def _find_pool_lss_pair_for_cg(self, lun, excluded_lss):
    """Choose a (pool id, LSS) pair for a LUN in a consistency group.

    Prefers LSSs already cached for the group, then LSSs currently in
    use by the group, and finally newly reserved LSSs for CGs.
    """
    # LSSs previously chosen for this consistency group, if any.
    lss_in_cache = self.consisgroup_cache.get(lun.group.id, set())
    if not lss_in_cache:
        lss_in_cg = self._get_lss_in_cg(lun.group, lun.is_snapshot)
        LOG.debug("LSSs used by CG %(cg)s are %(lss)s.",
                  {'cg': lun.group.id, 'lss': ','.join(lss_in_cg)})
        available_lss = lss_in_cg - excluded_lss
    else:
        available_lss = lss_in_cache - excluded_lss
    if not available_lss:
        available_lss = self._find_lss_for_cg()

    pid, lss = self._find_pool_for_lss(available_lss)
    if pid:
        # Cache the chosen LSS so later LUNs of the same CG reuse it.
        lss_in_cache.add(lss)
        self.consisgroup_cache[lun.group.id] = lss_in_cache
    else:
        raise exception.VolumeDriverException(
            message=_('There are still some available LSSs for CG, '
                      'but they are not in the same node as pool.'))
    return (pid, lss)
def _si_poll_2_1(self, volume, policies, tenant):
    """Poll the storage instance until its op_state is 'available'.

    :raises VolumeDriverException: when the SI is still not available
        after TIMEOUT polls.
    """
    # Initial 4 second sleep required for some Datera versions
    eventlet.sleep(datc.DEFAULT_SI_SLEEP)
    TIMEOUT = 10
    check_url = datc.URL_TEMPLATES['si_inst'](
        policies['default_storage_name']).format(
            datc._get_name(volume['id']))
    for _attempt in range(TIMEOUT):
        si = self._issue_api_request(check_url,
                                     api_version='2.1',
                                     tenant=tenant)['data']
        if si['op_state'] == 'available':
            # Fix: previously a success on the final attempt still raised
            # 'Resource not ready' because only the retry counter was
            # inspected after the loop.
            return
        eventlet.sleep(1)
    raise exception.VolumeDriverException(
        message=_('Resource not ready.'))
def revert_to_snapshot(self, context, volume, snapshot):
    """Revert volume to snapshot.

    Note: the revert process should not change the volume's current size,
    that means if the driver shrank the volume during the process, it
    should extend the volume internally.
    """
    vname = jcom.vname(volume.id)
    sname = jcom.sname(snapshot.id)
    LOG.debug('reverting %(vname)s to %(sname)s',
              {"vname": vname, "sname": sname})

    # Record the size before rollback so it can be restored afterwards.
    vsize = None
    try:
        vsize = self.ra.get_lun(vname).get('volsize')
    except jexc.JDSSResourceNotFoundException as jerr:
        raise exception.VolumeNotFound(volume_id=volume.id) from jerr
    except jexc.JDSSException as jerr:
        raise exception.VolumeBackendAPIException(jerr) from jerr

    if vsize is None:
        raise exception.VolumeDriverException(
            _("unable to identify volume size"))

    try:
        self.ra.rollback_volume_to_snapshot(vname, sname)
    except jexc.JDSSException as jerr:
        raise exception.VolumeBackendAPIException(jerr.message) from jerr

    try:
        # If the rollback shrank the volume, grow it back to the
        # pre-revert size.
        rvsize = self.ra.get_lun(vname).get('volsize')
        if rvsize != vsize:
            self.ra.extend_lun(vname, vsize)
    except jexc.JDSSResourceNotFoundException as jerr:
        raise exception.VolumeNotFound(volume_id=volume.id) from jerr
    except jexc.JDSSException as jerr:
        raise exception.VolumeBackendAPIException(jerr) from jerr
def _clone_lun(self, name, new_name, space_reserved=None,
               qos_policy_group_name=None, src_block=0, dest_block=0,
               block_count=0):
    """Clone LUN with the given handle to the new name.

    :param name: handle of the LUN to clone
    :param new_name: name for the cloned LUN
    :param space_reserved: space reservation flag; defaults to the
        driver-wide ``lun_space_reservation`` setting
    :param qos_policy_group_name: unsupported in 7-Mode; raises if set
    :param src_block: first source block of the range to clone
    :param dest_block: first destination block of the range
    :param block_count: number of blocks to clone (0 = whole LUN)
    """
    if not space_reserved:
        space_reserved = self.lun_space_reservation
    if qos_policy_group_name is not None:
        msg = _('Data ONTAP operating in 7-Mode does not support QoS '
                'policy groups.')
        raise exception.VolumeDriverException(msg)
    metadata = self._get_lun_attr(name, 'metadata')
    path = metadata['Path']
    (parent, _splitter, name) = path.rpartition('/')
    clone_path = '%s/%s' % (parent, new_name)

    # Fix: forward the caller-supplied block range instead of always
    # cloning from block 0 (the parameters were previously ignored).
    self.zapi_client.clone_lun(path, clone_path, name, new_name,
                               space_reserved, src_block=src_block,
                               dest_block=dest_block,
                               block_count=block_count)

    self.vol_refresh_voluntary = True
    luns = self.zapi_client.get_lun_by_args(path=clone_path)
    cloned_lun = luns[0]
    self.zapi_client.set_space_reserve(clone_path, space_reserved)
    clone_meta = self._create_lun_meta(cloned_lun)
    handle = self._create_lun_handle(clone_meta)
    self._add_lun_to_table(
        block_base.NetAppLun(handle, new_name,
                             cloned_lun.get_child_content('size'),
                             clone_meta))
def create_volume_from_snapshot(self, volume, snapshot):
    """Create volume from snapshot.

    The new volume must be at least as large as the snapshot's source
    volume; otherwise a VolumeDriverException is raised.
    """
    bvol_name = self._translate_volume_name(snapshot['name'])
    cvol_name = self._translate_volume_name(volume['name'])
    if volume['size'] < snapshot['volume_size']:
        err_msg = (_('Cloned volume size invalid. '
                     'Clone size: %(cloned_size)s. '
                     'Src volume size: %(volume_size)s.') %
                   {'cloned_size': volume['size'],
                    'volume_size': snapshot['volume_size']})
        raise exception.VolumeDriverException(message=err_msg)
    else:
        volume_size = float(volume['size'] * units.Mi)
        try:
            self._cloned_volume(cvol_name, bvol_name, volume_size,
                                zte_pub.ZTE_SNAPSHOT)
        except Exception:
            # NOTE(review): deliberate retry-once -- clear any stale clone
            # relation on the source and reattempt; a second failure
            # propagates to the caller.
            self._delete_clone_relation_by_volname(bvol_name, False)
            self._cloned_volume(cvol_name, bvol_name, volume_size,
                                zte_pub.ZTE_SNAPSHOT)
def create_cloned_volume(self, volume, src_vref):
    """clone a volume

    The clone must be at least as large as the source volume; otherwise
    a VolumeDriverException is raised.
    """
    bvol_name = self._translate_volume_name(src_vref['name'])
    cvol_name = self._translate_volume_name(volume['name'])
    if volume['size'] < src_vref['size']:
        err_msg = (_('Cloned volume size invalid. '
                     'Clone size: %(cloned_size)s. '
                     'Src volume size: %(volume_size)s.') %
                   {'cloned_size': volume['size'],
                    'volume_size': src_vref['size']})
        raise exception.VolumeDriverException(message=err_msg)
    else:
        volume_size = float(volume['size'] * units.Mi)
        try:
            self._cloned_volume(cvol_name, bvol_name, volume_size,
                                zte_pub.ZTE_VOLUME)
        except Exception:
            # NOTE(review): deliberate retry-once -- clear any stale clone
            # relation on the source and reattempt; a second failure
            # propagates to the caller.
            self._delete_clone_relation_by_volname(bvol_name, False)
            self._cloned_volume(cvol_name, bvol_name, volume_size,
                                zte_pub.ZTE_VOLUME)
def _create_full_snapshot(self, description, name, pool_names, profile_id,
                          src_virtual_disk):
    """Create a full snapshot of a virtual disk.

    The destination pool must reside on one of the two servers hosting
    the source virtual disk.
    """
    pools = self._get_available_disk_pools(pool_names)
    destination_pool = datacore_utils.get_first_or_default(
        lambda pool: (pool.ServerId == src_virtual_disk.FirstHostId or
                      pool.ServerId == src_virtual_disk.SecondHostId),
        pools,
        None)
    if not destination_pool:
        msg = _("Suitable snapshot destination disk pool not found for "
                "virtual disk %s.") % src_virtual_disk.Id
        LOG.error(msg)
        raise cinder_exception.VolumeDriverException(message=msg)
    server = datacore_utils.get_first(
        lambda srv: srv.Id == destination_pool.ServerId,
        self._api.get_servers())
    # Designate a snapshot map store on the server if it has none yet.
    if not server.SnapshotMapStorePoolId:
        self._api.designate_map_store(destination_pool.Id)
    snapshot = self._api.create_snapshot(src_virtual_disk.Id,
                                         name,
                                         description,
                                         destination_pool.Id,
                                         'Full',
                                         False,
                                         profile_id)
    return snapshot
def serve_virtual_disk():
    """Serve the virtual disk to the host over matching FC initiators.

    NOTE(review): nested closure -- ``connector``, ``virtual_disk_id``
    and ``self`` come from the enclosing method's scope.
    """
    # Normalize WWPNs to lowercase without dashes for comparison.
    connector_wwpns = list(wwpn.replace('-', '').lower()
                           for wwpn in connector['wwpns'])

    client = self._get_client(connector['host'], create_new=True)

    available_ports = self._api.get_ports()
    initiators = []
    for port in available_ports:
        port_name = port.PortName.replace('-', '').lower()
        if (port.PortType == 'FibreChannel'
                and port.PortMode == 'Initiator'
                and port_name in connector_wwpns):
            initiators.append(port)
    if not initiators:
        msg = _("Fibre Channel ports not found for "
                "connector: %s") % connector
        LOG.error(msg)
        raise cinder_exception.VolumeDriverException(message=msg)
    else:
        for initiator in initiators:
            if initiator.HostId != client.Id:
                try:
                    self._api.assign_port(client.Id, initiator.Id)
                except datacore_exception.DataCoreException as e:
                    # Port assignment is best-effort; log and continue.
                    LOG.info("Assigning initiator port %(initiator)s "
                             "to client %(client)s failed with "
                             "error: %(error)s",
                             {'initiator': initiator.Id,
                              'client': client.Id,
                              'error': e})

    virtual_logical_units = self._api.serve_virtual_disks_to_host(
        client.Id, [virtual_disk_id])

    return client, virtual_logical_units
def __init__(self, array_addr, array_login, array_passwd, retry, verify):
    """Initialize the ioControl API session and check the API version.

    :param array_addr: address of the array management interface
    :param array_login: login name used to authenticate
    :param array_passwd: password; only its md5 hex digest is sent
    :param retry: request retry count kept for later calls
    :param verify: TLS certificate verification flag for requests
    :raises VolumeDriverException: if the array's API version differs
        from the supported one.
    """
    self.client = "client=openstack"
    self.defhdrs = {"User-Agent": "OpenStack-agent",
                    "Content-Type": "application/json"}
    self.array_addr = array_addr
    self.array_login = array_login
    # NOTE(review): md5 is mandated by the ioControl login protocol, not
    # chosen for security strength.
    self.hashpass = hashlib.md5()
    self.hashpass.update(array_passwd)
    # Fix: the login string was syntactically broken (redacted literal);
    # reconstruct it from the login name and the password hash.
    self.login_content = ("username=" + array_login + "&hash=" +
                          self.hashpass.hexdigest())
    self.retry = retry
    self.verify = verify

    # check the version of the API on the array. We only support 1.1
    # for now.
    resp = requests.get(url=("https://" + array_addr + "/AUTH/Version"),
                        headers=self.defhdrs, verify=self.verify)
    resp.raise_for_status()
    dictresp = resp.json()
    if dictresp["Version"] != self.APIVERSION:
        msg = _("FIO ioControl API version not supported")
        raise exception.VolumeDriverException(message=msg)
    LOG.debug('FIO Connection initialized to %s' % array_addr)
def create_cloned_volume(self, volume, src_volume):
    """Create volume from a source volume."""
    src_name = src_volume['name']
    tgt_name = volume['name']
    LOG.debug('enter: create_cloned_volume: create %(vol)s from %(src)s.',
              {'src': src_name, 'vol': tgt_name})

    # The clone must be at least as large as its source.
    if src_volume['size'] > volume['size']:
        msg = _('create_cloned_volume: Source volume larger than '
                'destination volume')
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    self._create_and_copy_vdisk_data(
        src_name, src_volume['id'], tgt_name, volume['id'],
        dest_vdisk_size=volume['size'] * units.Gi)

    LOG.debug('leave: create_cloned_volume: create %(vol)s from %(src)s.',
              {'src': src_name, 'vol': tgt_name})
def _ensure_target_volume(self, volume):
    """Checks if target configured properly and volume is attached to it

    param: volume: volume structure
    """
    LOG.debug("ensure volume %s assigned to a proper target", volume.id)

    target_name = self.jovian_target_prefix + volume.id

    auth = volume.provider_auth
    if not auth:
        msg = _("volume %s is missing provider_auth") % volume.id
        raise exception.VolumeDriverException(msg)

    (__, auth_username, auth_secret) = auth.split()
    chap_cred = {"name": auth_username,
                 "password": auth_secret}

    if not self.ra.is_target(target_name):
        self._create_target_volume(volume)
        return

    vname = jcom.vname(volume.id)
    if not self.ra.is_target_lun(target_name, vname):
        self._attach_target_volume(target_name, vname)

    try:
        users = self.ra.get_target_user(target_name)
        # Target already carries exactly the expected CHAP user.
        if len(users) == 1 and users[0]['name'] == chap_cred['name']:
            return
        # Fix: remove each stale CHAP user exactly once (previously a
        # single non-matching user was deleted twice), then install the
        # expected credentials.
        for user in users:
            self.ra.delete_target_user(target_name, user['name'])
        self._set_target_credentials(target_name, chap_cred)
    except jexc.JDSSException as jerr:
        # Tear the target down so a later attach recreates it cleanly.
        self.ra.delete_target(target_name)
        raise exception.VolumeBackendAPIException(jerr)
def _map_add_lun(self, volume_name, map_group_name):
    """Add a volume to a mapping group and return its assigned LUN id."""
    request = {'cMapGrpName': map_group_name,
               'sdwLunId': 0,
               'cVolName': volume_name}
    ret = self._call_method('AddVolToGrp', request)

    # Already-mapped volumes are treated the same as a fresh success.
    acceptable_codes = (zte_pub.ZTE_SUCCESS,
                        zte_pub.ZTE_VOLUME_IN_GROUP,
                        zte_pub.ZTE_ERR_VOL_EXISTS)
    if ret['returncode'] in acceptable_codes:
        return self._get_lunid_from_vol(volume_name, map_group_name)

    err_msg = (
        _(
            '_map_add_lun:fail to add vol to grp. group name:%(name)s'
            ' lunid:%(lun)s '
            'vol:%(vol)s with Return code: %(ret)s') %
        {'name': map_group_name,
         'lun': 0,
         'vol': volume_name,
         'ret': ret['returncode']})
    raise exception.VolumeDriverException(message=err_msg)
def terminate_connection(self, volume, connector, **kwargs):
    """Cleanup after an iSCSI connection has been terminated.

    When we clean up a terminated connection between a given connector
    and volume, we:
    1. Translate the given connector to a host name
    2. Remove the volume-to-host mapping if it exists
    3. Delete the host if it has no more mappings (hosts are created
    automatically by this driver when mappings are created)
    """
    # Fix: debug messages must not be wrapped in _() and should use lazy
    # %-style arguments so they are only formatted when debug is enabled.
    LOG.debug('enter: terminate_connection: volume %(vol)s with '
              'connector %(conn)s', {'vol': volume, 'conn': connector})

    vol_name = volume['name']
    if 'host' in connector:
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            msg = (_('terminate_connection: Failed to get host name from'
                     ' connector.'))
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
    else:
        # See bug #1244257
        host_name = None

    info = {}
    if 'wwpns' in connector and host_name:
        target_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
        init_targ_map = self._make_initiator_target_map(connector['wwpns'],
                                                        target_wwpns)
        info = {'driver_volume_type': 'fibre_channel',
                'data': {'initiator_target_map': init_targ_map}}

    self._helpers.unmap_vol_from_host(vol_name, host_name)

    LOG.debug('leave: terminate_connection: volume %(vol)s with '
              'connector %(conn)s', {'vol': volume, 'conn': connector})

    return info