def _get_one_fc_port_for_zone(self, initiator, contr, nsinfos,
                              cfgmap_from_fabrics, fabric_maps):
    """Get one FC port per controller.

    task flow:
    1. Get all the FC port from the array.
    2. Filter out ports belonged to the specific controller
       and the status is connected.
    3. Filter out ports connected to the fabric configured
       in cinder.conf.
    4. Get active zones set from switch.
    5. Find a port according to three cases.

    :param initiator: host initiator WWN.
    :param contr: controller key used to index fabric_maps.
    :param nsinfos: per-fabric nameserver info (formatted WWNs).
    :param cfgmap_from_fabrics: per-fabric active zone set maps.
    :param fabric_maps: per-controller map of fabric -> candidate ports.
    :returns: tuple (port, need_new_zone). port is None when the
        initiator reaches no fabric or no candidate port exists;
        need_new_zone is True only for the fewest-zones case.
    """
    LOG.info(
        _LI("Get in function _get_one_fc_port_for_zone. "
            "Initiator: %s"), initiator)
    formatted_initiator = fczm_utils.get_formatted_wwn(initiator)
    fabric_map = fabric_maps[contr]
    if not fabric_map:
        return (None, False)

    port_zone_number_map = {}
    for fabric in fabric_map:
        LOG.info(_LI("Dealing with fabric: %s"), fabric)
        nsinfo = nsinfos[fabric]
        if formatted_initiator not in nsinfo:
            # Initiator is not logged in to this fabric.
            continue

        final_port_list_per_fabric = fabric_map[fabric]
        cfgmap_from_fabric = cfgmap_from_fabrics[fabric]
        zones_members = cfgmap_from_fabric['zones'].values()
        for port in final_port_list_per_fabric:
            port_zone_number_map[port] = 0
            formatted_port = fczm_utils.get_formatted_wwn(port)
            for zones_member in zones_members:
                if formatted_port in zones_member:
                    # For the second case use.
                    if formatted_initiator in zones_member:
                        # First case: found a port in the same
                        # zone with the given initiator.
                        return (port, False)
                    # For the third case use.
                    port_zone_number_map[port] += 1

    # No candidate port visible on any fabric the initiator reaches.
    if not port_zone_number_map:
        return (None, False)

    # Third case: find a port referenced in fewest zone.
    temp_list = sorted(port_zone_number_map.items(),
                       key=lambda d: d[1])
    return (temp_list[0][0], True)
def _get_one_fc_port_for_zone(self, initiator, contr, nsinfos,
                              cfgmap_from_fabrics, fabric_maps):
    """Pick one FC port on the given controller for zoning.

    task flow:
    1. Get all the FC port from the array.
    2. Filter out ports belonged to the specific controller
       and the status is connected.
    3. Filter out ports connected to the fabric configured
       in cinder.conf.
    4. Get active zones set from switch.
    5. Find a port according to three cases.
    """
    LOG.info(_LI("Get in function _get_one_fc_port_for_zone. "
                 "Initiator: %s"), initiator)
    fmt_ini = fczm_utils.get_formatted_wwn(initiator)
    contr_fabrics = fabric_maps[contr]
    if not contr_fabrics:
        return (None, False)

    zone_counts = {}
    for fab in contr_fabrics:
        LOG.info(_LI("Dealing with fabric: %s"), fab)
        if fmt_ini not in nsinfos[fab]:
            continue

        members_per_zone = cfgmap_from_fabrics[fab]['zones'].values()
        for candidate in contr_fabrics[fab]:
            zone_counts[candidate] = 0
            fmt_port = fczm_utils.get_formatted_wwn(candidate)
            for members in members_per_zone:
                if fmt_port not in members:
                    continue
                if fmt_ini in members:
                    # First case: this port already shares a zone
                    # with the initiator.
                    return (candidate, False)
                # Third-case bookkeeping: count zones that already
                # reference this port.
                zone_counts[candidate] += 1

    if not zone_counts:
        return (None, False)

    # Third case: choose the port referenced by the fewest zones.
    least_used = min(zone_counts.items(), key=lambda kv: kv[1])
    return (least_used[0], True)
def terminate_connection_fc(self, volume, connector, **kwargs):
    """Disallow connection from connector.

    Return empty data if other volumes are in the same zone.
    The FibreChannel ZoneManager doesn't remove zones
    if there isn't an initiator_target_map in the
    return of terminate_connection.

    :returns: data - the target_wwns and initiator_target_map if the
        zone is to be removed, otherwise the same map with an empty
        dict for the 'data' key
    """
    host_wwpns = [fczm_utils.get_formatted_wwn(wwpn)
                  for wwpn in connector["wwpns"]]
    vol_name = volume["name"]
    lun_metadata = self._get_lun_attr(vol_name, "metadata")
    self._unmap_lun(lun_metadata["Path"], host_wwpns)
    LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s",
              {"name": vol_name, "initiators": host_wwpns})

    info = {"driver_volume_type": "fibre_channel", "data": {}}
    if not self._has_luns_mapped_to_initiators(host_wwpns):
        # No more exports for this host, so tear down zone.
        LOG.info(_LI("Need to remove FC Zone, building initiator "
                     "target map"))
        target_wwpns, initiator_target_map, num_paths = (
            self._build_initiator_target_map(connector))
        info["data"] = {"target_wwn": target_wwpns,
                        "initiator_target_map": initiator_target_map}
    return info
def get_san_context(self, target_wwn_list):
    """Lookup SAN context for visible end devices.

    Look up each SAN configured and return a map of SAN (fabric IP)
    to list of target WWNs visible to the fabric.
    """
    formatted_target_list = []
    fabric_map = {}
    fabrics = [x.strip() for x in
               self.configuration.fc_fabric_names.split(",")]
    LOG.debug("Fabric List: %s", fabrics)
    LOG.debug("Target wwn List: %s", target_wwn_list)
    if len(fabrics) > 0:
        for t in target_wwn_list:
            formatted_target_list.append(
                zm_utils.get_formatted_wwn(t.lower()))
        LOG.debug("Formatted Target wwn List: %s", formatted_target_list)
        for fabric_name in fabrics:
            fabric_ip = self.fabric_configs[fabric_name].safe_get(
                "cisco_fc_fabric_address")
            fabric_user = self.fabric_configs[fabric_name].safe_get(
                "cisco_fc_fabric_user")
            fabric_pwd = self.fabric_configs[fabric_name].safe_get(
                "cisco_fc_fabric_password")
            fabric_port = self.fabric_configs[fabric_name].safe_get(
                "cisco_fc_fabric_port")
            zoning_vsan = self.fabric_configs[fabric_name].safe_get(
                "cisco_zoning_vsan")

            # Get name server data from fabric and get the targets
            # logged in.
            nsinfo = None
            try:
                conn = importutils.import_object(
                    self.configuration.cisco_sb_connector,
                    ipaddress=fabric_ip,
                    username=fabric_user,
                    password=fabric_pwd,
                    port=fabric_port,
                    vsan=zoning_vsan,
                )
                nsinfo = conn.get_nameserver_info()
                LOG.debug("show fcns database info from fabric: %s",
                          nsinfo)
                conn.cleanup()
            except exception.CiscoZoningCliException:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Error getting show fcns database "
                                      "info."))
            except Exception:
                msg = _("Failed to get show fcns database info.")
                LOG.exception(msg)
                raise exception.FCZoneDriverException(msg)

            # Materialize with list(): under py3 filter() returns a lazy
            # iterator, which does not support the indexed assignment
            # performed below.
            visible_targets = list(filter(
                lambda x: x in formatted_target_list, nsinfo))
            if visible_targets:
                LOG.info(_LI("Filtered targets for SAN is: %s"),
                         {fabric_name: visible_targets})
                # getting rid of the ':' before returning
                for idx, elem in enumerate(visible_targets):
                    visible_targets[idx] = six.text_type(
                        visible_targets[idx]).replace(":", "")
                fabric_map[fabric_name] = visible_targets
            else:
                LOG.debug("No targets are in the fcns info for SAN %s",
                          fabric_name)
    LOG.debug("Return SAN context output: %s", fabric_map)
    return fabric_map
def get_san_context(self, target_wwn_list):
    """Lookup SAN context for visible end devices.

    Look up each SAN configured and return a map of SAN (fabric IP)
    to list of target WWNs visible to the fabric.
    """
    formatted_target_list = []
    fabric_map = {}
    fc_fabric_names = self.configuration.fc_fabric_names
    fabrics = [x.strip() for x in fc_fabric_names.split(",")]
    LOG.debug("Fabric List: %(fabrics)s", {"fabrics": fabrics})
    LOG.debug("Target WWN list: %(targetwwns)s",
              {"targetwwns": target_wwn_list})
    if len(fabrics) > 0:
        for t in target_wwn_list:
            formatted_target_list.append(utils.get_formatted_wwn(t))
        LOG.debug("Formatted target WWN list: %(targetlist)s",
                  {"targetlist": formatted_target_list})
        for fabric_name in fabrics:
            conn = self._get_southbound_client(fabric_name)

            # Get name server data from fabric and get the targets
            # logged in.
            nsinfo = None
            try:
                nsinfo = conn.get_nameserver_info()
                LOG.debug("Name server info from fabric: %(nsinfo)s",
                          {"nsinfo": nsinfo})
            except (exception.BrocadeZoningCliException,
                    exception.BrocadeZoningHttpException):
                if not conn.is_supported_firmware():
                    msg = _("Unsupported firmware on switch %s. Make sure "
                            "switch is running firmware v6.4 or higher"
                            ) % conn.switch_ip
                    LOG.exception(msg)
                    raise exception.FCZoneDriverException(msg)
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Error getting name server info."))
            except Exception:
                msg = _("Failed to get name server info.")
                LOG.exception(msg)
                raise exception.FCZoneDriverException(msg)
            finally:
                conn.cleanup()

            # Materialize with list(): under py3 filter() returns a lazy
            # iterator, which does not support the indexed assignment
            # performed below.
            visible_targets = list(filter(
                lambda x: x in formatted_target_list, nsinfo))
            if visible_targets:
                LOG.info(_LI("Filtered targets for SAN is: %(targets)s"),
                         {"targets": visible_targets})
                # getting rid of the ':' before returning
                for idx, elem in enumerate(visible_targets):
                    visible_targets[idx] = str(
                        visible_targets[idx]).replace(":", "")
                fabric_map[fabric_name] = visible_targets
            else:
                LOG.debug("No targets found in the nameserver "
                          "for fabric: %(fabric)s",
                          {"fabric": fabric_name})
    LOG.debug("Return SAN context output: %(fabricmap)s",
              {"fabricmap": fabric_map})
    return fabric_map
def test_initialize_connection_fc_volume_not_mapped_host_does_not_exist(
        self):
    """FC attach creates the host when no mapping or host exists."""
    fake_connector = {"wwpns": [eseries_fake.WWPN]}
    self.library.driver_protocol = "FC"
    client = self.library._client
    self.mock_object(client, "get_volume_mappings_for_volume",
                     mock.Mock(return_value=[]))
    self.mock_object(client, "list_hosts", mock.Mock(return_value=[]))
    self.mock_object(client, "create_host_with_ports",
                     mock.Mock(return_value=eseries_fake.HOST))
    self.mock_object(host_mapper, "map_volume_to_single_host",
                     mock.Mock(return_value=eseries_fake.VOLUME_MAPPING))

    self.library.initialize_connection_fc(get_fake_volume(),
                                          fake_connector)

    client.create_host_with_ports.assert_called_once_with(
        mock.ANY, mock.ANY,
        [fczm_utils.get_formatted_wwn(eseries_fake.WWPN)],
        port_type="fc", group_id=None)
def terminate_connection_fc(self, volume, connector, **kwargs):
    """Disallow connection from connector.

    Return empty data if other volumes are in the same zone.
    The FibreChannel ZoneManager doesn't remove zones
    if there isn't an initiator_target_map in the
    return of terminate_connection.

    :returns: data - the target_wwns and initiator_target_map if the
        zone is to be removed, otherwise the same map with an empty
        dict for the 'data' key
    """
    vol_name = volume['name']
    if connector is None:
        wwpns = []
        LOG.debug('Unmapping LUN %(name)s from all initiators',
                  {'name': vol_name})
    else:
        wwpns = [fczm_utils.get_formatted_wwn(w)
                 for w in connector['wwpns']]
        LOG.debug("Unmapping LUN %(name)s from the initiators "
                  "%(initiator_name)s",
                  {'name': vol_name, 'initiator_name': wwpns})

    lun_path = self._get_lun_attr(vol_name, 'metadata')['Path']
    self._unmap_lun(lun_path, wwpns)

    info = {'driver_volume_type': 'fibre_channel', 'data': {}}
    if connector and not self._has_luns_mapped_to_initiators(wwpns):
        # No more exports for this host, so tear down zone.
        LOG.info("Need to remove FC Zone, building initiator target map")
        targets, it_map, num_paths = (
            self._build_initiator_target_map(connector))
        info['data'] = {'target_wwn': targets,
                        'initiator_target_map': it_map}
    return info
def terminate_connection_fc(self, volume, connector, **kwargs):
    """Disallow connection from connector.

    Return empty data if other volumes are in the same zone.
    The FibreChannel ZoneManager doesn't remove zones
    if there isn't an initiator_target_map in the
    return of terminate_connection.

    :returns: data - the target_wwns and initiator_target_map if the
        zone is to be removed, otherwise the same map with an empty
        dict for the 'data' key
    """
    eseries_vol = self._get_volume(volume['name_id'])
    fc_initiators = [fczm_utils.get_formatted_wwn(wwpn)
                     for wwpn in connector['wwpns']]
    host = self._get_host_with_matching_port(fc_initiators)

    # There can only be one or zero mappings on a volume in E-Series
    mappings = eseries_vol.get('listOfMappings', [])
    mapping = mappings[0] if mappings else None
    if not mapping:
        raise eseries_exc.VolumeNotMapped(volume_id=volume['id'],
                                          host=host['label'])
    host_mapper.unmap_volume_from_host(self._client, volume, host,
                                       mapping)

    info = {'driver_volume_type': 'fibre_channel', 'data': {}}
    remaining_maps = self._client.get_volume_mappings_for_host(
        host['hostRef'])
    if len(remaining_maps) == 0:
        # No more exports for this host, so tear down zone.
        LOG.info(_LI("Need to remove FC Zone, building initiator "
                     "target map."))
        target_wwpns, initiator_target_map, num_paths = (
            self._build_initiator_target_map_fc(connector))
        info['data'] = {'target_wwn': target_wwpns,
                        'initiator_target_map': initiator_target_map}
    return info
def terminate_connection_fc(self, volume, connector, **kwargs):
    """Disallow connection from connector.

    Return empty data if other volumes are in the same zone.
    The FibreChannel ZoneManager doesn't remove zones
    if there isn't an initiator_target_map in the
    return of terminate_connection.

    :returns: data - the target_wwns and initiator_target_map if the
        zone is to be removed, otherwise the same map with an empty
        dict for the 'data' key
    """
    lun_name = volume['name']
    if connector is None:
        initiator_wwpns = []
        LOG.debug('Unmapping LUN %(name)s from all initiators',
                  {'name': lun_name})
    else:
        initiator_wwpns = []
        for wwpn in connector['wwpns']:
            initiator_wwpns.append(fczm_utils.get_formatted_wwn(wwpn))
        LOG.debug("Unmapping LUN %(name)s from the initiators "
                  "%(initiator_name)s",
                  {'name': lun_name, 'initiator_name': initiator_wwpns})

    metadata = self._get_lun_attr(lun_name, 'metadata')
    self._unmap_lun(metadata['Path'], initiator_wwpns)

    result = {'driver_volume_type': 'fibre_channel', 'data': {}}
    tear_down = (connector and
                 not self._has_luns_mapped_to_initiators(initiator_wwpns))
    if tear_down:
        # No more exports for this host, so tear down zone.
        LOG.info("Need to remove FC Zone, building initiator target map")
        wwpns, it_map, num_paths = (
            self._build_initiator_target_map(connector))
        result['data'] = {'target_wwn': wwpns,
                          'initiator_target_map': it_map}
    return result
def terminate_connection_fc(self, volume, connector, **kwargs):
    """Disallow connection from connector.

    Return empty data if other volumes are in the same zone.
    The FibreChannel ZoneManager doesn't remove zones
    if there isn't an initiator_target_map in the
    return of terminate_connection.

    :returns: data - the target_wwns and initiator_target_map if the
        zone is to be removed, otherwise the same map with an empty
        dict for the 'data' key
    """
    eseries_vol = self._get_volume(volume['name_id'])
    wwpns = [fczm_utils.get_formatted_wwn(wwpn)
             for wwpn in connector['wwpns']]
    host = self._get_host_with_matching_port(wwpns)

    # There can only be one or zero mappings on a volume in E-Series
    mappings = eseries_vol.get('listOfMappings', [])
    mapping = mappings[0] if mappings else None
    if not mapping:
        msg = _("Mapping not found for %(vol)s to host %(ht)s.")
        raise exception.NotFound(msg % {'vol': eseries_vol['volumeRef'],
                                        'ht': host['hostRef']})
    self._client.delete_volume_mapping(mapping['lunMappingRef'])

    info = {'driver_volume_type': 'fibre_channel', 'data': {}}
    remaining = self._client.get_volume_mappings_for_host(host['hostRef'])
    if len(remaining) == 0:
        # No more exports for this host, so tear down zone.
        LOG.info(_LI("Need to remove FC Zone, building initiator "
                     "target map."))
        target_wwpns, initiator_target_map, num_paths = (
            self._build_initiator_target_map_fc(connector))
        info['data'] = {'target_wwn': target_wwpns,
                        'initiator_target_map': initiator_target_map}
    return info
def test_initialize_connection_fc_volume_not_mapped_host_does_not_exist(
        self):
    """A new host is created when FC attach finds no host or mapping."""
    self.library.driver_protocol = 'FC'
    connector = {'wwpns': [eseries_fake.WWPN]}
    stubs = (
        (self.library._client, 'get_volume_mappings_for_volume', []),
        (self.library._client, 'list_hosts', []),
        (self.library._client, 'create_host_with_ports',
         eseries_fake.HOST),
        (host_mapper, 'map_volume_to_single_host',
         eseries_fake.VOLUME_MAPPING),
    )
    for target, attr, retval in stubs:
        self.mock_object(target, attr, mock.Mock(return_value=retval))

    self.library.initialize_connection_fc(get_fake_volume(), connector)

    expected_ports = [fczm_utils.get_formatted_wwn(eseries_fake.WWPN)]
    self.library._client.create_host_with_ports.assert_called_once_with(
        mock.ANY, mock.ANY, expected_ports, port_type='fc',
        group_id=None)
def terminate_connection_fc(self, volume, connector, **kwargs):
    """Disallow connection from connector.

    Return empty data if other volumes are in the same zone.
    The FibreChannel ZoneManager doesn't remove zones
    if there isn't an initiator_target_map in the
    return of terminate_connection.

    :returns: data - the target_wwns and initiator_target_map if the
        zone is to be removed, otherwise the same map with an empty
        dict for the 'data' key
    """
    vol = self._get_volume(volume["name_id"])
    initiators = []
    for wwpn in connector["wwpns"]:
        initiators.append(fczm_utils.get_formatted_wwn(wwpn))
    host = self._get_host_with_matching_port(initiators)

    # There can only be one or zero mappings on a volume in E-Series
    all_mappings = vol.get("listOfMappings", [])
    current_mapping = all_mappings[0] if all_mappings else None
    if not current_mapping:
        raise eseries_exc.VolumeNotMapped(volume_id=volume["id"],
                                          host=host["label"])
    host_mapper.unmap_volume_from_host(self._client, volume, host,
                                       current_mapping)

    info = {"driver_volume_type": "fibre_channel", "data": {}}
    if len(self._client.get_volume_mappings_for_host(
            host["hostRef"])) == 0:
        # No more exports for this host, so tear down zone.
        LOG.info(_LI("Need to remove FC Zone, building initiator "
                     "target map."))
        target_wwpns, initiator_target_map, num_paths = (
            self._build_initiator_target_map_fc(connector))
        info["data"] = {"target_wwn": target_wwpns,
                        "initiator_target_map": initiator_target_map}
    return info
def _check_fc_port_and_init(self, wwns, hostid, fabric_map, nsinfos):
    """Check FC port on array and wwn on host is connected to switch.

    Raises when no array FC port is connected to a switch, or when
    none of the host initiators is connected to a switch.
    """
    if not fabric_map:
        msg = _('No FC port on array is connected to switch.')
        LOG.error(msg)
        raise exception.CinderException(msg)

    any_wwn_on_switch = False
    for wwn in wwns:
        fmt_wwn = fczm_utils.get_formatted_wwn(wwn)
        for fabric in fabric_map:
            if fmt_wwn in nsinfos[fabric]:
                # Initiator is logged in to this fabric; register it
                # on the array and move to the next initiator.
                any_wwn_on_switch = True
                self.client.ensure_fc_initiator_added(wwn, hostid)
                break

    if not any_wwn_on_switch:
        msg = _('No wwn on host is connected to switch.')
        LOG.error(msg)
        raise exception.CinderException(msg)
def delete_connection(self, fabric, initiator_target_map, host_name=None,
                      storage_system=None):
    """Concrete implementation of delete_connection.

    Based on zoning policy and state of each I-T pair, list of zones
    are created for deletion. The zones are either updated deleted based
    on the policy and attach/detach state of each I-T pair.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    """
    LOG.info(_LI("BrcdFCZoneDriver - Delete connection for fabric "
                 "%(fabric)s for I-T map: %(i_t_map)s"),
             {'fabric': fabric, 'i_t_map': initiator_target_map})
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'zoning_policy')
    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        'zone_name_prefix')
    zone_activate = self.fabric_configs[fabric].safe_get(
        'zone_activate')
    if zoning_policy_fab:
        # Fabric-level policy overrides the global one.
        zoning_policy = zoning_policy_fab
    LOG.info(_LI("Zoning policy for fabric %(policy)s"),
             {'policy': zoning_policy})
    conn = self._get_southbound_client(fabric)
    cfgmap_from_fabric = self._get_active_zone_set(conn)

    zone_names = []
    if cfgmap_from_fabric.get('zones'):
        zone_names = cfgmap_from_fabric['zones'].keys()

    # Based on zoning policy, get zone member list and push changes to
    # fabric. This operation could result in an update for zone config
    # with new member list or deleting zones from active cfg.
    LOG.debug("zone config from Fabric: %(cfgmap)s",
              {'cfgmap': cfgmap_from_fabric})
    for initiator_key in initiator_target_map.keys():
        initiator = initiator_key.lower()
        formatted_initiator = utils.get_formatted_wwn(initiator)
        zone_map = {}
        zones_to_delete = []
        t_list = initiator_target_map[initiator_key]
        if zoning_policy == 'initiator-target':
            # In this case, zone needs to be deleted.
            for t in t_list:
                target = t.lower()
                zone_name = driver_utils.get_friendly_zone_name(
                    zoning_policy,
                    initiator,
                    target,
                    host_name,
                    storage_system,
                    zone_name_prefix,
                    SUPPORTED_CHARS)
                LOG.debug("Zone name to delete: %(zonename)s",
                          {'zonename': zone_name})
                if len(zone_names) > 0 and (zone_name in zone_names):
                    # delete zone.
                    LOG.debug("Added zone to delete to list: %(zonename)s",
                              {'zonename': zone_name})
                    zones_to_delete.append(zone_name)
        elif zoning_policy == 'initiator':
            zone_members = [formatted_initiator]
            for t in t_list:
                target = t.lower()
                zone_members.append(utils.get_formatted_wwn(target))
            # NOTE(review): 'target' here is the last entry of t_list;
            # an empty t_list would raise NameError -- confirm callers
            # always pass at least one target.
            zone_name = driver_utils.get_friendly_zone_name(
                zoning_policy,
                initiator,
                target,
                host_name,
                storage_system,
                zone_name_prefix,
                SUPPORTED_CHARS)
            if (zone_names and (zone_name in zone_names)):
                # Check to see if there are other zone members
                # in the zone besides the initiator and
                # the targets being removed.
                # list() is required: a py3 filter object is always
                # truthy, which would make the zone-delete branch
                # below unreachable.
                filtered_members = list(filter(
                    lambda x: x not in zone_members,
                    cfgmap_from_fabric['zones'][zone_name]))

                # If there are other zone members, proceed with
                # zone update to remove the targets. Otherwise,
                # delete the zone.
                if filtered_members:
                    zone_members.remove(formatted_initiator)
                    # Verify that the zone members in target list
                    # are listed in zone definition. If not, remove
                    # the zone members from the list of members
                    # to remove, otherwise switch will return error.
                    zm_list = cfgmap_from_fabric['zones'][zone_name]
                    for t in t_list:
                        formatted_target = utils.get_formatted_wwn(t)
                        if formatted_target not in zm_list:
                            zone_members.remove(formatted_target)
                    if zone_members:
                        LOG.debug("Zone members to remove: "
                                  "%(members)s",
                                  {'members': zone_members})
                        zone_map[zone_name] = zone_members
                else:
                    zones_to_delete.append(zone_name)
        else:
            LOG.warning(_LW("Zoning policy not recognized: %(policy)s"),
                        {'policy': zoning_policy})
        LOG.debug("Zone map to update: %(zonemap)s",
                  {'zonemap': zone_map})
        LOG.debug("Zone list to delete: %(zones)s",
                  {'zones': zones_to_delete})
        try:
            # Update zone membership.
            if zone_map:
                conn.update_zones(zone_map, zone_activate,
                                  fc_zone_constants.ZONE_REMOVE,
                                  cfgmap_from_fabric)
            # Delete zones
            if zones_to_delete:
                # Switch expects a ';'-separated list of zone names.
                zone_name_string = ';'.join(zones_to_delete)
                conn.delete_zones(zone_name_string, zone_activate,
                                  cfgmap_from_fabric)
        except (exception.BrocadeZoningCliException,
                exception.BrocadeZoningHttpException) as brocade_ex:
            raise exception.FCZoneDriverException(brocade_ex)
        except Exception:
            msg = _("Failed to update or delete zoning "
                    "configuration.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
        finally:
            conn.cleanup()
def add_connection(self, fabric, initiator_target_map, host_name=None,
                   storage_system=None):
    """Concrete implementation of add_connection.

    Based on zoning policy and state of each I-T pair, list of zone
    members are created and pushed to the fabric to add zones. The
    new zones created or zones updated are activated based on isActivate
    flag set in cinder.conf returned by volume driver after attach
    operation.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    """
    LOG.info(_LI("BrcdFCZoneDriver - Add connection for fabric "
                 "%(fabric)s for I-T map: %(i_t_map)s"),
             {"fabric": fabric, "i_t_map": initiator_target_map})
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        "zoning_policy")
    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        "zone_name_prefix")
    zone_activate = self.fabric_configs[fabric].safe_get("zone_activate")
    if zoning_policy_fab:
        # Fabric-level policy overrides the global one.
        zoning_policy = zoning_policy_fab
    LOG.info(_LI("Zoning policy for Fabric %(policy)s"),
             {"policy": zoning_policy})
    if zoning_policy != "initiator" and zoning_policy != "initiator-target":
        LOG.info(_LI("Zoning policy is not valid, "
                     "no zoning will be performed."))
        return

    client = self._get_southbound_client(fabric)
    cfgmap_from_fabric = self._get_active_zone_set(client)

    zone_names = []
    if cfgmap_from_fabric.get("zones"):
        zone_names = cfgmap_from_fabric["zones"].keys()

    # based on zoning policy, create zone member list and
    # push changes to fabric.
    for initiator_key in initiator_target_map.keys():
        zone_map = {}
        initiator = initiator_key.lower()
        target_list = initiator_target_map[initiator_key]
        if zoning_policy == "initiator-target":
            for target in target_list:
                zone_members = [utils.get_formatted_wwn(initiator),
                                utils.get_formatted_wwn(target)]
                zone_name = driver_utils.get_friendly_zone_name(
                    zoning_policy, initiator, target, host_name,
                    storage_system, zone_name_prefix, SUPPORTED_CHARS)
                if (len(cfgmap_from_fabric) == 0 or
                        (zone_name not in zone_names)):
                    zone_map[zone_name] = zone_members
                else:
                    # This is I-T zoning, skip if zone already exists.
                    LOG.info(_LI("Zone exists in I-T mode. Skipping "
                                 "zone creation for %(zonename)s"),
                             {"zonename": zone_name})
        elif zoning_policy == "initiator":
            zone_members = [utils.get_formatted_wwn(initiator)]
            for target in target_list:
                zone_members.append(utils.get_formatted_wwn(target))
            # NOTE(review): 'target' here is the last entry of
            # target_list; an empty list would raise NameError --
            # confirm callers always pass at least one target.
            zone_name = driver_utils.get_friendly_zone_name(
                zoning_policy, initiator, target, host_name,
                storage_system, zone_name_prefix, SUPPORTED_CHARS)
            if len(zone_names) > 0 and (zone_name in zone_names):
                # list() is required: under py3 filter() returns a
                # lazy iterator and list + iterator raises TypeError.
                zone_members = zone_members + list(filter(
                    lambda x: x not in zone_members,
                    cfgmap_from_fabric["zones"][zone_name]))
            zone_map[zone_name] = zone_members

        LOG.info(_LI("Zone map to add: %(zonemap)s"),
                 {"zonemap": zone_map})
        if len(zone_map) > 0:
            try:
                client.add_zones(zone_map, zone_activate,
                                 cfgmap_from_fabric)
                client.cleanup()
            except (exception.BrocadeZoningCliException,
                    exception.BrocadeZoningHttpException) as brocade_ex:
                raise exception.FCZoneDriverException(brocade_ex)
            except Exception:
                msg = _("Failed to add zoning configuration.")
                LOG.exception(msg)
                raise exception.FCZoneDriverException(msg)
            LOG.debug("Zones added successfully: %(zonemap)s",
                      {"zonemap": zone_map})
def add_connection(self, fabric, initiator_target_map, host_name=None,
                   storage_system=None):
    """Concrete implementation of add_connection.

    Based on zoning policy and state of each I-T pair, list of zone
    members are created and pushed to the fabric to add zones. The
    new zones created or zones updated are activated based on isActivate
    flag set in cinder.conf returned by volume driver after attach
    operation.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    """
    LOG.info(_LI("BrcdFCZoneDriver - Add connection for fabric "
                 "%(fabric)s for I-T map: %(i_t_map)s"),
             {'fabric': fabric, 'i_t_map': initiator_target_map})
    # Fabric-level settings override the global zoning policy.
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'zoning_policy')
    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        'zone_name_prefix')
    zone_activate = self.fabric_configs[fabric].safe_get(
        'zone_activate')
    if zoning_policy_fab:
        zoning_policy = zoning_policy_fab
    LOG.info(_LI("Zoning policy for Fabric %(policy)s"),
             {'policy': zoning_policy})
    if (zoning_policy != 'initiator'
            and zoning_policy != 'initiator-target'):
        LOG.info(_LI("Zoning policy is not valid, "
                     "no zoning will be performed."))
        return

    client = self._get_southbound_client(fabric)
    cfgmap_from_fabric = self._get_active_zone_set(client)

    zone_names = []
    if cfgmap_from_fabric.get('zones'):
        zone_names = cfgmap_from_fabric['zones'].keys()

    # based on zoning policy, create zone member list and
    # push changes to fabric.
    for initiator_key in initiator_target_map.keys():
        zone_map = {}          # zones to create on the fabric
        zone_update_map = {}   # existing zones to extend with members
        initiator = initiator_key.lower()
        target_list = initiator_target_map[initiator_key]
        if zoning_policy == 'initiator-target':
            # One zone per I-T pair.
            for target in target_list:
                zone_members = [utils.get_formatted_wwn(initiator),
                                utils.get_formatted_wwn(target)]
                zone_name = driver_utils.get_friendly_zone_name(
                    zoning_policy,
                    initiator,
                    target,
                    host_name,
                    storage_system,
                    zone_name_prefix,
                    SUPPORTED_CHARS)
                if (len(cfgmap_from_fabric) == 0 or (
                        zone_name not in zone_names)):
                    zone_map[zone_name] = zone_members
                else:
                    # This is I-T zoning, skip if zone already exists.
                    LOG.info(_LI("Zone exists in I-T mode. Skipping "
                                 "zone creation for %(zonename)s"),
                             {'zonename': zone_name})
        elif zoning_policy == 'initiator':
            # One zone per initiator containing all of its targets.
            zone_members = [utils.get_formatted_wwn(initiator)]
            for target in target_list:
                zone_members.append(utils.get_formatted_wwn(target))
            zone_name = driver_utils.get_friendly_zone_name(
                zoning_policy,
                initiator,
                target,
                host_name,
                storage_system,
                zone_name_prefix,
                SUPPORTED_CHARS)
            # If zone exists, then do a zoneadd to update
            # the zone members in the existing zone. Otherwise,
            # do a zonecreate to create a new zone.
            if len(zone_names) > 0 and (zone_name in zone_names):
                # Verify that the target WWNs are not already members
                # of the existing zone. If so, remove them from the
                # list of members to add, otherwise error will be
                # returned from the switch.
                # NOTE(review): membership is tested with the raw
                # target 't', while the removed value is the formatted
                # WWN -- confirm the active zone set stores raw WWNs.
                for t in target_list:
                    if t in cfgmap_from_fabric['zones'][zone_name]:
                        zone_members.remove(utils.get_formatted_wwn(t))
                if zone_members:
                    zone_update_map[zone_name] = zone_members
            else:
                zone_map[zone_name] = zone_members

        LOG.info(_LI("Zone map to create: %(zonemap)s"),
                 {'zonemap': zone_map})
        LOG.info(_LI("Zone map to update: %(zone_update_map)s"),
                 {'zone_update_map': zone_update_map})
        try:
            if zone_map:
                client.add_zones(zone_map, zone_activate,
                                 cfgmap_from_fabric)
                LOG.debug("Zones created successfully: %(zonemap)s",
                          {'zonemap': zone_map})
            if zone_update_map:
                client.update_zones(zone_update_map, zone_activate,
                                    fc_zone_constants.ZONE_ADD,
                                    cfgmap_from_fabric)
                LOG.debug("Zones updated successfully: %(updatemap)s",
                          {'updatemap': zone_update_map})
        except (exception.BrocadeZoningCliException,
                exception.BrocadeZoningHttpException) as brocade_ex:
            raise exception.FCZoneDriverException(brocade_ex)
        except Exception:
            msg = _("Failed to add or update zoning configuration.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
        finally:
            client.cleanup()
def add_connection(self, fabric, initiator_target_map, host_name=None,
                   storage_system=None):
    """Concrete implementation of add_connection.

    Based on zoning policy and state of each I-T pair, list of zone
    members are created and pushed to the fabric to add zones. The
    new zones created or zones updated are activated based on isActivate
    flag set in cinder.conf returned by volume driver after attach
    operation.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    """
    LOG.debug("Add connection for Fabric: %s", fabric)
    LOG.info("CiscoFCZoneDriver - Add connection "
             "for I-T map: %s", initiator_target_map)
    # Per-fabric connection settings from configuration.
    fabric_ip = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_address')
    fabric_user = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_user')
    fabric_pwd = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_password')
    fabric_port = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_port')
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'cisco_zoning_policy')
    if zoning_policy_fab:
        # Fabric-level policy overrides the global one.
        zoning_policy = zoning_policy_fab
    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        'cisco_zone_name_prefix')
    if not zone_name_prefix:
        # Fall back to the global prefix when none is set per-fabric.
        zone_name_prefix = self.configuration.cisco_zone_name_prefix
    zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')
    LOG.info("Zoning policy for Fabric %s", zoning_policy)
    statusmap_from_fabric = self.get_zoning_status(fabric_ip,
                                                   fabric_user,
                                                   fabric_pwd,
                                                   fabric_port,
                                                   zoning_vsan)
    if statusmap_from_fabric.get('session') == 'none':
        # Only proceed when no other zoning session is active on the
        # switch.
        cfgmap_from_fabric = self.get_active_zone_set(
            fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
        zone_names = []
        if cfgmap_from_fabric.get('zones'):
            zone_names = cfgmap_from_fabric['zones'].keys()
        # based on zoning policy, create zone member list and
        # push changes to fabric.
        for initiator_key in initiator_target_map.keys():
            zone_map = {}          # zones to create
            zone_update_map = {}   # existing zones to extend
            initiator = initiator_key.lower()
            t_list = initiator_target_map[initiator_key]
            if zoning_policy == 'initiator-target':
                # One zone per I-T pair.
                for t in t_list:
                    target = t.lower()
                    zone_members = [
                        zm_utils.get_formatted_wwn(initiator),
                        zm_utils.get_formatted_wwn(target)
                    ]
                    zone_name = (driver_utils.get_friendly_zone_name(
                        zoning_policy,
                        initiator,
                        target,
                        host_name,
                        storage_system,
                        zone_name_prefix,
                        SUPPORTED_CHARS))
                    if (len(cfgmap_from_fabric) == 0 or
                            (zone_name not in zone_names)):
                        zone_map[zone_name] = zone_members
                    else:
                        # This is I-T zoning, skip if zone exists.
                        LOG.info("Zone exists in I-T mode. "
                                 "Skipping zone creation %s",
                                 zone_name)
            elif zoning_policy == 'initiator':
                # One zone per initiator holding all its targets.
                zone_members = [zm_utils.get_formatted_wwn(initiator)]
                for t in t_list:
                    target = t.lower()
                    zone_members.append(
                        zm_utils.get_formatted_wwn(target))
                zone_name = (driver_utils.get_friendly_zone_name(
                    zoning_policy,
                    initiator,
                    target,
                    host_name,
                    storage_system,
                    zone_name_prefix,
                    SUPPORTED_CHARS))
                # If zone exists, then perform an update_zone and add
                # new members into existing zone.
                if zone_name and (zone_name in zone_names):
                    # Keep only members not already in the active zone.
                    zone_members = list(
                        filter(
                            lambda x: x not in cfgmap_from_fabric[
                                'zones'][zone_name],
                            zone_members))
                    if zone_members:
                        zone_update_map[zone_name] = zone_members
                else:
                    zone_map[zone_name] = zone_members
            else:
                msg = _("Zoning Policy: %s, not"
                        " recognized") % zoning_policy
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)
            LOG.info("Zone map to add: %(zone_map)s",
                     {'zone_map': zone_map})
            LOG.info("Zone map to update add: %(zone_update_map)s",
                     {'zone_update_map': zone_update_map})
            if zone_map or zone_update_map:
                conn = None
                try:
                    conn = importutils.import_object(
                        self.configuration.cisco_sb_connector,
                        ipaddress=fabric_ip,
                        username=fabric_user,
                        password=fabric_pwd,
                        port=fabric_port,
                        vsan=zoning_vsan)
                    if zone_map:
                        conn.add_zones(
                            zone_map,
                            self.configuration.cisco_zone_activate,
                            zoning_vsan, cfgmap_from_fabric,
                            statusmap_from_fabric)
                    if zone_update_map:
                        conn.update_zones(
                            zone_update_map,
                            self.configuration.cisco_zone_activate,
                            zoning_vsan, ZoneConstant.ZONE_ADD,
                            cfgmap_from_fabric,
                            statusmap_from_fabric)
                    conn.cleanup()
                except c_exception.CiscoZoningCliException as cisco_ex:
                    msg = _("Exception: %s") % six.text_type(cisco_ex)
                    raise exception.FCZoneDriverException(msg)
                except Exception:
                    msg = _("Failed to add zoning configuration.")
                    LOG.exception(msg)
                    raise exception.FCZoneDriverException(msg)
                LOG.debug("Zones added successfully: %s", zone_map)
            else:
                LOG.debug("Zones already exist - Initiator Target Map: %s",
                          initiator_target_map)
    else:
        LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
def delete_connection(self, fabric, initiator_target_map): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric:%s", fabric) LOG.info(_("CiscoFCZoneDriver - Delete connection for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info(_("Zoning policy for fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push # changes to fabric. This operation could result in an update # for zone config with new member list or deleting zones from # active cfg. 
LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = get_formatted_wwn(initiator) zone_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. for t in t_list: target = t.lower() zone_name = ( self.configuration.cisco_zone_name_prefix + initiator.replace(':', '') + target.replace(':', '')) LOG.debug("Zone name to del: %s", zone_name) if (len(zone_names) > 0 and (zone_name in zone_names)): # delete zone. LOG.debug("Added zone to delete to list: %s", zone_name) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append(get_formatted_wwn(target)) zone_name = self.configuration.cisco_zone_name_prefix \ + initiator.replace(':', '') if (zone_names and (zone_name in zone_names)): filtered_members = filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) # The assumption here is that initiator is always # there in the zone as it is 'initiator' policy. # We find the filtered list and if it is non-empty, # add initiator to it and update zone if filtered # list is empty, we remove that zone. 
LOG.debug("Zone delete - I mode: filtered targets:%s", filtered_members) if filtered_members: filtered_members.append(formatted_initiator) LOG.debug("Filtered zone members to update: %s", filtered_members) zone_map[zone_name] = filtered_members LOG.debug("Filtered zone Map to update: %s", zone_map) else: zones_to_delete.append(zone_name) else: LOG.info(_("Zoning Policy: %s, not recognized"), zoning_policy) LOG.debug("Final Zone map to update: %s", zone_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) # Update zone membership. if zone_map: conn.add_zones( zone_map, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) # Delete zones ~sk. if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ('%s%s' % ( zone_name_string, zones_to_delete[i])) else: zone_name_string = ('%s%s%s' % ( zone_name_string, ';', zones_to_delete[i])) conn.delete_zones(zone_name_string, self.configuration. cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except Exception as e: msg = _("Exception: %s") % six.text_type(e) LOG.error(msg) msg = _("Failed to update or delete zoning configuration") raise exception.FCZoneDriverException(msg) LOG.debug("Zones deleted successfully: %s", zone_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): """Provides the initiator/target map for available SAN contexts. Looks up fcns database of each fc SAN configured to find logged in devices and returns a map of initiator and target port WWNs for each fabric. :param initiator_wwn_list: List of initiator port WWN :param target_wwn_list: List of target port WWN :returns: List -- device wwn map in following format { <San name>: { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'..) 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121'..) } } :raises: Exception when connection to fabric is failed """ device_map = {} formatted_target_list = [] formatted_initiator_list = [] fabric_map = {} fabric_names = self.configuration.fc_fabric_names if not fabric_names: raise exception.InvalidParameterValue( err=_("Missing Fibre Channel SAN configuration " "param - fc_fabric_names")) fabrics = [x.strip() for x in fabric_names.split(',')] LOG.debug("FC Fabric List: %s", fabrics) if fabrics: for t in target_wwn_list: formatted_target_list.append(zm_utils.get_formatted_wwn(t)) for i in initiator_wwn_list: formatted_initiator_list.append(zm_utils.get_formatted_wwn(i)) for fabric_name in fabrics: self.switch_ip = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_address') self.switch_user = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_user') self.switch_pwd = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_password') self.switch_port = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_port') zoning_vsan = self.fabric_configs[fabric_name].safe_get( 'cisco_zoning_vsan') # Get name server data from fabric and find the targets # logged in nsinfo = '' LOG.debug("show fcns database for vsan %s", zoning_vsan) nsinfo = self.get_nameserver_info(zoning_vsan) LOG.debug("Lookup service:fcnsdatabase-%s", nsinfo) LOG.debug("Lookup service:initiator list from caller-%s", formatted_initiator_list) 
LOG.debug("Lookup service:target list from caller-%s", formatted_target_list) visible_targets = [ x for x in nsinfo if x in formatted_target_list ] visible_initiators = [ x for x in nsinfo if x in formatted_initiator_list ] if visible_targets: LOG.debug("Filtered targets is: %s", visible_targets) # getting rid of the : before returning for idx, elem in enumerate(visible_targets): elem = str(elem).replace(':', '') visible_targets[idx] = elem else: LOG.debug( "No targets are in the fcns database" " for vsan %s", zoning_vsan) if visible_initiators: # getting rid of the : before returning ~sk for idx, elem in enumerate(visible_initiators): elem = str(elem).replace(':', '') visible_initiators[idx] = elem else: LOG.debug( "No initiators are in the fcns database" " for vsan %s", zoning_vsan) fabric_map = { 'initiator_port_wwn_list': visible_initiators, 'target_port_wwn_list': visible_targets } device_map[zoning_vsan] = fabric_map LOG.debug("Device map for SAN context: %s", device_map) return device_map
def delete_connection(self, fabric, initiator_target_map): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric:%s", fabric) LOG.info(_("CiscoFCZoneDriver - Delete connection for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info(_("Zoning policy for fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status(fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push # changes to fabric. This operation could result in an update # for zone config with new member list or deleting zones from # active cfg. 
LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = get_formatted_wwn(initiator) zone_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. for t in t_list: target = t.lower() zone_name = ( self.configuration.cisco_zone_name_prefix + initiator.replace(':', '') + target.replace(':', '')) LOG.debug("Zone name to del: %s", zone_name) if (len(zone_names) > 0 and (zone_name in zone_names)): # delete zone. LOG.debug("Added zone to delete to list: %s", zone_name) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append(get_formatted_wwn(target)) zone_name = self.configuration.cisco_zone_name_prefix \ + initiator.replace(':', '') if (zone_names and (zone_name in zone_names)): filtered_members = filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) # The assumption here is that initiator is always # there in the zone as it is 'initiator' policy. # We find the filtered list and if it is non-empty, # add initiator to it and update zone if filtered # list is empty, we remove that zone. 
LOG.debug("Zone delete - I mode: filtered targets:%s", filtered_members) if filtered_members: filtered_members.append(formatted_initiator) LOG.debug("Filtered zone members to update: %s", filtered_members) zone_map[zone_name] = filtered_members LOG.debug("Filtered zone Map to update: %s", zone_map) else: zones_to_delete.append(zone_name) else: LOG.info(_("Zoning Policy: %s, not recognized"), zoning_policy) LOG.debug("Final Zone map to update: %s", zone_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) # Update zone membership. if zone_map: conn.add_zones(zone_map, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) # Delete zones ~sk. if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ( '%s%s' % (zone_name_string, zones_to_delete[i])) else: zone_name_string = ('%s%s%s' % (zone_name_string, ';', zones_to_delete[i])) conn.delete_zones( zone_name_string, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except Exception as e: msg = _("Exception: %s") % six.text_type(e) LOG.error(msg) msg = _("Failed to update or delete zoning configuration") raise exception.FCZoneDriverException(msg) LOG.debug("Zones deleted successfully: %s", zone_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
class NetAppEseriesLibraryTestCase(test.TestCase):
    """Unit tests for the NetApp E-Series library.

    Exercises SSC statistics gathering, iSCSI/FC initialize and terminate
    connection flows, and host lookup against the fake E-Series client.
    """

    def setUp(self):
        super(NetAppEseriesLibraryTestCase, self).setUp()
        kwargs = {'configuration':
                  eseries_fake.create_configuration_eseries()}
        self.library = library.NetAppESeriesLibrary('FAKE', **kwargs)
        self.library._client = eseries_fake.FakeEseriesClient()
        self.library.check_for_setup_error()

    def test_do_setup(self):
        self.mock_object(self.library,
                         '_check_mode_get_or_register_storage_system')
        self.mock_object(es_client, 'RestClient',
                         eseries_fake.FakeEseriesClient)
        mock_check_flags = self.mock_object(na_utils, 'check_flags')
        self.library.do_setup(mock.Mock())

        self.assertTrue(mock_check_flags.called)

    def test_update_ssc_info(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'driveMediaType': 'ssd'}]

        self.library._get_storage_pools = mock.Mock(
            return_value=['test_vg1'])
        self.library._client.list_storage_pools = mock.Mock(return_value=[])
        self.library._client.list_drives = mock.Mock(return_value=drives)

        self.library._update_ssc_info()

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}},
                         self.library._ssc_stats)

    def test_update_ssc_disk_types_ssd(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'driveMediaType': 'ssd'}]
        self.library._client.list_drives = mock.Mock(return_value=drives)

        ssc_stats = self.library._update_ssc_disk_types(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}},
                         ssc_stats)

    def test_update_ssc_disk_types_scsi(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'interfaceType': {'driveType': 'scsi'}}]
        self.library._client.list_drives = mock.Mock(return_value=drives)

        ssc_stats = self.library._update_ssc_disk_types(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'SCSI'}},
                         ssc_stats)

    def test_update_ssc_disk_types_fcal(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'interfaceType': {'driveType': 'fibre'}}]
        self.library._client.list_drives = mock.Mock(return_value=drives)

        ssc_stats = self.library._update_ssc_disk_types(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'FCAL'}},
                         ssc_stats)

    def test_update_ssc_disk_types_sata(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'interfaceType': {'driveType': 'sata'}}]
        self.library._client.list_drives = mock.Mock(return_value=drives)

        ssc_stats = self.library._update_ssc_disk_types(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'SATA'}},
                         ssc_stats)

    def test_update_ssc_disk_types_sas(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'interfaceType': {'driveType': 'sas'}}]
        self.library._client.list_drives = mock.Mock(return_value=drives)

        ssc_stats = self.library._update_ssc_disk_types(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'SAS'}},
                         ssc_stats)

    def test_update_ssc_disk_types_unknown(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'interfaceType': {'driveType': 'unknown'}}]
        self.library._client.list_drives = mock.Mock(return_value=drives)

        ssc_stats = self.library._update_ssc_disk_types(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'unknown'}},
                         ssc_stats)

    def test_update_ssc_disk_types_undefined(self):
        drives = [{'currentVolumeGroupRef': 'test_vg1',
                   'interfaceType': {'driveType': '__UNDEFINED'}}]
        self.library._client.list_drives = mock.Mock(return_value=drives)

        ssc_stats = self.library._update_ssc_disk_types(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_type': 'unknown'}},
                         ssc_stats)

    def test_update_ssc_disk_encryption_SecType_enabled(self):
        pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'enabled'}]
        self.library._client.list_storage_pools = mock.Mock(
            return_value=pools)

        ssc_stats = self.library._update_ssc_disk_encryption(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'true'}},
                         ssc_stats)

    def test_update_ssc_disk_encryption_SecType_unknown(self):
        pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'unknown'}]
        self.library._client.list_storage_pools = mock.Mock(
            return_value=pools)

        ssc_stats = self.library._update_ssc_disk_encryption(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}},
                         ssc_stats)

    def test_update_ssc_disk_encryption_SecType_none(self):
        pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'}]
        self.library._client.list_storage_pools = mock.Mock(
            return_value=pools)

        ssc_stats = self.library._update_ssc_disk_encryption(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}},
                         ssc_stats)

    def test_update_ssc_disk_encryption_SecType_capable(self):
        pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'capable'}]
        self.library._client.list_storage_pools = mock.Mock(
            return_value=pools)

        ssc_stats = self.library._update_ssc_disk_encryption(['test_vg1'])

        self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}},
                         ssc_stats)

    def test_update_ssc_disk_encryption_SecType_garbage(self):
        pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'garbage'}]
        self.library._client.list_storage_pools = mock.Mock(
            return_value=pools)

        ssc_stats = self.library._update_ssc_disk_encryption(['test_vg1'])

        # BUG FIX: the original used assertRaises(TypeError, 'test_vg1', ...)
        # which merely asserts that calling a string raises TypeError, so the
        # test passed without checking anything. An unrecognized securityType
        # must be reported as not encrypted.
        self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}},
                         ssc_stats)

    def test_update_ssc_disk_encryption_multiple(self):
        pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'},
                 {'volumeGroupRef': 'test_vg2', 'securityType': 'enabled'}]
        self.library._client.list_storage_pools = mock.Mock(
            return_value=pools)

        ssc_stats = self.library._update_ssc_disk_encryption(
            ['test_vg1', 'test_vg2'])

        self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'},
                          'test_vg2': {'netapp_disk_encryption': 'true'}},
                         ssc_stats)

    def test_terminate_connection_iscsi_no_hosts(self):
        connector = {'initiator': eseries_fake.INITIATOR_NAME}

        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[]))

        self.assertRaises(exception.NotFound,
                          self.library.terminate_connection_iscsi,
                          get_fake_volume(), connector)

    def test_terminate_connection_iscsi_volume_not_mapped(self):
        connector = {'initiator': eseries_fake.INITIATOR_NAME}
        self.assertRaises(eseries_exc.VolumeNotMapped,
                          self.library.terminate_connection_iscsi,
                          get_fake_volume(), connector)

    def test_terminate_connection_iscsi_volume_mapped(self):
        connector = {'initiator': eseries_fake.INITIATOR_NAME}
        fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
        fake_eseries_volume['listOfMappings'] = [
            eseries_fake.VOLUME_MAPPING
        ]
        self.mock_object(self.library._client, 'list_volumes',
                         mock.Mock(return_value=[fake_eseries_volume]))
        self.mock_object(host_mapper, 'unmap_volume_from_host')

        self.library.terminate_connection_iscsi(get_fake_volume(), connector)

        self.assertTrue(host_mapper.unmap_volume_from_host.called)

    def test_terminate_connection_iscsi_not_mapped_initiator_does_not_exist(
            self):
        connector = {'initiator': eseries_fake.INITIATOR_NAME}
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[eseries_fake.HOST_2]))
        self.assertRaises(exception.NotFound,
                          self.library.terminate_connection_iscsi,
                          get_fake_volume(), connector)

    def test_initialize_connection_iscsi_volume_not_mapped(self):
        connector = {'initiator': eseries_fake.INITIATOR_NAME}
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_volume',
                         mock.Mock(return_value=[]))
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.initialize_connection_iscsi(get_fake_volume(),
                                                 connector)

        self.assertTrue(
            self.library._client.get_volume_mappings_for_volume.called)
        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_initialize_connection_iscsi_volume_not_mapped_host_does_not_exist(
            self):
        connector = {'initiator': eseries_fake.INITIATOR_NAME}
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_volume',
                         mock.Mock(return_value=[]))
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[]))
        self.mock_object(self.library._client, 'create_host_with_ports',
                         mock.Mock(return_value=eseries_fake.HOST))
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.initialize_connection_iscsi(get_fake_volume(),
                                                 connector)

        self.assertTrue(
            self.library._client.get_volume_mappings_for_volume.called)
        self.assertTrue(self.library._client.list_hosts.called)
        self.assertTrue(self.library._client.create_host_with_ports.called)
        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_initialize_connection_iscsi_volume_already_mapped_to_target_host(
            self):
        """Should be a no-op"""
        connector = {'initiator': eseries_fake.INITIATOR_NAME}
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.initialize_connection_iscsi(get_fake_volume(),
                                                 connector)

        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_initialize_connection_iscsi_volume_mapped_to_another_host(self):
        """Should raise error saying multiattach not enabled"""
        connector = {'initiator': eseries_fake.INITIATOR_NAME}
        fake_mapping_to_other_host = copy.deepcopy(
            eseries_fake.VOLUME_MAPPING)
        fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[
            'hostRef']
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             side_effect=exception.NetAppDriverException))

        self.assertRaises(exception.NetAppDriverException,
                          self.library.initialize_connection_iscsi,
                          get_fake_volume(), connector)

        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    @ddt.data(eseries_fake.WWPN,
              fczm_utils.get_formatted_wwn(eseries_fake.WWPN))
    def test_get_host_with_matching_port_wwpn(self, port_id):
        port_ids = [port_id]
        host = copy.deepcopy(eseries_fake.HOST)
        host.update(
            {
                'hostSidePorts': [{'label': 'NewStore', 'type': 'fc',
                                   'address': eseries_fake.WWPN}]
            }
        )
        host_2 = copy.deepcopy(eseries_fake.HOST_2)
        host_2.update(
            {
                'hostSidePorts': [{'label': 'NewStore', 'type': 'fc',
                                   'address': eseries_fake.WWPN_2}]
            }
        )
        host_list = [host, host_2]
        self.mock_object(self.library._client,
                         'list_hosts',
                         mock.Mock(return_value=host_list))

        actual_host = self.library._get_host_with_matching_port(
            port_ids)

        self.assertEqual(host, actual_host)

    def test_get_host_with_matching_port_iqn(self):
        port_ids = [eseries_fake.INITIATOR_NAME]
        host = copy.deepcopy(eseries_fake.HOST)
        host.update(
            {
                'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi',
                                   'address': eseries_fake.INITIATOR_NAME}]
            }
        )
        host_2 = copy.deepcopy(eseries_fake.HOST_2)
        host_2.update(
            {
                'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi',
                                   'address': eseries_fake.INITIATOR_NAME_2}]
            }
        )
        host_list = [host, host_2]
        self.mock_object(self.library._client,
                         'list_hosts',
                         mock.Mock(return_value=host_list))

        actual_host = self.library._get_host_with_matching_port(
            port_ids)

        self.assertEqual(host, actual_host)

    def test_terminate_connection_fc_no_hosts(self):
        connector = {'wwpns': [eseries_fake.WWPN]}

        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[]))

        self.assertRaises(exception.NotFound,
                          self.library.terminate_connection_fc,
                          get_fake_volume(), connector)

    def test_terminate_connection_fc_volume_not_mapped(self):
        connector = {'wwpns': [eseries_fake.WWPN]}
        fake_host = copy.deepcopy(eseries_fake.HOST)
        fake_host['hostSidePorts'] = [{
            'label': 'NewStore',
            'type': 'fc',
            'address': eseries_fake.WWPN
        }]
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[fake_host]))

        self.assertRaises(eseries_exc.VolumeNotMapped,
                          self.library.terminate_connection_fc,
                          get_fake_volume(), connector)

    def test_terminate_connection_fc_volume_mapped(self):
        connector = {'wwpns': [eseries_fake.WWPN]}
        fake_host = copy.deepcopy(eseries_fake.HOST)
        fake_host['hostSidePorts'] = [{
            'label': 'NewStore',
            'type': 'fc',
            'address': eseries_fake.WWPN
        }]
        fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
        fake_eseries_volume['listOfMappings'] = [
            copy.deepcopy(eseries_fake.VOLUME_MAPPING)
        ]
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[fake_host]))
        self.mock_object(self.library._client, 'list_volumes',
                         mock.Mock(return_value=[fake_eseries_volume]))
        self.mock_object(host_mapper, 'unmap_volume_from_host')

        self.library.terminate_connection_fc(get_fake_volume(), connector)

        self.assertTrue(host_mapper.unmap_volume_from_host.called)

    def test_terminate_connection_fc_volume_mapped_no_cleanup_zone(self):
        connector = {'wwpns': [eseries_fake.WWPN]}
        fake_host = copy.deepcopy(eseries_fake.HOST)
        fake_host['hostSidePorts'] = [{
            'label': 'NewStore',
            'type': 'fc',
            'address': eseries_fake.WWPN
        }]
        expected_target_info = {
            'driver_volume_type': 'fibre_channel',
            'data': {},
        }
        fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
        fake_eseries_volume['listOfMappings'] = [
            copy.deepcopy(eseries_fake.VOLUME_MAPPING)
        ]
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[fake_host]))
        self.mock_object(self.library._client, 'list_volumes',
                         mock.Mock(return_value=[fake_eseries_volume]))
        self.mock_object(host_mapper, 'unmap_volume_from_host')
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_host',
                         mock.Mock(return_value=[copy.deepcopy(
                             eseries_fake.VOLUME_MAPPING)]))

        target_info = self.library.terminate_connection_fc(
            get_fake_volume(), connector)
        self.assertDictEqual(expected_target_info, target_info)

        self.assertTrue(host_mapper.unmap_volume_from_host.called)

    def test_terminate_connection_fc_volume_mapped_cleanup_zone(self):
        connector = {'wwpns': [eseries_fake.WWPN]}
        fake_host = copy.deepcopy(eseries_fake.HOST)
        fake_host['hostSidePorts'] = [{
            'label': 'NewStore',
            'type': 'fc',
            'address': eseries_fake.WWPN
        }]
        expected_target_info = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'target_wwn': [eseries_fake.WWPN_2],
                'initiator_target_map': {
                    eseries_fake.WWPN: [eseries_fake.WWPN_2]
                },
            },
        }
        fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
        fake_eseries_volume['listOfMappings'] = [
            copy.deepcopy(eseries_fake.VOLUME_MAPPING)
        ]
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[fake_host]))
        self.mock_object(self.library._client, 'list_volumes',
                         mock.Mock(return_value=[fake_eseries_volume]))
        self.mock_object(host_mapper, 'unmap_volume_from_host')
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_host',
                         mock.Mock(return_value=[]))

        target_info = self.library.terminate_connection_fc(
            get_fake_volume(), connector)
        self.assertDictEqual(expected_target_info, target_info)

        self.assertTrue(host_mapper.unmap_volume_from_host.called)

    def test_terminate_connection_fc_not_mapped_host_with_wwpn_does_not_exist(
            self):
        connector = {'wwpns': [eseries_fake.WWPN]}
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[eseries_fake.HOST_2]))
        self.assertRaises(exception.NotFound,
                          self.library.terminate_connection_fc,
                          get_fake_volume(), connector)

    def test_initialize_connection_fc_volume_not_mapped(self):
        connector = {'wwpns': [eseries_fake.WWPN]}
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_volume',
                         mock.Mock(return_value=[]))
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))
        expected_target_info = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'target_discovered': True,
                'target_lun': 0,
                'target_wwn': [eseries_fake.WWPN_2],
                'access_mode': 'rw',
                'initiator_target_map': {
                    eseries_fake.WWPN: [eseries_fake.WWPN_2]
                },
            },
        }

        target_info = self.library.initialize_connection_fc(
            get_fake_volume(), connector)

        self.assertTrue(
            self.library._client.get_volume_mappings_for_volume.called)
        self.assertTrue(host_mapper.map_volume_to_single_host.called)
        self.assertDictEqual(expected_target_info, target_info)

    def test_initialize_connection_fc_volume_not_mapped_host_does_not_exist(
            self):
        connector = {'wwpns': [eseries_fake.WWPN]}
        self.library.driver_protocol = 'FC'
        self.mock_object(self.library._client,
                         'get_volume_mappings_for_volume',
                         mock.Mock(return_value=[]))
        self.mock_object(self.library._client, 'list_hosts',
                         mock.Mock(return_value=[]))
        self.mock_object(self.library._client, 'create_host_with_ports',
                         mock.Mock(return_value=eseries_fake.HOST))
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.initialize_connection_fc(get_fake_volume(), connector)

        self.library._client.create_host_with_ports.assert_called_once_with(
            mock.ANY, mock.ANY,
            [fczm_utils.get_formatted_wwn(eseries_fake.WWPN)],
            port_type='fc', group_id=None)

    def test_initialize_connection_fc_volume_already_mapped_to_target_host(
            self):
        """Should be a no-op"""
        connector = {'wwpns': [eseries_fake.WWPN]}
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))

        self.library.initialize_connection_fc(get_fake_volume(), connector)

        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_initialize_connection_fc_volume_mapped_to_another_host(self):
        """Should raise error saying multiattach not enabled"""
        connector = {'wwpns': [eseries_fake.WWPN]}
        fake_mapping_to_other_host = copy.deepcopy(
            eseries_fake.VOLUME_MAPPING)
        fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[
            'hostRef']
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             side_effect=exception.NetAppDriverException))

        self.assertRaises(exception.NetAppDriverException,
                          self.library.initialize_connection_fc,
                          get_fake_volume(), connector)

        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_initialize_connection_fc_no_target_wwpns(self):
        """Should be a no-op"""
        connector = {'wwpns': [eseries_fake.WWPN]}
        self.mock_object(host_mapper, 'map_volume_to_single_host',
                         mock.Mock(
                             return_value=eseries_fake.VOLUME_MAPPING))
        self.mock_object(self.library._client, 'list_target_wwpns',
                         mock.Mock(return_value=[]))

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.initialize_connection_fc,
                          get_fake_volume(), connector)
        self.assertTrue(host_mapper.map_volume_to_single_host.called)

    def test_build_initiator_target_map_fc_with_lookup_service(self):
        connector = {'wwpns': [eseries_fake.WWPN, eseries_fake.WWPN_2]}
        self.library.lookup_service = mock.Mock()
        self.library.lookup_service.get_device_mapping_from_network = (
            mock.Mock(return_value=eseries_fake.FC_FABRIC_MAP))

        (target_wwpns, initiator_target_map, num_paths) = (
            self.library._build_initiator_target_map_fc(connector))

        self.assertSetEqual(set(eseries_fake.FC_TARGET_WWPNS),
                            set(target_wwpns))
        self.assertDictEqual(eseries_fake.FC_I_T_MAP, initiator_target_map)
        self.assertEqual(4, num_paths)
class NetAppEseriesLibraryTestCase(test.TestCase): def setUp(self): super(NetAppEseriesLibraryTestCase, self).setUp() kwargs = {'configuration': eseries_fake.create_configuration_eseries()} self.library = library.NetAppESeriesLibrary('FAKE', **kwargs) self.library._client = eseries_fake.FakeEseriesClient() self.library.check_for_setup_error() def test_do_setup(self): self.mock_object(self.library, '_check_mode_get_or_register_storage_system') self.mock_object(es_client, 'RestClient', eseries_fake.FakeEseriesClient) mock_check_flags = self.mock_object(na_utils, 'check_flags') self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) def test_get_storage_pools(self): pool_labels = list() # Retrieve the first pool's label for pool in eseries_fake.STORAGE_POOLS: pool_labels.append(pool['label']) break self.library.configuration.netapp_storage_pools = ( ",".join(pool_labels)) filtered_pools = self.library._get_storage_pools() filtered_pool_labels = [pool['label'] for pool in filtered_pools] self.assertListEqual(pool_labels, filtered_pool_labels) def test_get_volume(self): fake_volume = copy.deepcopy(get_fake_volume()) volume = copy.deepcopy(eseries_fake.VOLUME) self.library._client.list_volume = mock.Mock(return_value=volume) result = self.library._get_volume(fake_volume['id']) self.assertEqual(1, self.library._client.list_volume.call_count) self.assertDictMatch(volume, result) def test_get_volume_bad_input(self): volume = copy.deepcopy(eseries_fake.VOLUME) self.library._client.list_volume = mock.Mock(return_value=volume) self.assertRaises(exception.InvalidInput, self.library._get_volume, None) def test_get_volume_bad_uuid(self): volume = copy.deepcopy(eseries_fake.VOLUME) self.library._client.list_volume = mock.Mock(return_value=volume) self.assertRaises(ValueError, self.library._get_volume, '1') def test_update_ssc_info_no_ssc(self): drives = [{ 'currentVolumeGroupRef': 'test_vg1', 'driveMediaType': 'ssd' }] pools = [{ 'volumeGroupRef': 'test_vg1', 
'label': 'test_vg1', 'raidLevel': 'raid6', 'securityType': 'enabled' }] self.library._client = mock.Mock() self.library._client.features.SSC_API_V2 = na_utils.FeatureState( False, minimum_version="1.53.9000.1") self.library._client.SSC_VALID_VERSIONS = [(1, 53, 9000, 1), (1, 53, 9010, 15)] self.library.configuration.netapp_storage_pools = "test_vg1" self.library._client.list_storage_pools = mock.Mock(return_value=pools) self.library._client.list_drives = mock.Mock(return_value=drives) self.library._update_ssc_info() self.assertEqual( { 'test_vg1': { 'netapp_disk_encryption': 'true', 'netapp_disk_type': 'SSD', 'netapp_raid_type': 'raid6' } }, self.library._ssc_stats) @ddt.data(True, False) def test_update_ssc_info(self, data_assurance_supported): self.library._client = mock.Mock() self.library._client.features.SSC_API_V2 = na_utils.FeatureState( True, minimum_version="1.53.9000.1") self.library._client.list_ssc_storage_pools = mock.Mock( return_value=eseries_fake.SSC_POOLS) self.library._get_storage_pools = mock.Mock( return_value=eseries_fake.STORAGE_POOLS) # Data Assurance is not supported on some storage backends self.library._is_data_assurance_supported = mock.Mock( return_value=data_assurance_supported) self.library._update_ssc_info() for pool in eseries_fake.SSC_POOLS: poolId = pool['poolId'] raid_lvl = self.library.SSC_RAID_TYPE_MAPPING.get( pool['raidLevel'], 'unknown') if pool['pool']["driveMediaType"] == 'ssd': disk_type = 'SSD' else: disk_type = pool['pool']['drivePhysicalType'] disk_type = (self.library.SSC_DISK_TYPE_MAPPING.get( disk_type, 'unknown')) da_enabled = pool['dataAssuranceCapable'] and ( data_assurance_supported) thin_provisioned = pool['thinProvisioningCapable'] expected = { 'netapp_disk_encryption': six.text_type(pool['encrypted']).lower(), 'netapp_eseries_flash_read_cache': six.text_type(pool['flashCacheCapable']).lower(), 'netapp_thin_provisioned': six.text_type(thin_provisioned).lower(), 'netapp_eseries_data_assurance': 
six.text_type(da_enabled).lower(), 'netapp_eseries_disk_spindle_speed': pool['spindleSpeed'], 'netapp_raid_type': raid_lvl, 'netapp_disk_type': disk_type } actual = self.library._ssc_stats[poolId] self.assertDictMatch(expected, actual) @ddt.data(('FC', True), ('iSCSI', False)) @ddt.unpack def test_is_data_assurance_supported(self, backend_storage_protocol, enabled): self.mock_object(self.library, 'driver_protocol', backend_storage_protocol) actual = self.library._is_data_assurance_supported() self.assertEqual(enabled, actual) @ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage') def test_update_ssc_disk_types(self, disk_type): drives = [{ 'currentVolumeGroupRef': 'test_vg1', 'interfaceType': { 'driveType': disk_type } }] pools = [{'volumeGroupRef': 'test_vg1'}] self.library._client.list_drives = mock.Mock(return_value=drives) self.library._client.get_storage_pool = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_types(pools) expected = self.library.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown') self.assertEqual({'test_vg1': { 'netapp_disk_type': expected }}, ssc_stats) @ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage') def test_update_ssc_disk_types_ssd(self, disk_type): drives = [{ 'currentVolumeGroupRef': 'test_vg1', 'driveMediaType': 'ssd', 'driveType': disk_type }] pools = [{'volumeGroupRef': 'test_vg1'}] self.library._client.list_drives = mock.Mock(return_value=drives) self.library._client.get_storage_pool = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_types(pools) self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}}, ssc_stats) @ddt.data('enabled', 'none', 'capable', 'unknown', '__UNDEFINED', 'garbage') def test_update_ssc_disk_encryption(self, securityType): pools = [{'volumeGroupRef': 'test_vg1', 'securityType': securityType}] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_encryption(pools) # Convert the boolean value to a lower-case 
string value expected = 'true' if securityType == "enabled" else 'false' self.assertEqual({'test_vg1': { 'netapp_disk_encryption': expected }}, ssc_stats) def test_update_ssc_disk_encryption_multiple(self): pools = [{ 'volumeGroupRef': 'test_vg1', 'securityType': 'none' }, { 'volumeGroupRef': 'test_vg2', 'securityType': 'enabled' }] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_disk_encryption(pools) self.assertEqual( { 'test_vg1': { 'netapp_disk_encryption': 'false' }, 'test_vg2': { 'netapp_disk_encryption': 'true' } }, ssc_stats) @ddt.data(True, False) def test_get_volume_stats(self, refresh): fake_stats = {'key': 'val'} def populate_stats(): self.library._stats = fake_stats self.library._update_volume_stats = mock.Mock( side_effect=populate_stats) self.library._update_ssc_info = mock.Mock() self.library._ssc_stats = {self.library.THIN_UQ_SPEC: True} actual = self.library.get_volume_stats(refresh=refresh) if (refresh): self.library._update_volume_stats.assert_called_once_with() self.assertEqual(fake_stats, actual) else: self.assertEqual(0, self.library._update_volume_stats.call_count) self.assertEqual(0, self.library._update_ssc_info.call_count) def test_get_volume_stats_no_ssc(self): """Validate that SSC data is collected if not yet populated""" fake_stats = {'key': 'val'} def populate_stats(): self.library._stats = fake_stats self.library._update_volume_stats = mock.Mock( side_effect=populate_stats) self.library._update_ssc_info = mock.Mock() self.library._ssc_stats = None actual = self.library.get_volume_stats(refresh=True) self.library._update_volume_stats.assert_called_once_with() self.library._update_ssc_info.assert_called_once_with() self.assertEqual(fake_stats, actual) def test_update_volume_stats_provisioning(self): """Validate pool capacity calculations""" fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) 
self.mock_object(self.library, '_ssc_stats', new_attr={ fake_pool["volumeGroupRef"]: { self.library.THIN_UQ_SPEC: True } }) self.library.configuration = mock.Mock() reserved_pct = 5 over_subscription_ratio = 1.0 self.library.configuration.max_over_subscription_ratio = ( over_subscription_ratio) self.library.configuration.reserved_percentage = reserved_pct total_gb = int(fake_pool['totalRaidedSpace']) / units.Gi used_gb = int(fake_pool['usedSpace']) / units.Gi free_gb = total_gb - used_gb self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] self.assertEqual(fake_pool['label'], pool_stats.get('pool_name')) self.assertEqual(reserved_pct, pool_stats['reserved_percentage']) self.assertEqual(over_subscription_ratio, pool_stats['max_oversubscription_ratio']) self.assertEqual(total_gb, pool_stats.get('total_capacity_gb')) self.assertEqual(used_gb, pool_stats.get('provisioned_capacity_gb')) self.assertEqual(free_gb, pool_stats.get('free_capacity_gb')) @ddt.data(False, True) def test_update_volume_stats_thin_provisioning(self, thin_provisioning): """Validate that thin provisioning support is correctly reported""" fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) self.mock_object(self.library, '_ssc_stats', new_attr={ fake_pool["volumeGroupRef"]: { self.library.THIN_UQ_SPEC: thin_provisioning } }) self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] self.assertEqual(thin_provisioning, pool_stats.get('thin_provisioning_support')) # Should always be True self.assertTrue(pool_stats.get('thick_provisioning_support')) def test_update_volume_stats_ssc(self): """Ensure that the SSC data is correctly reported in the pool stats""" ssc = {self.library.THIN_UQ_SPEC: True, 'key': 'val'} fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) 
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool["volumeGroupRef"]: ssc}) self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] for key in ssc: self.assertIn(key, pool_stats) self.assertEqual(ssc[key], pool_stats[key]) def test_update_volume_stats_no_ssc(self): """Ensure that that pool stats are correctly reported without SSC""" fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) self.library._update_volume_stats() self.assertEqual(1, len(self.library._stats['pools'])) pool_stats = self.library._stats['pools'][0] self.assertFalse(pool_stats.get('thin_provisioning_support')) # Should always be True self.assertTrue(pool_stats.get('thick_provisioning_support')) def test_terminate_connection_iscsi_no_hosts(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.assertRaises(exception.NotFound, self.library.terminate_connection_iscsi, get_fake_volume(), connector) def test_terminate_connection_iscsi_volume_not_mapped(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} volume = copy.deepcopy(eseries_fake.VOLUME) volume['listOfMappings'] = [] self.library._get_volume = mock.Mock(return_value=volume) self.assertRaises(eseries_exc.VolumeNotMapped, self.library.terminate_connection_iscsi, get_fake_volume(), connector) def test_terminate_connection_iscsi_volume_mapped(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [eseries_fake.VOLUME_MAPPING] self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') 
self.library.terminate_connection_iscsi(get_fake_volume(), connector) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_iscsi_not_mapped_initiator_does_not_exist( self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[eseries_fake.HOST_2])) self.assertRaises(exception.NotFound, self.library.terminate_connection_iscsi, get_fake_volume(), connector) def test_initialize_connection_iscsi_volume_not_mapped(self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock(return_value=eseries_fake.VOLUME_MAPPING)) fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [eseries_fake.VOLUME_MAPPING] self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.library.initialize_connection_iscsi(get_fake_volume(), connector) self.assertTrue( self.library._client.get_volume_mappings_for_volume.called) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_iscsi_volume_not_mapped_host_does_not_exist( self): connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'create_host_with_ports', mock.Mock(return_value=eseries_fake.HOST)) self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock(return_value=eseries_fake.VOLUME_MAPPING)) fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [eseries_fake.VOLUME_MAPPING] self.mock_object(self.library._client, 'list_volume', 
mock.Mock(return_value=fake_eseries_volume)) self.library.initialize_connection_iscsi(get_fake_volume(), connector) self.assertTrue( self.library._client.get_volume_mappings_for_volume.called) self.assertTrue(self.library._client.list_hosts.called) self.assertTrue(self.library._client.create_host_with_ports.called) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_iscsi_volume_already_mapped_to_target_host( self): """Should be a no-op""" connector = {'initiator': eseries_fake.INITIATOR_NAME} self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock(return_value=eseries_fake.VOLUME_MAPPING)) fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.library.initialize_connection_iscsi(get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_iscsi_volume_mapped_to_another_host(self): """Should raise error saying multiattach not enabled""" connector = {'initiator': eseries_fake.INITIATOR_NAME} fake_mapping_to_other_host = copy.deepcopy(eseries_fake.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2['hostRef'] self.mock_object( host_mapper, 'map_volume_to_single_host', mock.Mock(side_effect=exception.NetAppDriverException)) self.assertRaises(exception.NetAppDriverException, self.library.initialize_connection_iscsi, get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) @ddt.data(eseries_fake.WWPN, fczm_utils.get_formatted_wwn(eseries_fake.WWPN)) def test_get_host_with_matching_port_wwpn(self, port_id): port_ids = [port_id] host = copy.deepcopy(eseries_fake.HOST) host.update({ 'hostSidePorts': [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] }) host_2 = copy.deepcopy(eseries_fake.HOST_2) host_2.update({ 'hostSidePorts': [{ 'label': 'NewStore', 'type': 'fc', 'address': 
eseries_fake.WWPN_2 }] }) host_list = [host, host_2] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=host_list)) actual_host = self.library._get_host_with_matching_port(port_ids) self.assertEqual(host, actual_host) def test_get_host_with_matching_port_iqn(self): port_ids = [eseries_fake.INITIATOR_NAME] host = copy.deepcopy(eseries_fake.HOST) host.update({ 'hostSidePorts': [{ 'label': 'NewStore', 'type': 'iscsi', 'address': eseries_fake.INITIATOR_NAME }] }) host_2 = copy.deepcopy(eseries_fake.HOST_2) host_2.update({ 'hostSidePorts': [{ 'label': 'NewStore', 'type': 'iscsi', 'address': eseries_fake.INITIATOR_NAME_2 }] }) host_list = [host, host_2] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=host_list)) actual_host = self.library._get_host_with_matching_port(port_ids) self.assertEqual(host, actual_host) def test_terminate_connection_fc_no_hosts(self): connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.assertRaises(exception.NotFound, self.library.terminate_connection_fc, get_fake_volume(), connector) def test_terminate_connection_fc_volume_not_mapped(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] volume = copy.deepcopy(eseries_fake.VOLUME) volume['listOfMappings'] = [] self.mock_object(self.library, '_get_volume', mock.Mock(return_value=volume)) self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.assertRaises(eseries_exc.VolumeNotMapped, self.library.terminate_connection_fc, get_fake_volume(), connector) def test_terminate_connection_fc_volume_mapped(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] 
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ copy.deepcopy(eseries_fake.VOLUME_MAPPING) ] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') self.library.terminate_connection_fc(get_fake_volume(), connector) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_fc_volume_mapped_no_cleanup_zone(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] expected_target_info = { 'driver_volume_type': 'fibre_channel', 'data': {}, } fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ copy.deepcopy(eseries_fake.VOLUME_MAPPING) ] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') self.mock_object( self.library._client, 'get_volume_mappings_for_host', mock.Mock( return_value=[copy.deepcopy(eseries_fake.VOLUME_MAPPING)])) target_info = self.library.terminate_connection_fc( get_fake_volume(), connector) self.assertDictEqual(expected_target_info, target_info) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_fc_volume_mapped_cleanup_zone(self): connector = {'wwpns': [eseries_fake.WWPN]} fake_host = copy.deepcopy(eseries_fake.HOST) fake_host['hostSidePorts'] = [{ 'label': 'NewStore', 'type': 'fc', 'address': eseries_fake.WWPN }] expected_target_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_wwn': [eseries_fake.WWPN_2], 'initiator_target_map': { eseries_fake.WWPN: 
[eseries_fake.WWPN_2] }, }, } fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) fake_eseries_volume['listOfMappings'] = [ copy.deepcopy(eseries_fake.VOLUME_MAPPING) ] self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[fake_host])) self.mock_object(self.library._client, 'list_volume', mock.Mock(return_value=fake_eseries_volume)) self.mock_object(host_mapper, 'unmap_volume_from_host') self.mock_object(self.library._client, 'get_volume_mappings_for_host', mock.Mock(return_value=[])) target_info = self.library.terminate_connection_fc( get_fake_volume(), connector) self.assertDictEqual(expected_target_info, target_info) self.assertTrue(host_mapper.unmap_volume_from_host.called) def test_terminate_connection_fc_not_mapped_host_with_wwpn_does_not_exist( self): connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[eseries_fake.HOST_2])) self.assertRaises(exception.NotFound, self.library.terminate_connection_fc, get_fake_volume(), connector) def test_initialize_connection_fc_volume_not_mapped(self): connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock(return_value=eseries_fake.VOLUME_MAPPING)) expected_target_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 0, 'target_wwn': [eseries_fake.WWPN_2], 'access_mode': 'rw', 'initiator_target_map': { eseries_fake.WWPN: [eseries_fake.WWPN_2] }, }, } target_info = self.library.initialize_connection_fc( get_fake_volume(), connector) self.assertTrue( self.library._client.get_volume_mappings_for_volume.called) self.assertTrue(host_mapper.map_volume_to_single_host.called) self.assertDictEqual(expected_target_info, target_info) def test_initialize_connection_fc_volume_not_mapped_host_does_not_exist( self): connector = {'wwpns': 
[eseries_fake.WWPN]} self.library.driver_protocol = 'FC' self.mock_object(self.library._client, 'get_volume_mappings_for_volume', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'list_hosts', mock.Mock(return_value=[])) self.mock_object(self.library._client, 'create_host_with_ports', mock.Mock(return_value=eseries_fake.HOST)) self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock(return_value=eseries_fake.VOLUME_MAPPING)) self.library.initialize_connection_fc(get_fake_volume(), connector) self.library._client.create_host_with_ports.assert_called_once_with( mock.ANY, mock.ANY, [fczm_utils.get_formatted_wwn(eseries_fake.WWPN)], port_type='fc', group_id=None) def test_initialize_connection_fc_volume_already_mapped_to_target_host( self): """Should be a no-op""" connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock(return_value=eseries_fake.VOLUME_MAPPING)) self.library.initialize_connection_fc(get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_fc_volume_mapped_to_another_host(self): """Should raise error saying multiattach not enabled""" connector = {'wwpns': [eseries_fake.WWPN]} fake_mapping_to_other_host = copy.deepcopy(eseries_fake.VOLUME_MAPPING) fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2['hostRef'] self.mock_object( host_mapper, 'map_volume_to_single_host', mock.Mock(side_effect=exception.NetAppDriverException)) self.assertRaises(exception.NetAppDriverException, self.library.initialize_connection_fc, get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_initialize_connection_fc_no_target_wwpns(self): """Should be a no-op""" connector = {'wwpns': [eseries_fake.WWPN]} self.mock_object(host_mapper, 'map_volume_to_single_host', mock.Mock(return_value=eseries_fake.VOLUME_MAPPING)) self.mock_object(self.library._client, 'list_target_wwpns', 
mock.Mock(return_value=[])) self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_fc, get_fake_volume(), connector) self.assertTrue(host_mapper.map_volume_to_single_host.called) def test_build_initiator_target_map_fc_with_lookup_service(self): connector = {'wwpns': [eseries_fake.WWPN, eseries_fake.WWPN_2]} self.library.lookup_service = mock.Mock() self.library.lookup_service.get_device_mapping_from_network = ( mock.Mock(return_value=eseries_fake.FC_FABRIC_MAP)) (target_wwpns, initiator_target_map, num_paths) = (self.library._build_initiator_target_map_fc(connector)) self.assertSetEqual(set(eseries_fake.FC_TARGET_WWPNS), set(target_wwpns)) self.assertDictEqual(eseries_fake.FC_I_T_MAP, initiator_target_map) self.assertEqual(4, num_paths) @ddt.data(('raid0', 'raid0'), ('raid1', 'raid1'), ('raid3', 'raid5'), ('raid5', 'raid5'), ('raid6', 'raid6'), ('raidDiskPool', 'DDP')) @ddt.unpack def test_update_ssc_raid_type(self, raid_lvl, raid_lvl_mapping): pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_raid_type(pools) self.assertEqual({'test_vg1': { 'netapp_raid_type': raid_lvl_mapping }}, ssc_stats) @ddt.data('raidAll', '__UNDEFINED', 'unknown', 'raidUnsupported', 'garbage') def test_update_ssc_raid_type_invalid(self, raid_lvl): pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}] self.library._client.list_storage_pools = mock.Mock(return_value=pools) ssc_stats = self.library._update_ssc_raid_type(pools) self.assertEqual({'test_vg1': { 'netapp_raid_type': 'unknown' }}, ssc_stats) def test_create_asup(self): self.library._client = mock.Mock() self.library._client.features.AUTOSUPPORT = na_utils.FeatureState() self.library._client.api_operating_mode = ( eseries_fake.FAKE_ASUP_DATA['operating-mode']) self.library._app_version = eseries_fake.FAKE_APP_VERSION self.mock_object( self.library._client, 
'get_firmware_version', mock.Mock( return_value=(eseries_fake.FAKE_ASUP_DATA['system-version']))) self.mock_object( self.library._client, 'get_serial_numbers', mock.Mock(return_value=eseries_fake.FAKE_SERIAL_NUMBERS)) self.mock_object( self.library._client, 'get_model_name', mock.Mock( return_value=eseries_fake.FAKE_CONTROLLERS[0]['modelName'])) self.mock_object(self.library._client, 'set_counter', mock.Mock(return_value={'value': 1})) mock_invoke = self.mock_object(self.library._client, 'add_autosupport_data') self.library._create_asup(eseries_fake.FAKE_CINDER_HOST) mock_invoke.assert_called_with(eseries_fake.FAKE_KEY, eseries_fake.FAKE_ASUP_DATA) def test_create_asup_not_supported(self): self.library._client = mock.Mock() self.library._client.features.AUTOSUPPORT = na_utils.FeatureState( supported=False) mock_invoke = self.mock_object(self.library._client, 'add_autosupport_data') self.library._create_asup(eseries_fake.FAKE_CINDER_HOST) mock_invoke.assert_not_called()
def test_get_formatted_wwn(self):
    """Verify get_formatted_wwn() inserts a colon between WWN bytes."""
    wwn = '10008c7cff523b01'
    expected = '10:00:8c:7c:ff:52:3b:01'
    # assertEqual convention is (expected, actual); the original call
    # passed them reversed, which produces a misleading diff on failure.
    self.assertEqual(expected, get_formatted_wwn(wwn))
def get_san_context(self, target_wwn_list):
    """Lookup SAN context for visible end devices.

    Look up each SAN configured and return a map of SAN (fabric IP)
    to list of target WWNs visible to the fabric.

    :param target_wwn_list: target WWNs (hex strings) to look for in
        each configured fabric's name-server (fcns) database
    :returns: dict mapping fabric name to the subset of
        ``target_wwn_list`` visible in that fabric, with the colon
        separators stripped from each WWN
    :raises FCZoneDriverException: if fetching the fcns database fails
        for a reason other than a CLI error (``CiscoZoningCliException``
        is logged and re-raised unchanged)
    """
    formatted_target_list = []
    fabric_map = {}
    # Fabric names come from cinder.conf as a comma-separated string.
    fabrics = [
        x.strip() for x in self.configuration.fc_fabric_names.split(',')
    ]
    LOG.debug("Fabric List: %s", fabrics)
    LOG.debug("Target wwn List: %s", target_wwn_list)
    if len(fabrics) > 0:
        # Normalize targets to the colon-separated lowercase form used
        # by the fcns database so membership tests below match.
        for t in target_wwn_list:
            formatted_target_list.append(
                zm_utils.get_formatted_wwn(t.lower()))
        LOG.debug("Formatted Target wwn List: %s", formatted_target_list)
        for fabric_name in fabrics:
            # Per-fabric connection settings from the fabric's own
            # config section.
            fabric_ip = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_address')
            fabric_user = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_user')
            fabric_pwd = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_password')
            fabric_port = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_port')
            zoning_vsan = self.fabric_configs[fabric_name].safe_get(
                'cisco_zoning_vsan')
            # Get name server data from fabric and get the targets
            # logged in.
            nsinfo = None
            try:
                # The connector class is configurable; it is imported
                # and instantiated per fabric, then cleaned up.
                conn = importutils.import_object(
                    self.configuration.cisco_sb_connector,
                    ipaddress=fabric_ip,
                    username=fabric_user,
                    password=fabric_pwd,
                    port=fabric_port,
                    vsan=zoning_vsan)
                nsinfo = conn.get_nameserver_info()
                LOG.debug("show fcns database info from fabric: %s",
                          nsinfo)
                conn.cleanup()
            except c_exception.CiscoZoningCliException:
                # CLI errors propagate unchanged (after logging) so the
                # caller can distinguish them from generic failures.
                with excutils.save_and_reraise_exception():
                    LOG.exception("Error getting show fcns database info.")
            except Exception:
                msg = _("Failed to get show fcns database info.")
                LOG.exception(msg)
                raise exception.FCZoneDriverException(msg)
            # Both except branches re-raise, so nsinfo is never None
            # here: either the call above succeeded or we never reach
            # this line.
            visible_targets = list(
                filter(lambda x: x in formatted_target_list, nsinfo))
            if visible_targets:
                LOG.info("Filtered targets for SAN is: %s",
                         {fabric_name: visible_targets})
                # getting rid of the ':' before returning
                fabric_map[fabric_name] = list(
                    map(lambda x: re.sub(r':', '', x), visible_targets))
            else:
                LOG.debug("No targets are in the fcns info for SAN %s",
                          fabric_name)
    LOG.debug("Return SAN context output: %s", fabric_map)
    return fabric_map
def add_connection(self, fabric, initiator_target_map, host_name=None,
                   storage_system=None):
    """Concrete implementation of add_connection.

    Based on zoning policy and state of each I-T pair, list of zone
    members are created and pushed to the fabric to add zones. The
    new zones created or zones updated are activated based on
    isActivate flag set in cinder.conf returned by volume driver
    after attach operation.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    :param host_name: optional host name used when building friendly
        zone names
    :param storage_system: optional storage system name used when
        building friendly zone names
    :raises FCZoneDriverException: for an unrecognized zoning policy or
        any failure while pushing zone changes to the switch
    """
    LOG.debug("Add connection for Fabric: %s", fabric)
    LOG.info("CiscoFCZoneDriver - Add connection "
             "for I-T map: %s", initiator_target_map)
    fabric_ip = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_address')
    fabric_user = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_user')
    fabric_pwd = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_password')
    fabric_port = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_port')

    # A fabric-specific zoning policy overrides the global setting.
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'cisco_zoning_policy')
    if zoning_policy_fab:
        zoning_policy = zoning_policy_fab

    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        'cisco_zone_name_prefix')
    if not zone_name_prefix:
        zone_name_prefix = self.configuration.cisco_zone_name_prefix

    zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')

    LOG.info("Zoning policy for Fabric %s", zoning_policy)

    statusmap_from_fabric = self.get_zoning_status(
        fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)

    if statusmap_from_fabric.get('session') != 'none':
        # Another zoning session is in progress; do not touch the cfg.
        LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
        return

    cfgmap_from_fabric = self.get_active_zone_set(
        fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
    zone_names = []
    if cfgmap_from_fabric.get('zones'):
        zone_names = cfgmap_from_fabric['zones'].keys()

    # based on zoning policy, create zone member list and
    # push changes to fabric.
    for initiator_key in initiator_target_map.keys():
        zone_map = {}
        zone_update_map = {}
        initiator = initiator_key.lower()
        t_list = initiator_target_map[initiator_key]
        if zoning_policy == 'initiator-target':
            # One zone per I-T pair; existing zones are left alone.
            for t in t_list:
                target = t.lower()
                zone_members = [
                    zm_utils.get_formatted_wwn(initiator),
                    zm_utils.get_formatted_wwn(target)]
                zone_name = driver_utils.get_friendly_zone_name(
                    zoning_policy, initiator, target, host_name,
                    storage_system, zone_name_prefix, SUPPORTED_CHARS)
                if (len(cfgmap_from_fabric) == 0 or
                        zone_name not in zone_names):
                    zone_map[zone_name] = zone_members
                else:
                    # This is I-T zoning, skip if zone exists.
                    LOG.info("Zone exists in I-T mode. "
                             "Skipping zone creation %s", zone_name)
        elif zoning_policy == 'initiator':
            if not t_list:
                # BUGFIX: with no targets the original code raised a
                # NameError below ('target' referenced after an empty
                # loop). There is nothing to zone, so skip cleanly.
                continue
            # One zone per initiator, containing all its targets.
            zone_members = [zm_utils.get_formatted_wwn(initiator)]
            for t in t_list:
                target = t.lower()
                zone_members.append(zm_utils.get_formatted_wwn(target))
            zone_name = driver_utils.get_friendly_zone_name(
                zoning_policy, initiator, target, host_name,
                storage_system, zone_name_prefix, SUPPORTED_CHARS)
            # If zone exists, then perform an update_zone and add
            # new members into existing zone.
            if zone_name and (zone_name in zone_names):
                # BUGFIX: materialize the filtered members. On
                # Python 3 a lazy ``filter`` object is always truthy,
                # so the old ``if zone_members:`` scheduled a useless
                # update_zones call even when nothing was missing.
                zone_members = [
                    m for m in zone_members
                    if m not in cfgmap_from_fabric['zones'][zone_name]]
                if zone_members:
                    zone_update_map[zone_name] = zone_members
            else:
                zone_map[zone_name] = zone_members
        else:
            msg = _("Zoning Policy: %s, not"
                    " recognized") % zoning_policy
            LOG.error(msg)
            raise exception.FCZoneDriverException(msg)

        LOG.info("Zone map to add: %(zone_map)s",
                 {'zone_map': zone_map})
        LOG.info("Zone map to update add: %(zone_update_map)s",
                 {'zone_update_map': zone_update_map})

        if not (zone_map or zone_update_map):
            LOG.debug("Zones already exist - Initiator Target Map: %s",
                      initiator_target_map)
            continue
        conn = None
        try:
            # The switch connector class is configurable; instantiate
            # it per initiator and clean up after pushing changes.
            conn = importutils.import_object(
                self.configuration.cisco_sb_connector,
                ipaddress=fabric_ip,
                username=fabric_user,
                password=fabric_pwd,
                port=fabric_port,
                vsan=zoning_vsan)
            if zone_map:
                conn.add_zones(
                    zone_map, self.configuration.cisco_zone_activate,
                    zoning_vsan, cfgmap_from_fabric,
                    statusmap_from_fabric)
            if zone_update_map:
                conn.update_zones(
                    zone_update_map,
                    self.configuration.cisco_zone_activate,
                    zoning_vsan, ZoneConstant.ZONE_ADD,
                    cfgmap_from_fabric, statusmap_from_fabric)
            conn.cleanup()
        except c_exception.CiscoZoningCliException as cisco_ex:
            msg = _("Exception: %s") % six.text_type(cisco_ex)
            raise exception.FCZoneDriverException(msg)
        except Exception:
            msg = _("Failed to add zoning configuration.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
        LOG.debug("Zones added successfully: %s", zone_map)
def initialize_connection_fc(self, volume, connector):
    """Attach the volume to a host over Fibre Channel.

    Maps the E-Series volume to the host identified by the connector's
    WWPNs and returns 'fibre_channel' connection properties: the LUN id,
    the target WWN(s) exporting the volume, and the initiator-target map.

    :param volume: volume to export
    :param connector: connector info, must contain 'wwpns'
    :returns: dict with driver_volume_type 'fibre_channel'
    :raises VolumeBackendAPIException: if no target WWPNs are found
    """
    # Normalize every initiator WWPN into the colon-separated form.
    initiators = []
    for wwpn in connector['wwpns']:
        initiators.append(fczm_utils.get_formatted_wwn(wwpn))

    eseries_vol = self._get_volume(volume['name_id'])
    mapping = self.map_volume_to_host(volume, eseries_vol, initiators)
    lun_id = mapping['lun']

    target_wwpns, initiator_target_map, num_paths = (
        self._build_initiator_target_map_fc(connector))

    # No visible targets means the attach cannot succeed.
    if not target_wwpns:
        msg = _('Failed to get LUN target details for the LUN %s.')
        raise exception.VolumeBackendAPIException(data=msg % volume['id'])

    msg = ("Successfully fetched target details for LUN %(id)s "
           "and initiator(s) %(initiators)s.")
    msg_fmt = {'id': volume['id'], 'initiators': initiators}
    LOG.debug(msg, msg_fmt)

    return {
        'driver_volume_type': 'fibre_channel',
        'data': {
            'target_discovered': True,
            'target_lun': int(lun_id),
            'target_wwn': target_wwpns,
            'access_mode': 'rw',
            'initiator_target_map': initiator_target_map,
        },
    }
def delete_connection(self, fabric, initiator_target_map, host_name=None,
                      storage_system=None):
    """Concrete implementation of delete_connection.

    Based on zoning policy and state of each I-T pair, list of zones
    are created for deletion. The zones are either updated deleted based
    on the policy and attach/detach state of each I-T pair.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    :param host_name: Friendly host name used when building zone names
    :param storage_system: Friendly storage name used in zone names
    :raises FCZoneDriverException: if pushing the change to the fabric
        fails
    """
    LOG.debug("Delete connection for fabric: %s", fabric)
    LOG.info("CiscoFCZoneDriver - Delete connection for I-T map: %s",
             initiator_target_map)
    fabric_ip = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_address')
    fabric_user = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_user')
    fabric_pwd = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_password')
    fabric_port = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_port')
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'cisco_zoning_policy')
    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        'cisco_zone_name_prefix')
    if not zone_name_prefix:
        zone_name_prefix = self.configuration.cisco_zone_name_prefix
    # Per-fabric policy overrides the global one.
    if zoning_policy_fab:
        zoning_policy = zoning_policy_fab
    zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')
    LOG.info("Zoning policy for fabric %s", zoning_policy)

    statusmap_from_fabric = self.get_zoning_status(
        fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
    if statusmap_from_fabric.get('session') == 'none':
        cfgmap_from_fabric = self.get_active_zone_set(
            fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
        zone_names = []
        if cfgmap_from_fabric.get('zones'):
            zone_names = cfgmap_from_fabric['zones'].keys()
        # Based on zoning policy, get zone member list and push
        # changes to fabric. This operation could result in an update
        # for zone config with new member list or deleting zones from
        # active cfg.
        LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric)
        for initiator_key in initiator_target_map.keys():
            initiator = initiator_key.lower()
            formatted_initiator = zm_utils.get_formatted_wwn(initiator)
            zone_update_map = {}
            zones_to_delete = []
            t_list = initiator_target_map[initiator_key]
            if zoning_policy == 'initiator-target':
                # In this case, zone needs to be deleted.
                for t in t_list:
                    target = t.lower()
                    zone_name = (
                        driver_utils.get_friendly_zone_name(
                            zoning_policy,
                            initiator,
                            target,
                            host_name,
                            storage_system,
                            zone_name_prefix,
                            SUPPORTED_CHARS))
                    LOG.debug("Zone name to del: %s", zone_name)
                    if (len(zone_names) > 0 and (zone_name in zone_names)):
                        # delete zone.
                        LOG.debug("Added zone to delete to list: %s",
                                  zone_name)
                        zones_to_delete.append(zone_name)
            elif zoning_policy == 'initiator':
                zone_members = [formatted_initiator]
                for t in t_list:
                    target = t.lower()
                    zone_members.append(
                        zm_utils.get_formatted_wwn(target))
                zone_name = driver_utils.get_friendly_zone_name(
                    zoning_policy,
                    initiator,
                    target,
                    host_name,
                    storage_system,
                    zone_name_prefix,
                    SUPPORTED_CHARS)
                # Check if there are zone members leftover after removal
                if (zone_names and (zone_name in zone_names)):
                    # NOTE: list() is required: on Python 3 filter()
                    # returns a lazy iterator which is always truthy
                    # and lacks list methods such as remove().
                    filtered_members = list(filter(
                        lambda x: x not in zone_members,
                        cfgmap_from_fabric['zones'][zone_name]))
                    # The assumption here is that initiator is always
                    # there in the zone as it is 'initiator' policy.
                    # If filtered list is empty, we remove that zone.
                    # If there are other members leftover, then perform
                    # update_zone to remove targets
                    LOG.debug("Zone delete - I mode: filtered targets: %s",
                              filtered_members)
                    if filtered_members:
                        remove_members = list(filter(
                            lambda x: x in
                            cfgmap_from_fabric['zones'][zone_name],
                            zone_members))
                        if remove_members:
                            # Do not want to remove the initiator
                            remove_members.remove(formatted_initiator)
                            LOG.debug("Zone members to remove: %s",
                                      remove_members)
                            zone_update_map[zone_name] = remove_members
                            LOG.debug("Filtered zone Map to update: %s",
                                      zone_update_map)
                    else:
                        zones_to_delete.append(zone_name)
            else:
                LOG.info("Zoning Policy: %s, not recognized",
                         zoning_policy)
            LOG.debug("Zone map to remove update: %s", zone_update_map)
            LOG.debug("Final Zone list to delete: %s", zones_to_delete)
            conn = None
            try:
                conn = importutils.import_object(
                    self.configuration.cisco_sb_connector,
                    ipaddress=fabric_ip,
                    username=fabric_user,
                    password=fabric_pwd,
                    port=fabric_port,
                    vsan=zoning_vsan)
                # Update zone membership.
                if zone_update_map:
                    conn.update_zones(
                        zone_update_map,
                        self.configuration.cisco_zone_activate,
                        zoning_vsan, ZoneConstant.ZONE_REMOVE,
                        cfgmap_from_fabric, statusmap_from_fabric)
                # Delete zones ~sk.
                if zones_to_delete:
                    zone_name_string = ''
                    num_zones = len(zones_to_delete)
                    for i in range(0, num_zones):
                        if i == 0:
                            zone_name_string = ('%s%s' % (
                                zone_name_string, zones_to_delete[i]))
                        else:
                            zone_name_string = ('%s%s%s' % (
                                zone_name_string, ';',
                                zones_to_delete[i]))
                    conn.delete_zones(zone_name_string,
                                      self.configuration.
                                      cisco_zone_activate,
                                      zoning_vsan, cfgmap_from_fabric,
                                      statusmap_from_fabric)
                conn.cleanup()
            except Exception:
                msg = _("Failed to update or delete zoning configuration")
                LOG.exception(msg)
                raise exception.FCZoneDriverException(msg)
            LOG.debug("Zones deleted successfully: %s", zone_update_map)
    else:
        LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
def get_san_context(self, target_wwn_list):
    """Lookup SAN context for visible end devices.

    Look up each SAN configured and return a map of SAN (fabric IP)
    to list of target WWNs visible to the fabric.

    :param target_wwn_list: list of target port WWNs to look for
    :returns: dict mapping fabric name to visible target WWNs
        (colons stripped)
    :raises FCZoneDriverException: on unsupported firmware or name
        server failures
    """
    formatted_target_list = []
    fabric_map = {}
    fc_fabric_names = self.configuration.fc_fabric_names
    fabrics = [x.strip() for x in fc_fabric_names.split(',')]
    LOG.debug("Fabric List: %(fabrics)s", {'fabrics': fabrics})
    LOG.debug("Target WWN list: %(targetwwns)s",
              {'targetwwns': target_wwn_list})
    if len(fabrics) > 0:
        for t in target_wwn_list:
            formatted_target_list.append(utils.get_formatted_wwn(t))
        LOG.debug("Formatted target WWN list: %(targetlist)s",
                  {'targetlist': formatted_target_list})
        for fabric_name in fabrics:
            conn = self._get_southbound_client(fabric_name)
            # Get name server data from fabric and get the targets
            # logged in.
            nsinfo = None
            try:
                nsinfo = conn.get_nameserver_info()
                LOG.debug("Name server info from fabric: %(nsinfo)s",
                          {'nsinfo': nsinfo})
            except (exception.BrocadeZoningCliException,
                    exception.BrocadeZoningHttpException):
                if not conn.is_supported_firmware():
                    msg = _("Unsupported firmware on switch %s. Make sure "
                            "switch is running firmware v6.4 or higher"
                            ) % conn.switch_ip
                    LOG.exception(msg)
                    raise exception.FCZoneDriverException(msg)
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Error getting name server info."))
            except Exception:
                msg = _("Failed to get name server info.")
                LOG.exception(msg)
                raise exception.FCZoneDriverException(msg)
            finally:
                conn.cleanup()
            # NOTE: list() is required: on Python 3 filter() returns a
            # lazy iterator, which is always truthy and does not support
            # the item assignment performed below.
            visible_targets = list(filter(
                lambda x: x in formatted_target_list, nsinfo))

            if visible_targets:
                LOG.info(_LI("Filtered targets for SAN is: %(targets)s"),
                         {'targets': visible_targets})
                # getting rid of the ':' before returning
                for idx, elem in enumerate(visible_targets):
                    visible_targets[idx] = str(
                        visible_targets[idx]).replace(':', '')
                fabric_map[fabric_name] = visible_targets
            else:
                LOG.debug("No targets found in the nameserver "
                          "for fabric: %(fabric)s",
                          {'fabric': fabric_name})
    LOG.debug("Return SAN context output: %(fabricmap)s",
              {'fabricmap': fabric_map})
    return fabric_map
def get_device_mapping_from_network(self,
                                    initiator_wwn_list,
                                    target_wwn_list):
    """Provides the initiator/target map for available SAN contexts.

    Looks up fcns database of each fc SAN configured to find logged in
    devices and returns a map of initiator and target port WWNs for each
    fabric.

    :param initiator_wwn_list: List of initiator port WWN
    :param target_wwn_list: List of target port WWN

    :returns: List -- device wwn map in following format

    .. code-block:: python

        {
            <Fabric name>: {
                'initiator_port_wwn_list':
                ('200000051e55a100', '200000051e55a121'..)
                'target_port_wwn_list':
                ('100000051e55a100', '100000051e55a121'..)
            }
        }

    :raises Exception: when connection to fabric is failed
    """
    device_map = {}
    formatted_target_list = []
    formatted_initiator_list = []
    fabric_map = {}
    fabric_names = self.configuration.fc_fabric_names

    if not fabric_names:
        raise exception.InvalidParameterValue(
            err=_("Missing Fibre Channel SAN configuration "
                  "param - fc_fabric_names"))

    fabrics = [x.strip() for x in fabric_names.split(',')]
    LOG.debug("FC Fabric List: %s", fabrics)
    if fabrics:
        # Normalize every WWN into the colon-separated form used by
        # the fcns database so membership tests below compare equals.
        for t in target_wwn_list:
            formatted_target_list.append(zm_utils.get_formatted_wwn(t))

        for i in initiator_wwn_list:
            formatted_initiator_list.append(zm_utils.get_formatted_wwn(i))

        for fabric_name in fabrics:
            # Per-fabric switch credentials come from the fabric's own
            # config section.
            self.switch_ip = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_address')
            self.switch_user = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_user')
            self.switch_pwd = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_password')
            self.switch_port = self.fabric_configs[fabric_name].safe_get(
                'cisco_fc_fabric_port')
            zoning_vsan = self.fabric_configs[fabric_name].safe_get(
                'cisco_zoning_vsan')

            # Get name server data from fabric and find the targets
            # logged in
            nsinfo = ''
            LOG.debug("show fcns database for vsan %s", zoning_vsan)
            nsinfo = self.get_nameserver_info(zoning_vsan)

            self.cleanup()
            LOG.debug("Lookup service:fcnsdatabase-%s", nsinfo)
            LOG.debug("Lookup service:initiator list from "
                      "caller-%s", formatted_initiator_list)
            LOG.debug("Lookup service:target list from caller-%s",
                      formatted_target_list)
            # Intersect the fcns database with the requested WWNs.
            visible_targets = [x for x in nsinfo
                               if x in formatted_target_list]
            visible_initiators = [x for x in nsinfo
                                  if x in formatted_initiator_list]

            if visible_targets:
                LOG.debug("Filtered targets is: %s", visible_targets)
                # getting rid of the : before returning
                for idx, elem in enumerate(visible_targets):
                    elem = str(elem).replace(':', '')
                    visible_targets[idx] = elem
            else:
                LOG.debug("No targets are in the fcns database"
                          " for vsan %s", zoning_vsan)

            if visible_initiators:
                # getting rid of the : before returning ~sk
                for idx, elem in enumerate(visible_initiators):
                    elem = str(elem).replace(':', '')
                    visible_initiators[idx] = elem
            else:
                LOG.debug("No initiators are in the fcns database"
                          " for vsan %s", zoning_vsan)

            fabric_map = {'initiator_port_wwn_list': visible_initiators,
                          'target_port_wwn_list': visible_targets
                          }
            device_map[fabric_name] = fabric_map
    LOG.debug("Device map for SAN context: %s", device_map)
    return device_map
def initialize_connection_fc(self, volume, connector):
    """Attach *volume* over Fibre Channel to the connector's host.

    Maps the LUN to every initiator WWPN in the connector and returns
    the 'fibre_channel' connection properties: the LUN id, the target
    WWPN(s) that export the volume, and the initiator-target map.

    :param volume: volume to export
    :param connector: connector info, must contain 'wwpns'
    :returns: dict with driver_volume_type 'fibre_channel'
    :raises VolumeBackendAPIException: if no target WWPNs are found
    """
    # Normalize every initiator WWPN into the colon-separated form.
    initiators = []
    for wwpn in connector['wwpns']:
        initiators.append(fczm_utils.get_formatted_wwn(wwpn))

    volume_name = volume['name']
    lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
    LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s", {
        'name': volume_name,
        'initiators': initiators
    })

    target_wwpns, initiator_target_map, num_paths = (
        self._build_initiator_target_map(connector))

    # Without visible targets the attach cannot proceed.
    if not target_wwpns:
        raise exception.VolumeBackendAPIException(
            data=_('Failed to get LUN target details for '
                   'the LUN %s') % volume_name)
    LOG.debug(
        "Successfully fetched target details for LUN %(name)s "
        "and initiator(s) %(initiators)s", {
            'name': volume_name,
            'initiators': initiators
        })

    return {
        'driver_volume_type': 'fibre_channel',
        'data': {
            'target_discovered': True,
            'target_lun': int(lun_id),
            'target_wwn': target_wwpns,
            'initiator_target_map': initiator_target_map,
        },
    }
def initialize_connection_fc(self, volume, connector):
    """Initializes the connection and returns connection info.

    Assigns the specified volume to a compute node/host so that it can be
    used from that host. The driver returns a driver_volume_type of
    'fibre_channel'. The target_wwn can be a single entry or a list of
    wwns that correspond to the list of remote wwn(s) that will export
    the volume. Example return values:

        {
            'driver_volume_type': 'fibre_channel'
            'data': {
                'target_discovered': True,
                'target_lun': 1,
                'target_wwn': '500a098280feeba5',
                'access_mode': 'rw',
                'initiator_target_map': {
                    '21000024ff406cc3': ['500a098280feeba5'],
                    '21000024ff406cc2': ['500a098280feeba5']
                }
            }
        }

    or

        {
            'driver_volume_type': 'fibre_channel'
            'data': {
                'target_discovered': True,
                'target_lun': 1,
                'target_wwn': ['500a098280feeba5', '500a098290feeba5',
                               '500a098190feeba5', '500a098180feeba5'],
                'access_mode': 'rw',
                'initiator_target_map': {
                    '21000024ff406cc3': ['500a098280feeba5',
                                         '500a098290feeba5'],
                    '21000024ff406cc2': ['500a098190feeba5',
                                         '500a098180feeba5']
                }
            }
        }
    """
    # Normalize initiator WWPNs into the colon-separated form.
    initiators = [fczm_utils.get_formatted_wwn(wwpn)
                  for wwpn in connector["wwpns"]]
    eseries_vol = self._get_volume(volume["name_id"])
    # Map the backend volume to the host; the mapping carries the LUN id.
    mapping = self.map_volume_to_host(volume, eseries_vol, initiators)
    lun_id = mapping["lun"]
    initiator_info = self._build_initiator_target_map_fc(connector)
    target_wwpns, initiator_target_map, num_paths = initiator_info

    if target_wwpns:
        msg = ("Successfully fetched target details for LUN %(id)s "
               "and initiator(s) %(initiators)s.")
        msg_fmt = {"id": volume["id"], "initiators": initiators}
        LOG.debug(msg, msg_fmt)
    else:
        # No visible targets means the attach cannot succeed.
        msg = _("Failed to get LUN target details for the LUN %s.")
        raise exception.VolumeBackendAPIException(data=msg % volume["id"])

    target_info = {
        "driver_volume_type": "fibre_channel",
        "data": {
            "target_discovered": True,
            "target_lun": int(lun_id),
            "target_wwn": target_wwpns,
            "access_mode": "rw",
            "initiator_target_map": initiator_target_map,
        },
    }
    return target_info
def add_connection(self, fabric, initiator_target_map):
    """Concrete implementation of add_connection.

    Based on zoning policy and state of each I-T pair, list of zone
    members are created and pushed to the fabric to add zones. The
    new zones created or zones updated are activated based on isActivate
    flag set in cinder.conf returned by volume driver after attach
    operation.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    :raises FCZoneDriverException: on unrecognized policy or if pushing
        the change to the fabric fails
    """
    LOG.debug("Add connection for Fabric:%s", fabric)
    LOG.info(_("CiscoFCZoneDriver - Add connection "
               "for I-T map: %s"), initiator_target_map)
    fabric_ip = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_address')
    fabric_user = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_user')
    fabric_pwd = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_password')
    fabric_port = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_port')
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'cisco_zoning_policy')
    # Per-fabric policy overrides the global one.
    if zoning_policy_fab:
        zoning_policy = zoning_policy_fab
    zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')
    LOG.info(_("Zoning policy for Fabric %s"), zoning_policy)
    statusmap_from_fabric = self.get_zoning_status(fabric_ip,
                                                   fabric_user, fabric_pwd,
                                                   fabric_port, zoning_vsan)
    if statusmap_from_fabric.get('session') == 'none':
        cfgmap_from_fabric = self.get_active_zone_set(
            fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
        zone_names = []
        if cfgmap_from_fabric.get('zones'):
            zone_names = cfgmap_from_fabric['zones'].keys()
        # based on zoning policy, create zone member list and
        # push changes to fabric.
        for initiator_key in initiator_target_map.keys():
            zone_map = {}
            initiator = initiator_key.lower()
            t_list = initiator_target_map[initiator_key]
            if zoning_policy == 'initiator-target':
                for t in t_list:
                    target = t.lower()
                    zone_members = [
                        get_formatted_wwn(initiator),
                        get_formatted_wwn(target)
                    ]
                    zone_name = (
                        self.configuration.cisco_zone_name_prefix
                        + initiator.replace(':', '')
                        + target.replace(':', ''))
                    if (len(cfgmap_from_fabric) == 0 or
                            (zone_name not in zone_names)):
                        zone_map[zone_name] = zone_members
                    else:
                        # This is I-T zoning, skip if zone exists.
                        LOG.info(
                            _("Zone exists in I-T mode. "
                              "Skipping zone creation %s"),
                            zone_name)
            elif zoning_policy == 'initiator':
                zone_members = [get_formatted_wwn(initiator)]
                for t in t_list:
                    target = t.lower()
                    zone_members.append(get_formatted_wwn(target))
                zone_name = self.configuration.cisco_zone_name_prefix \
                    + initiator.replace(':', '')
                if len(zone_names) > 0 and (zone_name in zone_names):
                    # NOTE: list() is required: on Python 3 filter()
                    # returns an iterator and list + iterator raises
                    # TypeError.
                    zone_members = zone_members + list(filter(
                        lambda x: x not in zone_members,
                        cfgmap_from_fabric['zones'][zone_name]))
                zone_map[zone_name] = zone_members
            else:
                msg = _("Zoning Policy: %s, not"
                        " recognized") % zoning_policy
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)
            LOG.info(_("Zone map to add: %s"), zone_map)
            if len(zone_map) > 0:
                conn = None
                try:
                    conn = importutils.import_object(
                        self.configuration.cisco_sb_connector,
                        ipaddress=fabric_ip,
                        username=fabric_user,
                        password=fabric_pwd,
                        port=fabric_port,
                        vsan=zoning_vsan)
                    conn.add_zones(zone_map,
                                   self.configuration.cisco_zone_activate,
                                   zoning_vsan, cfgmap_from_fabric,
                                   statusmap_from_fabric)
                    conn.cleanup()
                except exception.CiscoZoningCliException as cisco_ex:
                    msg = _("Exception: %s") % six.text_type(cisco_ex)
                    raise exception.FCZoneDriverException(msg)
                except Exception as e:
                    LOG.error(_("Exception: %s") % six.text_type(e))
                    msg = (_("Failed to add zoning configuration %s") %
                           six.text_type(e))
                    raise exception.FCZoneDriverException(msg)
                LOG.debug("Zones added successfully: %s", zone_map)
    else:
        LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
def delete_connection(self, fabric, initiator_target_map, host_name=None,
                      storage_system=None):
    """Concrete implementation of delete_connection.

    Based on zoning policy and state of each I-T pair, list of zones
    are created for deletion. The zones are either updated deleted based
    on the policy and attach/detach state of each I-T pair.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    :param host_name: Friendly host name used when building zone names
    :param storage_system: Friendly storage name used in zone names
    :raises FCZoneDriverException: if pushing the change to the fabric
        fails
    """
    LOG.info(_LI("BrcdFCZoneDriver - Delete connection for fabric "
                 "%(fabric)s for I-T map: %(i_t_map)s"),
             {'fabric': fabric, 'i_t_map': initiator_target_map})
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'zoning_policy')
    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        'zone_name_prefix')
    zone_activate = self.fabric_configs[fabric].safe_get(
        'zone_activate')
    # Per-fabric policy overrides the global one.
    if zoning_policy_fab:
        zoning_policy = zoning_policy_fab
    LOG.info(_LI("Zoning policy for fabric %(policy)s"),
             {'policy': zoning_policy})
    conn = self._get_southbound_client(fabric)
    cfgmap_from_fabric = self._get_active_zone_set(conn)

    zone_names = []
    if cfgmap_from_fabric.get('zones'):
        zone_names = cfgmap_from_fabric['zones'].keys()

    # Based on zoning policy, get zone member list and push changes to
    # fabric. This operation could result in an update for zone config
    # with new member list or deleting zones from active cfg.
    LOG.debug("zone config from Fabric: %(cfgmap)s",
              {'cfgmap': cfgmap_from_fabric})
    for initiator_key in initiator_target_map.keys():
        initiator = initiator_key.lower()
        formatted_initiator = utils.get_formatted_wwn(initiator)
        zone_map = {}
        zones_to_delete = []
        t_list = initiator_target_map[initiator_key]
        if zoning_policy == 'initiator-target':
            # In this case, zone needs to be deleted.
            for t in t_list:
                target = t.lower()
                zone_name = driver_utils.get_friendly_zone_name(
                    zoning_policy,
                    initiator,
                    target,
                    host_name,
                    storage_system,
                    zone_name_prefix,
                    SUPPORTED_CHARS)
                LOG.debug("Zone name to delete: %(zonename)s",
                          {'zonename': zone_name})
                if len(zone_names) > 0 and (zone_name in zone_names):
                    # delete zone.
                    LOG.debug("Added zone to delete to list: %(zonename)s",
                              {'zonename': zone_name})
                    zones_to_delete.append(zone_name)

        elif zoning_policy == 'initiator':
            zone_members = [formatted_initiator]
            for t in t_list:
                target = t.lower()
                zone_members.append(utils.get_formatted_wwn(target))
            zone_name = driver_utils.get_friendly_zone_name(
                zoning_policy,
                initiator,
                target,
                host_name,
                storage_system,
                zone_name_prefix,
                SUPPORTED_CHARS)

            if (zone_names and (zone_name in zone_names)):
                # NOTE: list() is required: on Python 3 filter() returns
                # a lazy iterator which is always truthy and has no
                # append() method.
                filtered_members = list(filter(
                    lambda x: x not in zone_members,
                    cfgmap_from_fabric['zones'][zone_name]))

                # The assumption here is that initiator is always there
                # in the zone as it is 'initiator' policy. We find the
                # filtered list and if it is non-empty, add initiator
                # to it and update zone if filtered list is empty, we
                # remove that zone.
                LOG.debug("Zone delete - initiator mode: "
                          "filtered targets: %(targets)s",
                          {'targets': filtered_members})
                if filtered_members:
                    filtered_members.append(formatted_initiator)
                    LOG.debug("Filtered zone members to update: "
                              "%(members)s", {'members': filtered_members})
                    zone_map[zone_name] = filtered_members
                    LOG.debug("Filtered zone map to update: %(zonemap)s",
                              {'zonemap': zone_map})
                else:
                    zones_to_delete.append(zone_name)
        else:
            LOG.warning(_LW("Zoning policy not recognized: %(policy)s"),
                        {'policy': zoning_policy})
        LOG.debug("Final zone map to update: %(zonemap)s",
                  {'zonemap': zone_map})
        LOG.debug("Final zone list to delete: %(zones)s",
                  {'zones': zones_to_delete})
        try:
            # Update zone membership.
            if zone_map:
                conn.add_zones(
                    zone_map, zone_activate,
                    cfgmap_from_fabric)
            # Delete zones ~sk.
            if zones_to_delete:
                zone_name_string = ''
                num_zones = len(zones_to_delete)
                for i in range(0, num_zones):
                    if i == 0:
                        zone_name_string = (
                            '%s%s' % (
                                zone_name_string, zones_to_delete[i]))
                    else:
                        zone_name_string = '%s;%s' % (
                            zone_name_string, zones_to_delete[i])

                conn.delete_zones(
                    zone_name_string, zone_activate,
                    cfgmap_from_fabric)
            conn.cleanup()
        except (exception.BrocadeZoningCliException,
                exception.BrocadeZoningHttpException) as brocade_ex:
            raise exception.FCZoneDriverException(brocade_ex)
        except Exception:
            msg = _("Failed to update or delete zoning "
                    "configuration.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
def initialize_connection_fc(self, volume, connector):
    """Initializes the connection and returns connection info.

    Assign any created volume to a compute node/host so that it can be
    used from that host. The driver returns a driver_volume_type of
    'fibre_channel'. The target_wwn can be a single entry or a list of
    wwns that correspond to the list of remote wwn(s) that will export
    the volume. Example return values:

        {
            'driver_volume_type': 'fibre_channel'
            'data': {
                'target_discovered': True,
                'target_lun': 1,
                'target_wwn': '500a098280feeba5',
                'access_mode': 'rw',
                'initiator_target_map': {
                    '21000024ff406cc3': ['500a098280feeba5'],
                    '21000024ff406cc2': ['500a098280feeba5']
                }
            }
        }

    or

        {
            'driver_volume_type': 'fibre_channel'
            'data': {
                'target_discovered': True,
                'target_lun': 1,
                'target_wwn': ['500a098280feeba5', '500a098290feeba5',
                               '500a098190feeba5', '500a098180feeba5'],
                'access_mode': 'rw',
                'initiator_target_map': {
                    '21000024ff406cc3': ['500a098280feeba5',
                                         '500a098290feeba5'],
                    '21000024ff406cc2': ['500a098190feeba5',
                                         '500a098180feeba5']
                }
            }
        }
    """
    # Normalize initiator WWPNs into the colon-separated form.
    initiators = [fczm_utils.get_formatted_wwn(wwpn)
                  for wwpn in connector['wwpns']]
    volume_name = volume['name']

    # Map the LUN over FCP; the backend returns the assigned LUN id.
    lun_id = self._map_lun(volume_name, initiators, 'fcp', None)

    LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s",
              {'name': volume_name, 'initiators': initiators})

    target_wwpns, initiator_target_map, num_paths = (
        self._build_initiator_target_map(connector))

    if target_wwpns:
        LOG.debug("Successfully fetched target details for LUN %(name)s "
                  "and initiator(s) %(initiators)s",
                  {'name': volume_name, 'initiators': initiators})
    else:
        # No visible targets means the attach cannot succeed.
        raise exception.VolumeBackendAPIException(
            data=_('Failed to get LUN target details for '
                   'the LUN %s') % volume_name)

    target_info = {'driver_volume_type': 'fibre_channel',
                   'data': {'target_discovered': True,
                            'target_lun': int(lun_id),
                            'target_wwn': target_wwpns,
                            'access_mode': 'rw',
                            'initiator_target_map': initiator_target_map}}

    return target_info
def get_device_mapping_from_network(self,
                                    initiator_wwn_list,
                                    target_wwn_list):
    """Provides the initiator/target map for available SAN contexts.

    Looks up nameserver of each fc SAN configured to find logged in
    devices and returns a map of initiator and target port WWNs for
    each fabric.

    :param initiator_wwn_list: List of initiator port WWN
    :param target_wwn_list: List of target port WWN

    :returns: List -- device wwn map in following format

        {
            <San name>: {
                'initiator_port_wwn_list':
                ('200000051e55a100', '200000051e55a121'..)
                'target_port_wwn_list':
                ('100000051e55a100', '100000051e55a121'..)
            }
        }

    :raises: Exception when connection to fabric is failed
    """
    device_map = {}
    formatted_target_list = []
    formatted_initiator_list = []
    fabric_map = {}
    fabric_names = self.configuration.fc_fabric_names
    fabrics = None
    if not fabric_names:
        raise exception.InvalidParameterValue(
            err=_("Missing Fibre Channel SAN configuration "
                  "param - fc_fabric_names"))

    fabrics = [x.strip() for x in fabric_names.split(',')]
    LOG.debug("FC Fabric List: %s", fabrics)
    if fabrics:
        # Normalize all WWNs into the colon-separated form used by the
        # switch nameserver so membership tests below compare equals.
        for t in target_wwn_list:
            formatted_target_list.append(fczm_utils.get_formatted_wwn(t))

        for i in initiator_wwn_list:
            formatted_initiator_list.append(
                fczm_utils.get_formatted_wwn(i))

        for fabric_name in fabrics:
            # Per-fabric switch credentials come from the fabric's own
            # config section.
            fabric_ip = self.fabric_configs[fabric_name].safe_get(
                'fc_fabric_address')
            fabric_user = self.fabric_configs[fabric_name].safe_get(
                'fc_fabric_user')
            fabric_pwd = self.fabric_configs[fabric_name].safe_get(
                'fc_fabric_password')
            fabric_port = self.fabric_configs[fabric_name].safe_get(
                'fc_fabric_port')

            ssh_pool = ssh_utils.SSHPool(fabric_ip, fabric_port, None,
                                         fabric_user, password=fabric_pwd)

            # Get name server data from fabric and find the targets
            # logged in
            nsinfo = ''
            try:
                LOG.debug("Getting name server data for "
                          "fabric %s", fabric_ip)
                nsinfo = self.get_nameserver_info(ssh_pool)
            except exception.FCSanLookupServiceException:
                # Already a lookup-service error; log and re-raise as-is.
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Failed collecting name server info from"
                                  " fabric %s"), fabric_ip)
            except Exception as e:
                # Wrap transport failures in the lookup-service exception.
                msg = _("SSH connection failed "
                        "for %(fabric)s with error: %(err)s"
                        ) % {'fabric': fabric_ip, 'err': e}
                LOG.error(msg)
                raise exception.FCSanLookupServiceException(message=msg)

            LOG.debug("Lookup service:nsinfo-%s", nsinfo)
            LOG.debug("Lookup service:initiator list from "
                      "caller-%s", formatted_initiator_list)
            LOG.debug("Lookup service:target list from "
                      "caller-%s", formatted_target_list)
            # Intersect the nameserver entries with the requested WWNs.
            visible_targets = [x for x in nsinfo
                               if x in formatted_target_list]
            visible_initiators = [x for x in nsinfo
                                  if x in formatted_initiator_list]

            if visible_targets:
                LOG.debug("Filtered targets is: %s", visible_targets)
                # getting rid of the : before returning
                for idx, elem in enumerate(visible_targets):
                    elem = str(elem).replace(':', '')
                    visible_targets[idx] = elem
            else:
                LOG.debug("No targets are in the nameserver for SAN %s",
                          fabric_name)

            if visible_initiators:
                # getting rid of the : before returning ~sk
                for idx, elem in enumerate(visible_initiators):
                    elem = str(elem).replace(':', '')
                    visible_initiators[idx] = elem
            else:
                LOG.debug("No initiators are in the nameserver "
                          "for SAN %s", fabric_name)

            fabric_map = {
                'initiator_port_wwn_list': visible_initiators,
                'target_port_wwn_list': visible_targets
            }
            device_map[fabric_name] = fabric_map
    LOG.debug("Device map for SAN context: %s", device_map)
    return device_map
def delete_connection(self, fabric, initiator_target_map, host_name=None,
                      storage_system=None):
    """Concrete implementation of delete_connection.

    Based on zoning policy and state of each I-T pair, list of zones
    are created for deletion. The zones are either updated deleted based
    on the policy and attach/detach state of each I-T pair.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    :param host_name: Friendly host name used when building zone names
    :param storage_system: Friendly storage name used in zone names
    """
    LOG.debug("Delete connection for fabric: %s", fabric)
    LOG.info("CiscoFCZoneDriver - Delete connection for I-T map: %s",
             initiator_target_map)
    fabric_ip = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_address')
    fabric_user = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_user')
    fabric_pwd = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_password')
    fabric_port = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_port')
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'cisco_zoning_policy')
    zone_name_prefix = self.fabric_configs[fabric].safe_get(
        'cisco_zone_name_prefix')
    if not zone_name_prefix:
        zone_name_prefix = self.configuration.cisco_zone_name_prefix
    # Per-fabric policy overrides the global one.
    if zoning_policy_fab:
        zoning_policy = zoning_policy_fab
    zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')
    LOG.info("Zoning policy for fabric %s", zoning_policy)
    statusmap_from_fabric = self.get_zoning_status(fabric_ip, fabric_user,
                                                   fabric_pwd, fabric_port,
                                                   zoning_vsan)
    if statusmap_from_fabric.get('session') == 'none':
        cfgmap_from_fabric = self.get_active_zone_set(
            fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
        zone_names = []
        if cfgmap_from_fabric.get('zones'):
            zone_names = cfgmap_from_fabric['zones'].keys()
        # Based on zoning policy, get zone member list and push
        # changes to fabric. This operation could result in an update
        # for zone config with new member list or deleting zones from
        # active cfg.
        LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric)
        for initiator_key in initiator_target_map.keys():
            initiator = initiator_key.lower()
            formatted_initiator = zm_utils.get_formatted_wwn(initiator)
            zone_update_map = {}
            zones_to_delete = []
            t_list = initiator_target_map[initiator_key]
            if zoning_policy == 'initiator-target':
                # In this case, zone needs to be deleted.
                for t in t_list:
                    target = t.lower()
                    zone_name = (driver_utils.get_friendly_zone_name(
                        zoning_policy,
                        initiator,
                        target,
                        host_name,
                        storage_system,
                        zone_name_prefix,
                        SUPPORTED_CHARS))
                    LOG.debug("Zone name to del: %s", zone_name)
                    if (len(zone_names) > 0 and (zone_name in zone_names)):
                        # delete zone.
                        LOG.debug("Added zone to delete to list: %s",
                                  zone_name)
                        zones_to_delete.append(zone_name)
            elif zoning_policy == 'initiator':
                zone_members = [formatted_initiator]
                for t in t_list:
                    target = t.lower()
                    zone_members.append(zm_utils.get_formatted_wwn(target))
                zone_name = driver_utils.get_friendly_zone_name(
                    zoning_policy,
                    initiator,
                    target,
                    host_name,
                    storage_system,
                    zone_name_prefix,
                    SUPPORTED_CHARS)
                # Check if there are zone members leftover after removal
                if (zone_names and (zone_name in zone_names)):
                    # list() materializes the lazy filter so truthiness
                    # and list methods work on Python 3.
                    filtered_members = list(
                        filter(lambda x: x not in zone_members,
                               cfgmap_from_fabric['zones'][zone_name]))
                    # The assumption here is that initiator is always
                    # there in the zone as it is 'initiator' policy.
                    # If filtered list is empty, we remove that zone.
                    # If there are other members leftover, then perform
                    # update_zone to remove targets
                    LOG.debug("Zone delete - I mode: filtered targets: %s",
                              filtered_members)
                    if filtered_members:
                        remove_members = list(
                            filter(
                                lambda x: x in cfgmap_from_fabric['zones'][
                                    zone_name],
                                zone_members))
                        if remove_members:
                            # Do not want to remove the initiator
                            remove_members.remove(formatted_initiator)
                            LOG.debug("Zone members to remove: %s",
                                      remove_members)
                            zone_update_map[zone_name] = remove_members
                            LOG.debug("Filtered zone Map to update: %s",
                                      zone_update_map)
                    else:
                        zones_to_delete.append(zone_name)
            else:
                LOG.info("Zoning Policy: %s, not recognized",
                         zoning_policy)
            LOG.debug("Zone map to remove update: %s", zone_update_map)
            LOG.debug("Final Zone list to delete: %s", zones_to_delete)
            conn = None
            try:
                conn = importutils.import_object(
                    self.configuration.cisco_sb_connector,
                    ipaddress=fabric_ip,
                    username=fabric_user,
                    password=fabric_pwd,
                    port=fabric_port,
                    vsan=zoning_vsan)
                # Update zone membership.
                if zone_update_map:
                    conn.update_zones(
                        zone_update_map,
                        self.configuration.cisco_zone_activate,
                        zoning_vsan, ZoneConstant.ZONE_REMOVE,
                        cfgmap_from_fabric, statusmap_from_fabric)
                # Delete zones ~sk.
                if zones_to_delete:
                    # Build the ';'-separated zone-name string the CLI
                    # connector expects.
                    zone_name_string = ''
                    num_zones = len(zones_to_delete)
                    for i in range(0, num_zones):
                        if i == 0:
                            zone_name_string = (
                                '%s%s' % (zone_name_string,
                                          zones_to_delete[i]))
                        else:
                            zone_name_string = ('%s%s%s'
                                                % (zone_name_string, ';',
                                                   zones_to_delete[i]))

                    conn.delete_zones(
                        zone_name_string,
                        self.configuration.cisco_zone_activate,
                        zoning_vsan, cfgmap_from_fabric,
                        statusmap_from_fabric)
                conn.cleanup()
            except Exception:
                msg = _("Failed to update or delete zoning configuration")
                LOG.exception(msg)
                raise exception.FCZoneDriverException(msg)
            LOG.debug("Zones deleted successfully: %s", zone_update_map)
    else:
        LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
def add_connection(self, fabric, initiator_target_map):
    """Concrete implementation of add_connection.

    Based on zoning policy and state of each I-T pair, list of zone
    members are created and pushed to the fabric to add zones. The
    new zones created or zones updated are activated based on isActivate
    flag set in cinder.conf returned by volume driver after attach
    operation.

    :param fabric: Fabric name from cinder.conf file
    :param initiator_target_map: Mapping of initiator to list of targets
    :raises FCZoneDriverException: if the zoning policy is unrecognized
        or pushing the zone changes to the fabric fails
    """
    LOG.debug("Add connection for Fabric:%s", fabric)
    LOG.info(_("CiscoFCZoneDriver - Add connection "
               "for I-T map: %s"), initiator_target_map)
    # Pull per-fabric connection settings from cinder.conf.
    fabric_ip = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_address')
    fabric_user = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_user')
    fabric_pwd = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_password')
    fabric_port = self.fabric_configs[fabric].safe_get(
        'cisco_fc_fabric_port')
    # A fabric-specific zoning policy overrides the global one.
    zoning_policy = self.configuration.zoning_policy
    zoning_policy_fab = self.fabric_configs[fabric].safe_get(
        'cisco_zoning_policy')
    if zoning_policy_fab:
        zoning_policy = zoning_policy_fab
    zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')

    LOG.info(_("Zoning policy for Fabric %s"), zoning_policy)

    statusmap_from_fabric = self.get_zoning_status(
        fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)

    # Only proceed when no other zoning session holds the fabric lock.
    if statusmap_from_fabric.get('session') == 'none':
        cfgmap_from_fabric = self.get_active_zone_set(
            fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
        zone_names = []
        if cfgmap_from_fabric.get('zones'):
            zone_names = cfgmap_from_fabric['zones'].keys()
        # based on zoning policy, create zone member list and
        # push changes to fabric.
        for initiator_key in initiator_target_map.keys():
            zone_map = {}
            initiator = initiator_key.lower()
            t_list = initiator_target_map[initiator_key]
            if zoning_policy == 'initiator-target':
                # One zone per I-T pair; skip pairs already zoned.
                for t in t_list:
                    target = t.lower()
                    zone_members = [get_formatted_wwn(initiator),
                                    get_formatted_wwn(target)]
                    zone_name = (self.
                                 configuration.cisco_zone_name_prefix
                                 + initiator.replace(':', '')
                                 + target.replace(':', ''))
                    if (len(cfgmap_from_fabric) == 0 or (
                            zone_name not in zone_names)):
                        zone_map[zone_name] = zone_members
                    else:
                        # This is I-T zoning, skip if zone exists.
                        LOG.info(_("Zone exists in I-T mode. "
                                   "Skipping zone creation %s"),
                                 zone_name)
            elif zoning_policy == 'initiator':
                # One zone per initiator containing all its targets.
                zone_members = [get_formatted_wwn(initiator)]
                for t in t_list:
                    target = t.lower()
                    zone_members.append(get_formatted_wwn(target))
                zone_name = self.configuration.cisco_zone_name_prefix \
                    + initiator.replace(':', '')
                if len(zone_names) > 0 and (zone_name in zone_names):
                    # Merge in existing zone members we are not adding.
                    # BUGFIX: the original concatenated a list with a
                    # filter() object, which raises TypeError on
                    # Python 3 (filter() returns an iterator there).
                    zone_members = zone_members + [
                        x for x in cfgmap_from_fabric['zones'][zone_name]
                        if x not in zone_members]
                zone_map[zone_name] = zone_members
            else:
                msg = _("Zoning Policy: %s, not"
                        " recognized") % zoning_policy
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)

            LOG.info(_("Zone map to add: %s"), zone_map)

            if len(zone_map) > 0:
                conn = None
                try:
                    # Southbound connector class is configurable
                    # (cisco_sb_connector) and loaded dynamically.
                    conn = importutils.import_object(
                        self.configuration.cisco_sb_connector,
                        ipaddress=fabric_ip,
                        username=fabric_user,
                        password=fabric_pwd,
                        port=fabric_port,
                        vsan=zoning_vsan)
                    conn.add_zones(
                        zone_map, self.configuration.cisco_zone_activate,
                        zoning_vsan, cfgmap_from_fabric,
                        statusmap_from_fabric)
                    conn.cleanup()
                except exception.CiscoZoningCliException as cisco_ex:
                    msg = _("Exception: %s") % six.text_type(cisco_ex)
                    raise exception.FCZoneDriverException(msg)
                except Exception as e:
                    LOG.error(_("Exception: %s") % six.text_type(e))
                    msg = (_("Failed to add zoning configuration %s") %
                           six.text_type(e))
                    raise exception.FCZoneDriverException(msg)
                LOG.debug("Zones added successfully: %s", zone_map)
    else:
        LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
def get_device_mapping_from_network(self, initiator_wwn_list,
                                    target_wwn_list):
    """Provides the initiator/target map for available SAN contexts.

    Looks up nameserver of each fc SAN configured to find logged in
    devices and returns a map of initiator and target port WWNs for
    each fabric.

    :param initiator_wwn_list: List of initiator port WWN
    :param target_wwn_list: List of target port WWN
    :returns: List -- device wwn map in following format

        .. code-block:: default

            {
                <San name>: {
                    'initiator_port_wwn_list':
                        ('200000051e55a100', '200000051e55a121'..)
                    'target_port_wwn_list':
                        ('100000051e55a100', '100000051e55a121'..)
                }
            }

    :raises Exception: when connection to fabric is failed
    """
    device_map = {}
    fabric_names = self.configuration.fc_fabric_names
    if not fabric_names:
        raise exception.InvalidParameterValue(
            err=_("Missing Fibre Channel SAN configuration "
                  "param - fc_fabric_names"))

    fabrics = [x.strip() for x in fabric_names.split(',')]
    LOG.debug("FC Fabric List: %s", fabrics)
    if fabrics:
        # Normalize caller-supplied WWNs to the colon-separated
        # form the fabric nameserver reports.
        formatted_target_list = [fczm_utils.get_formatted_wwn(t)
                                 for t in target_wwn_list]
        formatted_initiator_list = [fczm_utils.get_formatted_wwn(i)
                                    for i in initiator_wwn_list]

        for fabric_name in fabrics:
            fabric_ip = self.fabric_configs[fabric_name].safe_get(
                'fc_fabric_address')

            # Get name server data from fabric and find the targets
            # logged in.
            nsinfo = ''
            try:
                LOG.debug("Getting name server data for "
                          "fabric %s", fabric_ip)
                conn = self._get_southbound_client(fabric_name)
                nsinfo = conn.get_nameserver_info()
                # NOTE(review): conn is not explicitly closed here;
                # presumably the southbound client cleans up on its
                # own - confirm.
            except exception.FCSanLookupServiceException:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        "Failed collecting name server info from"
                        " fabric %s", fabric_ip)
            except Exception as e:
                msg = _("SSH connection failed "
                        "for %(fabric)s with error: %(err)s") % {
                    'fabric': fabric_ip,
                    'err': e
                }
                LOG.error(msg)
                raise exception.FCSanLookupServiceException(message=msg)

            LOG.debug("Lookup service:nsinfo-%s", nsinfo)
            LOG.debug("Lookup service:initiator list from "
                      "caller-%s", formatted_initiator_list)
            LOG.debug("Lookup service:target list from "
                      "caller-%s", formatted_target_list)
            # Keep only WWNs visible (logged in) on this fabric.
            visible_targets = [x for x in nsinfo
                               if x in formatted_target_list]
            visible_initiators = [x for x in nsinfo
                                  if x in formatted_initiator_list]

            if visible_targets:
                LOG.debug("Filtered targets is: %s", visible_targets)
                # getting rid of the : before returning
                visible_targets = [str(elem).replace(':', '')
                                   for elem in visible_targets]
            else:
                LOG.debug("No targets are in the nameserver for SAN %s",
                          fabric_name)

            if visible_initiators:
                # getting rid of the : before returning
                visible_initiators = [str(elem).replace(':', '')
                                      for elem in visible_initiators]
            else:
                LOG.debug(
                    "No initiators are in the nameserver "
                    "for SAN %s", fabric_name)

            device_map[fabric_name] = {
                'initiator_port_wwn_list': visible_initiators,
                'target_port_wwn_list': visible_targets
            }
    LOG.debug("Device map for SAN context: %s", device_map)
    return device_map
def test_get_formatted_wwn(self):
    """Formatting a bare WWN inserts a colon between each byte pair."""
    raw_wwn = '10008c7cff523b01'
    formatted = zm_utils.get_formatted_wwn(raw_wwn)
    self.assertEqual(['10:00:8c:7c:ff:52:3b:01'], [formatted])