def test_derive_base_npiv_map(self):
    """Verify derive_base_npiv_map fuses ports and emits ANY-WWPN markers.

    Feeds two physical WWPNs (one colon-delimited, mixed case) from a
    single VIOS and asks for 5 mappings; expects the keys normalized to
    upper-case, colon-free form and every value set to the fused
    'ANY WWPN' marker.
    """
    vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
    vios_wraps = [vios_w]

    # Subset the WWPNs on that VIOS.  Note the second one is deliberately
    # colon-delimited and lower case to exercise normalization.
    p_wwpns = ['10000090FA45473B', '10:00:00:90:fa:45:17:58']

    # Run the derivation now
    resp = vfc_mapper.derive_base_npiv_map(vios_wraps, p_wwpns, 5)
    self.assertIsNotNone(resp)
    # Asked for 5 mappings; all 5 should come back even with only 2 ports.
    self.assertEqual(5, len(resp))

    # Make sure we only get two unique keys back.
    unique_keys = set([i[0] for i in resp])
    self.assertEqual({'10000090FA45473B', '10000090FA451758'}, unique_keys)

    # Make sure we get the 'marker' back for the values.  Should now be
    # fused.
    values = set(i[1] for i in resp)
    self.assertEqual({vfc_mapper._FUSED_ANY_WWPN}, values)
def wwpns(self):
    """Builds the WWPNs of the adapters that will connect the ports."""
    vioses, mgmt_part_uuid = None, None
    wwpn_list = []

    # A brand new mapping must have its WWPNs logged into the fabric so
    # Cinder can use them -- but the LPAR does not exist yet (catch-22).
    # The workaround: map against the mgmt partition now, and move the
    # mapping over to the VM once it is created.
    #
    # An existing mapping is simpler: the data is already stored in the
    # nova instance's system metadata and can just be read back.
    for fabric in self._fabric_names():
        state = self._get_fabric_state(fabric)
        LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
                     "instance %(inst)s") %
                 {'st': state, 'inst': self.instance.name})

        if self._is_initial_wwpn(state, fabric):
            # A mapping is needed.  The mgmt uuid and VIOS wrappers are
            # expensive lookups, so they are fetched lazily on the first
            # pass through the loop and reused afterwards.
            if mgmt_part_uuid is None:
                mgmt_part_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

                # Seed the VIOS wrappers at the same time; subsequent
                # loop iterations reuse them.
                vioses = self.stg_ftsk.feed

            # Derive the virtual to physical port mapping.
            fab_maps = pvm_vfcm.derive_base_npiv_map(
                vioses, self._fabric_ports(fabric),
                self._ports_per_fabric())

            # Reverse the wrapper list on every pass.  If Fabric A only
            # has one port it lands on the first VIOS; Fabric B then
            # lands on a different VIOS.  This spreads the paths across
            # VIOSes as a form of multipathing.
            vioses.reverse()

            # The fabric is unmapped, so attach it temporarily to the
            # management partition.
            LOG.info(_LI("Adding NPIV Mapping with mgmt partition for "
                         "instance %s") % self.instance.name)
            fab_maps = pvm_vfcm.add_npiv_port_mappings(
                self.adapter, self.host_uuid, mgmt_part_uuid, fab_maps)

            # Record on the instance how the fabric maps to the physical
            # ports, and advance the fabric state.
            self._set_fabric_meta(fabric, fab_maps)
            self._set_fabric_state(fabric, FS_MGMT_MAPPED)
        elif self._is_migration_wwpn(state):
            fab_maps = self._configure_wwpns_for_migration(fabric)

            # Not persisted by this step; the real save happens in 'post
            # migration'.  Stashing it here lets later per-volume WWPN
            # calls observe the same temporary data.
            self._set_fabric_meta(fabric, fab_maps)
        else:
            # Previously set fabric -- just read the stored meta, as it
            # is likely already mapped to the instance.
            fab_maps = self._get_fabric_meta(fabric)

        # Whichever branch ran, fab_maps may still be None.  Otherwise,
        # collect only the first WWPN of each mapping -- that is the one
        # logged into the fabric.
        if fab_maps is not None:
            wwpn_list.extend(m[1].split()[0] for m in fab_maps)

    # The volume connector requires a list.
    return wwpn_list
def wwpns(self):
    """Builds the WWPNs of the adapters that will connect the ports.

    :return: A list of WWPN strings (the first WWPN of each port
             mapping) for the volume connector.
    """
    vios_wraps, mgmt_uuid = None, None
    resp_wwpns = []

    # If this is a new mapping altogether, the WWPNs need to be logged
    # into the fabric so that Cinder can make use of them.  This is a bit
    # of a catch-22 because the LPAR doesn't exist yet.  So a mapping will
    # be created against the mgmt partition and then upon VM creation, the
    # mapping will be moved over to the VM.
    #
    # If a mapping already exists, we can instead just pull the data off
    # of the system metadata from the nova instance.
    for fabric in self._fabric_names():
        fc_state = self._get_fabric_state(fabric)
        LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
                     "instance %(inst)s") %
                 {'st': fc_state, 'inst': self.instance.name})

        if self._is_initial_wwpn(fc_state, fabric):
            # At this point we've determined that we need to do a mapping.
            # So we go and obtain the mgmt uuid and the VIOS wrappers.
            # We only do this for the first loop through so as to ensure
            # that we do not keep invoking these expensive calls
            # unnecessarily.
            if mgmt_uuid is None:
                mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

                # The VIOS wrappers are also not set at this point.  Seed
                # them as well.  Will get reused on subsequent loops.
                vios_wraps = self.stg_ftsk.feed

            # Derive the virtual to physical port mapping.
            port_maps = pvm_vfcm.derive_base_npiv_map(
                vios_wraps, self._fabric_ports(fabric),
                self._ports_per_fabric())

            # Every loop through, we reverse the vios wrappers.  This is
            # done so that if Fabric A only has 1 port, it goes on the
            # first VIOS.  Then Fabric B would put its port on a different
            # VIOS.  As a form of multi pathing (so that your paths were
            # not restricted to a single VIOS).
            # NOTE(review): reverse() mutates the feed list in place —
            # intentional here so the rotation persists across fabrics.
            vios_wraps.reverse()

            # Check if the fabrics are unmapped then we need to map it
            # temporarily with the management partition.
            LOG.info(_LI("Adding NPIV Mapping with mgmt partition for "
                         "instance %s") % self.instance.name)
            port_maps = pvm_vfcm.add_npiv_port_mappings(
                self.adapter, self.host_uuid, mgmt_uuid, port_maps)

            # Set the fabric meta (which indicates on the instance how
            # the fabric is mapped to the physical port) and the fabric
            # state.
            self._set_fabric_meta(fabric, port_maps)
            self._set_fabric_state(fabric, FS_MGMT_MAPPED)
        elif self._is_migration_wwpn(fc_state):
            port_maps = self._configure_wwpns_for_migration(fabric)

            # This won't actually get saved by the process.  The save will
            # only occur after the 'post migration'.  But if there are
            # multiple volumes, their WWPNs calls will subsequently see
            # the data saved temporarily here.
            self._set_fabric_meta(fabric, port_maps)
        else:
            # This specific fabric had been previously set.  Just pull
            # from the meta (as it is likely already mapped to the
            # instance)
            port_maps = self._get_fabric_meta(fabric)

        # Port map is set by either conditional, but may be set to None.
        # If not None, then add the WWPNs to the response.
        if port_maps is not None:
            for mapping in port_maps:
                # Only add the first WWPN.  That is the one that will be
                # logged into the fabric.
                resp_wwpns.append(mapping[1].split()[0])

    # The return object needs to be a list for the volume connector.
    return resp_wwpns