Code Example #1
File: npiv.py  Project: openstack/nova-powervm
    def wwpns(self):
        """Builds the WWPNs of the adapters that will connect the ports."""
        # Refresh the instance.  It could have been updated by a concurrent
        # call from another thread to get the wwpns.
        self.instance.refresh()
        vios_wraps = self.stg_ftsk.feed
        resp_wwpns = []

        # If this is the first time to query the WWPNs for the instance, we
        # need to generate a set of valid WWPNs.  Loop through the configured
        # FC fabrics and determine if these are new, part of a migration, or
        # were already configured.
        for fabric in self._fabric_names():
            fc_state = self._get_fabric_state(fabric)
            LOG.info("NPIV wwpns fabric state=%(st)s.", {'st': fc_state},
                     instance=self.instance)

            if self._is_initial_wwpn(fc_state, fabric):
                # Get a set of WWPNs that are globally unique from the system.
                v_wwpns = pvm_vfcm.build_wwpn_pair(
                    self.adapter,
                    self.host_uuid,
                    pair_count=self._ports_per_fabric())

                # Derive the virtual to physical port mapping
                port_maps = pvm_vfcm.derive_npiv_map(
                    vios_wraps, self._fabric_ports(fabric), v_wwpns)

                # Save the fabric metadata (which records how the fabric
                # is mapped to the physical ports) and the fabric state.
                self._set_fabric_meta(fabric, port_maps)
                self._set_fabric_state(fabric, FS_UNMAPPED)
                self.instance.save()
            elif self._is_migration_wwpn(fc_state):
                # The migration process requires the 'second' wwpn from the
                # fabric to be used.
                port_maps = self._configure_wwpns_for_migration(fabric)
            else:
                # This specific fabric had been previously set.  Just pull
                # from the meta (as it is likely already mapped to the
                # instance)
                port_maps = self._get_fabric_meta(fabric)

            # Every loop through, we reverse the vios wrappers.  This is
            # done so that if Fabric A only has 1 port, it goes on the
            # first VIOS.  Then Fabric B would put its port on a different
            # VIOS.  This serves as a form of multipathing (so that your
            # paths are not restricted to a single VIOS).
            vios_wraps.reverse()

            # The port map is set by one of the branches above, but it may
            # be None.  If it is not None, add the WWPNs to the response.
            if port_maps is not None:
                for mapping in port_maps:
                    # Only add the first WWPN.  That is the one that will be
                    # logged into the fabric.
                    resp_wwpns.append(mapping[1].split()[0])

        # The return object needs to be a list for the volume connector.
        return resp_wwpns
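Each entry in port_maps above is a two-element tuple: the physical port WWPN, then a space-delimited pair of virtual WWPNs. That is why the loop takes mapping[1].split()[0] as the WWPN that logs into the fabric. A minimal, standalone sketch of that extraction, using made-up WWPN values:

# Hypothetical port maps, shaped like derive_npiv_map() output:
# (physical port WWPN, 'virtual_wwpn_1 virtual_wwpn_2')
port_maps = [('10000090FA5371F2', 'C05076065A7C02E4 C05076065A7C02E5'),
             ('10000090FA53720A', 'C05076065A7C02E6 C05076065A7C02E7')]

resp_wwpns = []
for mapping in port_maps:
    # Only the first virtual WWPN of each pair logs into the fabric.
    resp_wwpns.append(mapping[1].split()[0])

print(resp_wwpns)  # ['C05076065A7C02E4', 'C05076065A7C02E6']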
Code Example #2
File: npiv.py  Project: pkdevboxy/nova-powervm
    def wwpns(self):
        """Builds the WWPNs of the adapters that will connect the ports."""
        # Refresh the instance.  It could have been updated by a concurrent
        # call from another thread to get the wwpns.
        self.instance.refresh()
        vios_wraps = self.stg_ftsk.feed
        resp_wwpns = []

        # If this is the first time to query the WWPNs for the instance, we
        # need to generate a set of valid WWPNs.  Loop through the configured
        # FC fabrics and determine if these are new, part of a migration, or
        # were already configured.
        for fabric in self._fabric_names():
            fc_state = self._get_fabric_state(fabric)
            LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
                         "instance %(inst)s"),
                     {'st': fc_state, 'inst': self.instance.name})

            if self._is_initial_wwpn(fc_state, fabric):
                # Get a set of WWPNs that are globally unique from the system.
                v_wwpns = pvm_vfcm.build_wwpn_pair(
                    self.adapter, self.host_uuid,
                    pair_count=self._ports_per_fabric())

                # Derive the virtual to physical port mapping
                port_maps = pvm_vfcm.derive_npiv_map(
                    vios_wraps, self._fabric_ports(fabric), v_wwpns)

                # Save the fabric metadata (which records how the fabric
                # is mapped to the physical ports) and the fabric state.
                self._set_fabric_meta(fabric, port_maps)
                self._set_fabric_state(fabric, FS_UNMAPPED)
                self.instance.save()
            elif self._is_migration_wwpn(fc_state):
                # The migration process requires the 'second' wwpn from the
                # fabric to be used.
                port_maps = self._configure_wwpns_for_migration(fabric)
            else:
                # This specific fabric had been previously set.  Just pull
                # from the meta (as it is likely already mapped to the
                # instance)
                port_maps = self._get_fabric_meta(fabric)

            # Every loop through, we reverse the vios wrappers.  This is
            # done so that if Fabric A only has 1 port, it goes on the
            # first VIOS.  Then Fabric B would put its port on a different
            # VIOS.  This serves as a form of multipathing (so that your
            # paths are not restricted to a single VIOS).
            vios_wraps.reverse()

            # The port map is set by one of the branches above, but it may
            # be None.  If it is not None, add the WWPNs to the response.
            if port_maps is not None:
                for mapping in port_maps:
                    # Only add the first WWPN.  That is the one that will be
                    # logged into the fabric.
                    resp_wwpns.append(mapping[1].split()[0])

        # The return object needs to be a list for the volume connector.
        return resp_wwpns
Code Example #3
File: test_vfc_mapper.py  Project: tpeponas/pypowervm
    def test_derive_npiv_map_existing_no_preserve(self):
        # Use sample vios data with mappings.
        vios_file = 'fake_vios_mappings.txt'
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(vios_file).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA1B6898', '10000090FA1B6899']
        v_port_wwpns = ['c05076065a7c02e4', 'c05076065a7c02e5']
        candidates = vfc_mapper._find_ports_on_vio(vios_w, p_wwpns)
        for p_port in candidates:
            if p_port.wwpn == p_wwpns[1]:
                # Artificially inflate the free ports so that it would get
                # chosen for a newly created mapping.
                p_port.set_parm_value('AvailablePorts', '64')

        # Run the derivation now
        resp = vfc_mapper.derive_npiv_map(vios_wraps,
                                          p_wwpns,
                                          v_port_wwpns,
                                          preserve=False)
        self.assertIsNotNone(resp)
        self.assertEqual(1, len(resp))

        # Make sure we only got one physical port key back, and that it does
        # *not* match the existing mapping from the 'preserve' test case.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual({'10000090FA1B6899'}, unique_keys)
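Given the assertions, the derived response here has the shape below (a sketch; the virtual WWPN values are taken from the test inputs, and their pairing order is not asserted by the test):

resp = [('10000090FA1B6899', 'c05076065a7c02e4 c05076065a7c02e5')]
assert len(resp) == 1
assert {i[0] for i in resp} == {'10000090FA1B6899'}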
Code Example #4
File: npiv.py  Project: pratgohi/nova-powervm
    def _configure_wwpns_for_migration(self, fabric):
        """Configures the WWPNs for a migration.

        During an NPIV migration, the WWPNs need to be flipped and attached to
        the management VM.  This is so that the peer WWPN is brought online.

        The WWPNs will be removed from the management partition via the
        pre_live_migration_on_destination method.  The wwpns method itself
        is invoked prior to the migration, when the volume connector is
        gathered.

        :param fabric: The fabric to configure.
        :return: An updated port mapping.
        """
        LOG.info(
            _LI("Mapping instance %(inst)s to the mgmt partition for "
                "fabric %(fabric)s because the VM is migrating to "
                "this host."), {
                    'inst': self.instance.name,
                    'fabric': fabric
                })

        mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

        # When we migrate...flip the WWPNs around.  This is so the other
        # WWPN logs in on the target fabric.  But we should only flip new
        # WWPNs.  There may already be some on the overall fabric...and if
        # there are, we keep those 'as-is'
        #
        # TODO(thorst) pending API change should be able to indicate which
        # wwpn is active.
        port_maps = self._get_fabric_meta(fabric)
        existing_wwpns = []
        new_wwpns = []

        for port_map in port_maps:
            c_wwpns = port_map[1].split()

            # Only add it as a 'new' mapping if it isn't on a VIOS already.  If
            # it is, then we know that it has already been serviced, perhaps
            # by a previous volume.
            vios_w, vfc_map = pvm_vfcm.has_client_wwpns(
                self.stg_ftsk.feed, c_wwpns)
            if vfc_map is None:
                c_wwpns.reverse()
                new_wwpns.extend(c_wwpns)
            else:
                existing_wwpns.extend(c_wwpns)

        # Now derive the mapping to THESE VIOSes' physical ports
        port_mappings = pvm_vfcm.derive_npiv_map(self.stg_ftsk.feed,
                                                 self._fabric_ports(fabric),
                                                 new_wwpns + existing_wwpns)

        # Add the port maps to the mgmt partition
        if len(new_wwpns) > 0:
            pvm_vfcm.add_npiv_port_mappings(self.adapter, self.host_uuid,
                                            mgmt_uuid, port_mappings)
        return port_mappings
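The new-versus-existing split above can be shown in isolation. A minimal sketch, with a hypothetical is_on_vios() standing in for the pvm_vfcm.has_client_wwpns() lookup:

# Hypothetical stand-in for the has_client_wwpns() check: pretend only
# the first pair is already mapped on a VIOS.
def is_on_vios(c_wwpns):
    return c_wwpns == ['A1', 'A2']

port_maps = [('PHYS1', 'A1 A2'), ('PHYS2', 'B1 B2')]
existing_wwpns, new_wwpns = [], []
for port_map in port_maps:
    c_wwpns = port_map[1].split()
    if not is_on_vios(c_wwpns):
        # New mapping: flip the pair so the peer WWPN logs in.
        c_wwpns.reverse()
        new_wwpns.extend(c_wwpns)
    else:
        # Already serviced (e.g. by a previous volume): keep as-is.
        existing_wwpns.extend(c_wwpns)

print(new_wwpns + existing_wwpns)  # ['B2', 'B1', 'A1', 'A2']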
Code Example #5
File: test_vfc_mapper.py  Project: tpeponas/pypowervm
    def test_derive_npiv_map_existing_no_bp(self, mock_bp):
        vios_file = 'fake_vios_mappings.txt'
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(vios_file).entry)
        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA1B6898', '10000090FA1B6899']
        v_port_wwpns = ['c05076065a7c02e4', 'c05076065a7c02e5']
        resp = vfc_mapper.derive_npiv_map([vios_w], p_wwpns, v_port_wwpns)
        # We shouldn't have returned the existing mapping that didn't have
        # a backing port.
        self.assertEqual([], resp)
Code Example #6
File: npiv.py  Project: openstack/nova-powervm
    def _ensure_phys_ports_for_system(self, npiv_port_maps, vios_wraps,
                                      fabric):
        """Ensures that the npiv_port_map is correct for the system.

        Rare scenarios can occur where the physical port on the NPIV port
        map does not match the actual port.  This is generally caused when the
        last volume is removed from the VM, the VM is migrated to another host,
        and then a new volume is attached.

        Stale metadata would still be present at attach time (as it can't
        be cleaned out beforehand).  This method clears that up.

        :param npiv_port_maps: The existing port maps.
        :param vios_wraps: The Virtual I/O Server wraps.
        :param fabric: The name of the fabric
        :return: The npiv_port_maps.  May be unchanged.
        """
        # Check that all physical ports in the mappings belong to 'this'
        # set of VIOSs.
        if all(
                pvm_vfcm.find_vios_for_wwpn(vios_wraps, pm[0])[0]
                for pm in npiv_port_maps):
            LOG.debug(
                "All physical ports were found on the given Virtual I/O "
                "Server(s).",
                instance=self.instance)
            return npiv_port_maps

        # If ANY of the VIOS ports were not there, rebuild the port maps
        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state.",
                  npiv_port_maps,
                  instance=self.instance)
        v_wwpns = []
        for port_map in npiv_port_maps:
            v_wwpns.extend(port_map[1].split())
        self._set_fabric_state(fabric, FS_UNMAPPED)

        # Derive new maps and don't preserve existing maps
        npiv_port_maps = pvm_vfcm.derive_npiv_map(vios_wraps,
                                                  self._fabric_ports(fabric),
                                                  v_wwpns,
                                                  preserve=False)
        LOG.debug("Rebuilt port maps: %s",
                  npiv_port_maps,
                  instance=self.instance)
        self._set_fabric_meta(fabric, npiv_port_maps)
        LOG.warning(
            "Had to update the system metadata for the WWPNs due to "
            "incorrect physical WWPNs on fabric %(fabric)s",
            {'fabric': fabric},
            instance=self.instance)

        return npiv_port_maps
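The all(...) guard relies on find_vios_for_wwpn returning a tuple whose first element is the owning VIOS wrap when the physical WWPN is found. A toy sketch of that short-circuit check, with a hypothetical lookup table in place of the real call:

# Hypothetical stand-in for find_vios_for_wwpn(vios_wraps, wwpn)[0]:
# truthy (the owning VIOS) when found, None otherwise.
vios_by_wwpn = {'PHYS1': 'vios1', 'PHYS2': None}

npiv_port_maps = [('PHYS1', 'V1 V2'), ('PHYS2', 'V3 V4')]
if all(vios_by_wwpn.get(pm[0]) for pm in npiv_port_maps):
    print("all physical ports found - keep the maps")
else:
    print("stale physical port detected - rebuild with preserve=False")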
Code Example #7
    def test_add_map(self):
        """Validates the add_map method."""
        # Determine the vios original values
        vios_wrap = self.entries[0]
        vios1_orig_map_count = len(vios_wrap.vfc_mappings)

        # Subset the WWPNs on that VIOS
        fabric_wwpns = ['10000090FA5371F2']

        # Fake Virtual WWPNs
        v_fabric_wwpns = ['0', '1']

        # Get the mappings
        fabric_map = vfc_mapper.derive_npiv_map([vios_wrap], fabric_wwpns,
                                                v_fabric_wwpns)[0]

        # Make sure the map was not there initially.
        maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings,
                                    self.lpar_uuid,
                                    port_map=fabric_map)
        self.assertEqual(0, len(maps))

        # Now call the add action
        resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid,
                                  fabric_map)
        self.assertIsNotNone(resp)
        self.assertIsInstance(resp, pvm_vios.VFCMapping)

        # Verify the update is now found.
        maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings,
                                    self.lpar_uuid,
                                    port_map=fabric_map)
        self.assertEqual(1, len(maps))
        self.assertEqual(vios1_orig_map_count + 1, len(vios_wrap.vfc_mappings))

        # Try to add it again...it shouldn't re-add it because it's already
        # there.  Flip the WWPNs to verify the set-based query.
        fabric_map = ('10000090FA5371F2', '1 0')
        resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid,
                                  fabric_map)
        self.assertIsNone(resp)
        self.assertEqual(vios1_orig_map_count + 1, len(vios_wrap.vfc_mappings))

        # We should only find one here...the original add.  Not two even though
        # we've called add twice.
        maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings,
                                    self.lpar_uuid,
                                    port_map=fabric_map)
        self.assertEqual(1, len(maps))
Code Example #8
File: npiv.py  Project: openstack/nova-powervm
    def _configure_wwpns_for_migration(self, fabric):
        """Configures the WWPNs for a migration.

        During an NPIV migration, the WWPNs need to be flipped.  This is because
        the second WWPN is what will be logged in on the source system.  So by
        flipping them, we indicate that the 'second' wwpn is the new one to
        log in.

        Another way to think of it is, this code should always return the
        correct WWPNs for the system that the workload will be running on.

        This WWPNs invocation is done on the target server prior to the
        actual migration call.  It is used to build the volume connector.
        Therefore this code simply flips the ports around.

        :param fabric: The fabric to configure.
        :return: An updated port mapping.
        """
        if self._get_fabric_state(fabric) == FS_MIGRATING:
            # If the fabric is migrating, just return the existing port maps.
            # They've already been flipped.
            return self._get_fabric_meta(fabric)

        # When we migrate...flip the WWPNs around.  This is so the other
        # WWPN logs in on the target fabric.  If this code is hit, the flip
        # hasn't yet occurred (read as first volume on the instance).
        port_maps = self._get_fabric_meta(fabric)
        client_wwpns = []
        for port_map in port_maps:
            c_wwpns = port_map[1].split()
            c_wwpns.reverse()
            client_wwpns.extend(c_wwpns)

        # Now derive the mapping to the VIOS physical ports on this system
        # (the destination)
        port_mappings = pvm_vfcm.derive_npiv_map(self.stg_ftsk.feed,
                                                 self._fabric_ports(fabric),
                                                 client_wwpns)

        # This won't actually get saved by the process.  The instance save will
        # only occur after the 'post migration'.  But if there are multiple
        # volumes, their WWPNs calls will subsequently see the data saved
        # temporarily here, and therefore won't "double flip" the wwpns back
        # to the original.
        self._set_fabric_meta(fabric, port_mappings)
        self._set_fabric_state(fabric, FS_MIGRATING)

        # Return the mappings
        return port_mappings
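The FS_MIGRATING guard is what keeps the per-volume wwpns calls from flipping the pairs back again. A toy sketch of that idempotency, with hypothetical in-memory stand-ins for the fabric state and metadata accessors:

state = {'fab': 'unmapped'}
meta = {'fab': [('PHYS1', 'A B')]}

def configure_for_migration(fabric):
    if state[fabric] == 'migrating':
        # Already flipped; do not flip again.
        return meta[fabric]
    flipped = [(p, ' '.join(reversed(v.split()))) for p, v in meta[fabric]]
    meta[fabric] = flipped
    state[fabric] = 'migrating'
    return flipped

print(configure_for_migration('fab'))  # [('PHYS1', 'B A')]
print(configure_for_migration('fab'))  # unchanged on the second call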
Code Example #9
File: test_vfc_mapper.py  Project: tpeponas/pypowervm
    def test_derive_npiv_map_multi_vio(self):
        vios_wraps = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FEED))

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA5371F2', '10000090FA53720A']

        # Virtual WWPNs can be faked, and simplified.
        v_port_wwpns = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

        # Run the derivation now
        resp = vfc_mapper.derive_npiv_map(vios_wraps, p_wwpns, v_port_wwpns)
        self.assertIsNotNone(resp)
        self.assertEqual(5, len(resp))

        # Make sure we only get two unique keys back.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual(set(p_wwpns), unique_keys)
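The counts in this test follow from derive_npiv_map pairing the virtual WWPNs two at a time: ten virtual WWPNs yield five maps, spread across the two physical ports. A sketch of just the pairing arithmetic (the real routine also weighs port availability when placing each pair):

v_port_wwpns = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
pairs = [' '.join(v_port_wwpns[i:i + 2])
         for i in range(0, len(v_port_wwpns), 2)]
print(len(pairs))  # 5
print(pairs[0])    # '0 1'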
Code Example #10
File: test_vfc_mapper.py  Project: tpeponas/pypowervm
    def test_derive_npiv_map(self):
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA45473B', '10:00:00:90:fa:45:17:58']

        # Virtual WWPNs can be faked, and simplified.
        v_port_wwpns = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

        # Run the derivation now
        resp = vfc_mapper.derive_npiv_map(vios_wraps, p_wwpns, v_port_wwpns)
        self.assertIsNotNone(resp)
        self.assertEqual(5, len(resp))

        # Make sure we only get two unique keys back.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual({'10000090FA45473B', '10000090FA451758'}, unique_keys)
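Note the mixed input formats in p_wwpns: the final assertion implies the WWPNs are normalized (colons stripped, upper-cased) before they are used as map keys. An equivalent of that normalization, inferred from the test rather than quoted from the library:

def norm_wwpn(wwpn):
    # Normalization inferred from the assertion above.
    return wwpn.replace(':', '').upper()

print(norm_wwpn('10:00:00:90:fa:45:17:58'))  # '10000090FA451758'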
Code Example #11
File: npiv.py  Project: adreznec/nova-powervm
    def _ensure_phys_ports_for_system(self, npiv_port_maps, vios_wraps,
                                      fabric):
        """Ensures that the npiv_port_map is correct for the system.

        Rare scenarios can occur where the physical port on the NPIV port
        map does not match the actual port.  This is generally caused when the
        last volume is removed from the VM, the VM is migrated to another host,
        and then a new volume is attached.

        Stale metadata would still be present at attach time (as it can't
        be cleaned out beforehand).  This method clears that up.

        :param npiv_port_maps: The existing port maps.
        :param vios_wraps: The Virtual I/O Server wraps.
        :param fabric: The name of the fabric
        :return: The npiv_port_maps.  May be unchanged.
        """
        # Check that all physical ports in the mappings belong to 'this'
        # set of VIOSs.
        if all(pvm_vfcm.find_vios_for_wwpn(vios_wraps, pm[0])[0]
               for pm in npiv_port_maps):
            LOG.debug("Physical ports check out - just return maps.")
            return npiv_port_maps

        # If ANY of the VIOS ports were not there, rebuild the port maps
        LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state." %
                  npiv_port_maps)
        v_wwpns = []
        for port_map in npiv_port_maps:
            v_wwpns.extend(port_map[1].split())
        self._set_fabric_state(fabric, FS_UNMAPPED)

        # Derive new maps and don't preserve existing maps
        npiv_port_maps = pvm_vfcm.derive_npiv_map(
            vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
        LOG.debug("Rebuilt port maps: %s" % npiv_port_maps)
        self._set_fabric_meta(fabric, npiv_port_maps)
        LOG.warning(_LW("Had to update the system metadata for the WWPNs "
                        "due to incorrect physical WWPNs on fabric "
                        "%(fabric)s"),
                    {'fabric': fabric}, instance=self.instance)

        return npiv_port_maps
Code Example #12
File: test_vfc_mapper.py  Project: tpeponas/pypowervm
    def test_add_map(self):
        """Validates the add_map method."""
        # Determine the vios original values
        vios_wrap = self.entries[0]
        vios1_orig_map_count = len(vios_wrap.vfc_mappings)

        # Subset the WWPNs on that VIOS
        fabric_wwpns = ['10000090FA5371F2']

        # Fake Virtual WWPNs
        v_fabric_wwpns = ['0', '1']

        # Get the mappings
        fabric_map = vfc_mapper.derive_npiv_map([vios_wrap], fabric_wwpns,
                                                v_fabric_wwpns)[0]

        # Make sure the map was not there initially.
        maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings,
                                    self.lpar_uuid,
                                    port_map=fabric_map)
        self.assertEqual(0, len(maps))

        # Now call the add action
        resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid,
                                  fabric_map)
        self.assertIsNotNone(resp)
        self.assertIsInstance(resp, pvm_vios.VFCMapping)

        # Verify the update is now found.
        maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings,
                                    self.lpar_uuid,
                                    port_map=fabric_map)
        self.assertEqual(1, len(maps))
        self.assertEqual(vios1_orig_map_count + 1, len(vios_wrap.vfc_mappings))

        # Try to add it again...it shouldn't re-add it because it's already
        # there.  Flip the WWPNs to verify the set-based query.
        fabric_map = ('10000090FA5371F2', '1 0')
        resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid,
                                  fabric_map)
        self.assertIsNone(resp)
        self.assertEqual(vios1_orig_map_count + 1, len(vios_wrap.vfc_mappings))

        # We should only find one here...the original add.  Not two even though
        # we've called add twice.
        maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings,
                                    self.lpar_uuid,
                                    port_map=fabric_map)
        self.assertEqual(1, len(maps))

        # This time, remove the backing port of the existing mapping and try
        # the add again.  It should return an updated mapping that contains
        # the backing port.  This simulates a VM migrating with a vfc mapping
        # where no volume had previously been detached.
        maps[0].element.remove(maps[0].backing_port.element)
        resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid,
                                  fabric_map)
        self.assertIsNotNone(resp)
        self.assertIsInstance(resp, pvm_vios.VFCMapping)
        self.assertIsNotNone(resp.backing_port)
        self.assertIn('Port', resp.child_order)

        # Pass in slot number to be set on the VFC adapter
        fabric_map = ('10000090FA5371F1', '2 3')
        resp = vfc_mapper.add_map(vios_wrap,
                                  'host_uuid',
                                  self.lpar_uuid,
                                  fabric_map,
                                  lpar_slot_num=3)
        self.assertIsNotNone(resp)
        self.assertEqual(vios1_orig_map_count + 2, len(vios_wrap.vfc_mappings))
        # Verify the update is now found.
        maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings,
                                    self.lpar_uuid,
                                    port_map=fabric_map)
        self.assertEqual(1, len(maps))
        self.assertEqual(3, maps[0].client_adapter.lpar_slot_num)
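The 'flip the WWPNs' step passes because the duplicate check in add_map evidently compares the virtual WWPNs as a set, so '1 0' matches the existing '0 1' mapping. A sketch of that comparison, assuming the set semantics the test implies:

# Order of the virtual WWPNs in the port map string does not matter
# when checking for an existing mapping (inferred from the test).
existing = set('0 1'.split())
candidate = set('1 0'.split())
print(existing == candidate)  # True -> add_map returns None (no re-add)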