Example 1
0
    def _configure_wwpns_for_migration(self, fabric):
        """Configures the WWPNs for a migration.

        During an NPIV migration, the WWPNs need to be flipped and attached to
        the management VM.  This is so that the peer WWPN is brought online.

        The WWPNs will be removed from the management partition via the
        pre_live_migration_on_destination method.  The WWPNs invocation is
        done prior to the migration, when the volume connector is gathered.

        :param fabric: The fabric to configure.
        :return: An updated port mapping.
        """
        LOG.info(
            _LI("Mapping instance %(inst)s to the mgmt partition for "
                "fabric %(fabric)s because the VM is migrating to "
                "this host."), {
                    'inst': self.instance.name,
                    'fabric': fabric
                })

        mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

        # When we migrate...flip the WWPNs around.  This is so the other
        # WWPN logs in on the target fabric.  But we should only flip new
        # WWPNs.  There may already be some on the overall fabric...and if
        # there are, we keep those 'as-is'
        #
        # TODO(thorst) pending API change should be able to indicate which
        # wwpn is active.
        port_maps = self._get_fabric_meta(fabric)
        existing_wwpns = []
        new_wwpns = []

        for port_map in port_maps:
            c_wwpns = port_map[1].split()

            # Only add it as a 'new' mapping if it isn't on a VIOS already.  If
            # it is, then we know that it has already been serviced, perhaps
            # by a previous volume.
            vios_w, vfc_map = pvm_vfcm.has_client_wwpns(
                self.stg_ftsk.feed, c_wwpns)
            if vfc_map is None:
                c_wwpns.reverse()
                new_wwpns.extend(c_wwpns)
            else:
                existing_wwpns.extend(c_wwpns)

        # Now derive the mapping to THESE VIOSes physical ports
        port_mappings = pvm_vfcm.derive_npiv_map(self.stg_ftsk.feed,
                                                 self._fabric_ports(fabric),
                                                 new_wwpns + existing_wwpns)

        # Add the port maps to the mgmt partition
        if len(new_wwpns) > 0:
            pvm_vfcm.add_npiv_port_mappings(self.adapter, self.host_uuid,
                                            mgmt_uuid, port_mappings)
        return port_mappings
Example 2
0
    def _configure_wwpns_for_migration(self, fabric):
        """Configures the WWPNs for a migration.

        During an NPIV migration, the WWPNs need to be flipped and attached to
        the management VM.  This is so that the peer WWPN is brought online.

        The WWPNs will be removed from the management partition via the
        pre_live_migration_on_destination method.  The WWPNs invocation is
        done prior to the migration, when the volume connector is gathered.

        :param fabric: The fabric to configure.
        :return: An updated port mapping.
        """
        LOG.info(_LI("Mapping instance %(inst)s to the mgmt partition for "
                     "fabric %(fabric)s because the VM is migrating to "
                     "this host."),
                 {'inst': self.instance.name, 'fabric': fabric})

        mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

        # When we migrate...flip the WWPNs around.  This is so the other
        # WWPN logs in on the target fabric.  But we should only flip new
        # WWPNs.  There may already be some on the overall fabric...and if
        # there are, we keep those 'as-is'
        #
        # TODO(thorst) pending API change should be able to indicate which
        # wwpn is active.
        port_maps = self._get_fabric_meta(fabric)
        existing_wwpns = []
        new_wwpns = []

        for port_map in port_maps:
            c_wwpns = port_map[1].split()

            # Only add it as a 'new' mapping if it isn't on a VIOS already.  If
            # it is, then we know that it has already been serviced, perhaps
            # by a previous volume.
            vios_w, vfc_map = pvm_vfcm.has_client_wwpns(self.stg_ftsk.feed,
                                                        c_wwpns)
            if vfc_map is None:
                c_wwpns.reverse()
                new_wwpns.extend(c_wwpns)
            else:
                existing_wwpns.extend(c_wwpns)

        # Now derive the mapping to THESE VIOSes physical ports
        port_mappings = pvm_vfcm.derive_npiv_map(
            self.stg_ftsk.feed, self._fabric_ports(fabric),
            new_wwpns + existing_wwpns)

        # Add the port maps to the mgmt partition
        if len(new_wwpns) > 0:
            pvm_vfcm.add_npiv_port_mappings(
                self.adapter, self.host_uuid, mgmt_uuid, port_mappings)
        return port_mappings
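
Examples 1 and 2 show the same _configure_wwpns_for_migration body in two formatting styles. The flip of the client WWPN pair is the core idea and can be illustrated without any pypowervm objects; the sketch below is a minimal stand-in in which already_on_vios replaces the pvm_vfcm.has_client_wwpns lookup and the WWPN values are invented.

    def flip_new_wwpns(port_maps, already_on_vios):
        """Reverse the client WWPN pair for maps not yet serviced by a VIOS.

        :param port_maps: Iterable of (phys_wwpn, 'cwwpn1 cwwpn2') tuples,
                          mirroring the fabric metadata format above.
        :param already_on_vios: Callable(c_wwpns) -> bool; stands in for the
                                pvm_vfcm.has_client_wwpns lookup.
        :return: Tuple of (new_wwpns, existing_wwpns) lists.
        """
        new_wwpns, existing_wwpns = [], []
        for _phys_wwpn, client_str in port_maps:
            c_wwpns = client_str.split()
            if already_on_vios(c_wwpns):
                # Already serviced (perhaps by a previous volume); keep as-is.
                existing_wwpns.extend(c_wwpns)
            else:
                # Flip so the peer WWPN logs in on the target fabric.
                c_wwpns.reverse()
                new_wwpns.extend(c_wwpns)
        return new_wwpns, existing_wwpns

    # Usage with made-up WWPN values:
    maps = [('phys1', 'AA BB'), ('phys2', 'CC DD')]
    print(flip_new_wwpns(maps, lambda _c: False))
    # (['BB', 'AA', 'DD', 'CC'], [])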
Example 3
0
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        Will check if the Fabric is mapped to the management partition.  If it
        is, then it will remove the mappings and update the fabric state. This
        is because, in order for the WWPNs to be on the fabric (for Cinder)
        before the VM is online, the WWPNs get mapped to the management
        partition.

        This method will remove the mappings from the management partition (if
        needed), and then assign them to the instance itself.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed

        # If currently mapped to the mgmt partition, remove the mappings so
        # that they can be added to the client.
        if self._get_fabric_state(fabric) == FS_MGMT_MAPPED:
            mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

            # Each port mapping should be removed from the VIOS.
            for npiv_port_map in npiv_port_maps:
                vios_w = pvm_vfcm.find_vios_for_port_map(
                    vios_wraps, npiv_port_map)
                ls = [LOG.info, _LI("Removing NPIV mapping for mgmt partition "
                                    "for instance %(inst)s on VIOS %(vios)s."),
                      {'inst': self.instance.name, 'vios': vios_w.name}]

                # Add the subtask to remove the map from the mgmt partition
                self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                    pvm_vfcm.remove_maps, mgmt_uuid, port_map=npiv_port_map,
                    logspec=ls)

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        volume_id = self.connection_info['data']['volume_id']
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
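
The method above never touches a VIOS directly; it only queues functor subtasks (with a deferred logspec) on the shared storage FeedTask, which flushes them in one pass. The toy class below is not pypowervm's wrapper task, just an illustrative sketch of that queue-then-execute pattern; the class, names, and usage values are invented for the example.

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)


    class ToyWrapperTask(object):
        """Illustrative stand-in for a per-VIOS wrapper task (not pypowervm)."""

        def __init__(self):
            self._subtasks = []

        def add_functor_subtask(self, func, *args, **kwargs):
            # logspec=[log_fn, msg, msg_args] is only emitted when the
            # subtask actually runs, not when it is queued.
            logspec = kwargs.pop('logspec', None)
            self._subtasks.append((func, args, kwargs, logspec))

        def execute(self):
            # Nothing happens until execute(); this is what lets the volume
            # driver batch all of its VIOS updates into a single flush.
            for func, args, kwargs, logspec in self._subtasks:
                if logspec:
                    log_fn, msg, msg_args = logspec
                    log_fn(msg, msg_args)
                func(*args, **kwargs)

    # Usage with hypothetical values:
    toy = ToyWrapperTask()
    toy.add_functor_subtask(
        lambda uuid: LOG.info("map removed for %s", uuid), 'mgmt-uuid',
        logspec=[LOG.info, "Removing NPIV mapping for %(uuid)s",
                 {'uuid': 'mgmt-uuid'}])
    toy.execute()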
Example 4
0
    def test_get_mgmt_partition(self):
        self.apt.read.return_value = self.resp
        mp_wrap = mgmt.get_mgmt_partition(self.apt)
        self.assertIsInstance(mp_wrap, pvm_lpar.LPAR)
        self.assertTrue(mp_wrap.is_mgmt_partition)
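
The test above only asserts that mgmt.get_mgmt_partition returns the one LPAR wrapper flagged as the management partition. Conceptually the lookup is a filter over the LPAR feed; the toy below illustrates that idea with plain Python objects and is not the pypowervm implementation.

    class ToyLPAR(object):
        """Minimal stand-in for an LPAR wrapper."""

        def __init__(self, name, is_mgmt_partition):
            self.name = name
            self.is_mgmt_partition = is_mgmt_partition


    def toy_get_mgmt_partition(lpars):
        """Return the single LPAR marked as the management partition."""
        mgmt_lpars = [lpar for lpar in lpars if lpar.is_mgmt_partition]
        if len(mgmt_lpars) != 1:
            raise ValueError('Expected exactly one management partition, '
                             'found %d' % len(mgmt_lpars))
        return mgmt_lpars[0]


    # Usage:
    feed = [ToyLPAR('vm1', False), ToyLPAR('mgmt', True)]
    assert toy_get_mgmt_partition(feed).name == 'mgmt'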
Example 5
0
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        Will check if the Fabric is mapped to the management partition.  If it
        is, then it will remove the mappings and update the fabric state. This
        is because, in order for the WWPNs to be on the fabric (for Cinder)
        before the VM is online, the WWPNs get mapped to the management
        partition.

        This method will remove the mappings from the management partition (if
        needed), and then assign them to the instance itself.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed

        # If currently mapped to the mgmt partition, remove the mappings so
        # that they can be added to the client.
        if self._get_fabric_state(fabric) == FS_MGMT_MAPPED:
            mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

            # Each port mapping should be removed from the VIOS.
            for npiv_port_map in npiv_port_maps:
                vios_w = pvm_vfcm.find_vios_for_port_map(
                    vios_wraps, npiv_port_map)
                ls = [
                    LOG.info,
                    _LI("Removing NPIV mapping for mgmt partition "
                        "for instance %(inst)s on VIOS %(vios)s."), {
                            'inst': self.instance.name,
                            'vios': vios_w.name
                        }
                ]

                # Add the subtask to remove the map from the mgmt partition
                self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                    pvm_vfcm.remove_maps,
                    mgmt_uuid,
                    port_map=npiv_port_map,
                    logspec=ls)

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            ls = [
                LOG.info,
                _LI("Adding NPIV mapping for instance %(inst)s "
                    "for Virtual I/O Server %(vios)s."), {
                        'inst': self.instance.name,
                        'vios': vios_w.name
                    }
            ]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        volume_id = self.connection_info['data']['volume_id']
        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
Example 6
0
    def wwpns(self):
        """Builds the WWPNs of the adapters that will connect the ports."""
        vios_wraps, mgmt_uuid = None, None
        resp_wwpns = []

        # If this is a new mapping altogether, the WWPNs need to be logged
        # into the fabric so that Cinder can make use of them.  This is a bit
        # of a catch-22 because the LPAR doesn't exist yet.  So a mapping will
        # be created against the mgmt partition and then upon VM creation, the
        # mapping will be moved over to the VM.
        #
        # If a mapping already exists, we can instead just pull the data off
        # of the system metadata from the nova instance.
        for fabric in self._fabric_names():
            fc_state = self._get_fabric_state(fabric)
            LOG.info(
                _LI("NPIV wwpns fabric state=%(st)s for "
                    "instance %(inst)s") % {
                        'st': fc_state,
                        'inst': self.instance.name
                    })

            if self._is_initial_wwpn(fc_state, fabric):
                # At this point we've determined that we need to do a mapping.
                # So we go and obtain the mgmt uuid and the VIOS wrappers.
                # We only do this for the first loop through so as to ensure
                # that we do not keep invoking these expensive calls
                # unnecessarily.
                if mgmt_uuid is None:
                    mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

                    # The VIOS wrappers are also not set at this point.  Seed
                    # them as well.  Will get reused on subsequent loops.
                    vios_wraps = self.stg_ftsk.feed

                # Derive the virtual to physical port mapping
                port_maps = pvm_vfcm.derive_base_npiv_map(
                    vios_wraps, self._fabric_ports(fabric),
                    self._ports_per_fabric())

                # Every loop through, we reverse the vios wrappers.  This is
                # done so that if Fabric A only has 1 port, it goes on the
                # first VIOS.  Then Fabric B would put its port on a different
                # VIOS.  This is a form of multipathing (so that the paths are
                # not restricted to a single VIOS).
                vios_wraps.reverse()

                # If the fabrics are unmapped, we need to map them temporarily
                # to the management partition.
                LOG.info(
                    _LI("Adding NPIV Mapping with mgmt partition for "
                        "instance %s") % self.instance.name)
                port_maps = pvm_vfcm.add_npiv_port_mappings(
                    self.adapter, self.host_uuid, mgmt_uuid, port_maps)

                # Set the fabric meta (which indicates on the instance how
                # the fabric is mapped to the physical port) and the fabric
                # state.
                self._set_fabric_meta(fabric, port_maps)
                self._set_fabric_state(fabric, FS_MGMT_MAPPED)
            elif self._is_migration_wwpn(fc_state):
                port_maps = self._configure_wwpns_for_migration(fabric)

                # This won't actually get saved by the process.  The save will
                # only occur after the 'post migration'.  But if there are
                # multiple volumes, their WWPNs calls will subsequently see
                # the data saved temporarily here.
                self._set_fabric_meta(fabric, port_maps)
            else:
                # This specific fabric had been previously set.  Just pull
                # from the meta (as it is likely already mapped to the
                # instance)
                port_maps = self._get_fabric_meta(fabric)

            # Port map is set by either conditional, but may be set to None.
            # If not None, then add the WWPNs to the response.
            if port_maps is not None:
                for mapping in port_maps:
                    # Only add the first WWPN.  That is the one that will be
                    # logged into the fabric.
                    resp_wwpns.append(mapping[1].split()[0])

        # The return object needs to be a list for the volume connector.
        return resp_wwpns
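
The value returned to the volume connector is simply the first client WWPN of every port mapping, flattened across fabrics, since that is the WWPN that is logged in to the fabric. A standalone sketch of that extraction, with invented fabric names and WWPN values:

    def connector_wwpns(fabric_port_maps):
        """Pick the fabric-login WWPN out of each NPIV port mapping.

        :param fabric_port_maps: Dict of fabric name -> list of
                                 (phys_wwpn, 'cwwpn1 cwwpn2') tuples or None.
        :return: Flat list of the first client WWPN of each mapping.
        """
        resp_wwpns = []
        for port_maps in fabric_port_maps.values():
            if port_maps is None:
                continue
            for mapping in port_maps:
                # Only the first WWPN is logged in to the fabric.
                resp_wwpns.append(mapping[1].split()[0])
        return resp_wwpns

    # Usage with made-up values:
    print(connector_wwpns({'A': [('phys1', 'AA BB')],
                           'B': [('phys2', 'CC DD')]}))
    # ['AA', 'CC']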
Example 7
0
    def pre_live_migration_on_destination(self, src_mig_data, dest_mig_data):
        """Perform pre live migration steps for the volume on the target host.

        This method performs any pre live migration that is needed.

        Certain volume connectors may need to pass data from the source host
        to the target.  This may be required to determine how volumes connect
        through the Virtual I/O Servers.

        This method will be called after the pre_live_migration_on_source
        method.  The data from the pre_live call will be passed in via the
        mig_data.  This method should put its output into the dest_mig_data.

        :param src_mig_data: The migration data from the source server.
        :param dest_mig_data: The migration data for the destination server.
                              If the volume connector needs to provide
                              information to the live_migration command, it
                              should be added to this dictionary.
        """
        vios_wraps = self.stg_ftsk.feed
        mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

        # Each mapping should attempt to remove itself from the management
        # partition.
        for fabric in self._fabric_names():
            npiv_port_maps = self._get_fabric_meta(fabric)

            # Need to first derive the port mappings that can be passed back
            # to the source system for the live migration call.  This tells
            # the source system what 'vfc mappings' to pass in on the live
            # migration command.
            slots = src_mig_data['npiv_fabric_slots_%s' % fabric]
            fabric_mapping = pvm_vfcm.build_migration_mappings_for_fabric(
                vios_wraps, self._fabric_ports(fabric), slots)
            dest_mig_data['npiv_fabric_mapping_%s' % fabric] = fabric_mapping

            # Next we need to remove the mappings off the mgmt partition.
            for npiv_port_map in npiv_port_maps:
                ls = [
                    LOG.info,
                    _LI("Removing mgmt NPIV mapping for instance "
                        "%(inst)s for fabric %(fabric)s."), {
                            'inst': self.instance.name,
                            'fabric': fabric
                        }
                ]
                vios_w, vfc_map = pvm_vfcm.find_vios_for_vfc_wwpns(
                    vios_wraps, npiv_port_map[1].split())

                if vios_w is not None:
                    # Add the subtask to remove the mapping from the management
                    # partition.
                    task_wrapper = self.stg_ftsk.wrapper_tasks[vios_w.uuid]
                    task_wrapper.add_functor_subtask(
                        pvm_vfcm.remove_maps,
                        mgmt_uuid,
                        client_adpt=vfc_map.client_adapter,
                        logspec=ls)
                else:
                    LOG.warn(
                        _LW("No storage connections found between the "
                            "Virtual I/O Servers and FC Fabric "
                            "%(fabric)s. The connection might be removed "
                            "already."), {'fabric': fabric})

        # TODO(thorst) Find a better place for this execute.  Works for now
        # as the stg_ftsk is all local.  Also won't do anything if there
        # happen to be no fabric changes.
        self.stg_ftsk.execute()

        # Collate all of the individual fabric mappings into a single element.
        full_map = []
        for key, value in dest_mig_data.items():
            if key.startswith('npiv_fabric_mapping_'):
                full_map.extend(value)
        dest_mig_data['vfc_lpm_mappings'] = full_map
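
The final collation step does not depend on any PowerVM API and can be exercised on its own. The sketch below reproduces that last loop with invented keys and mapping strings:

    def collate_fabric_mappings(dest_mig_data):
        """Flatten all per-fabric NPIV mappings into 'vfc_lpm_mappings'."""
        full_map = []
        for key, value in dest_mig_data.items():
            if key.startswith('npiv_fabric_mapping_'):
                full_map.extend(value)
        dest_mig_data['vfc_lpm_mappings'] = full_map
        return dest_mig_data

    # Usage with hypothetical mapping strings:
    data = {'npiv_fabric_mapping_A': ['map1', 'map2'],
            'npiv_fabric_mapping_B': ['map3']}
    print(collate_fabric_mappings(data)['vfc_lpm_mappings'])
    # ['map1', 'map2', 'map3']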
Example 8
0
    def wwpns(self):
        """Builds the WWPNs of the adapters that will connect the ports."""
        vios_wraps, mgmt_uuid = None, None
        resp_wwpns = []

        # If this is a new mapping altogether, the WWPNs need to be logged
        # into the fabric so that Cinder can make use of them.  This is a bit
        # of a catch-22 because the LPAR doesn't exist yet.  So a mapping will
        # be created against the mgmt partition and then upon VM creation, the
        # mapping will be moved over to the VM.
        #
        # If a mapping already exists, we can instead just pull the data off
        # of the system metadata from the nova instance.
        for fabric in self._fabric_names():
            fc_state = self._get_fabric_state(fabric)
            LOG.info(_LI("NPIV wwpns fabric state=%(st)s for "
                         "instance %(inst)s") %
                     {'st': fc_state, 'inst': self.instance.name})

            if self._is_initial_wwpn(fc_state, fabric):
                # At this point we've determined that we need to do a mapping.
                # So we go and obtain the mgmt uuid and the VIOS wrappers.
                # We only do this for the first loop through so as to ensure
                # that we do not keep invoking these expensive calls
                # unnecessarily.
                if mgmt_uuid is None:
                    mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

                    # The VIOS wrappers are also not set at this point.  Seed
                    # them as well.  Will get reused on subsequent loops.
                    vios_wraps = self.stg_ftsk.feed

                # Derive the virtual to physical port mapping
                port_maps = pvm_vfcm.derive_base_npiv_map(
                    vios_wraps, self._fabric_ports(fabric),
                    self._ports_per_fabric())

                # Every loop through, we reverse the vios wrappers.  This is
                # done so that if Fabric A only has 1 port, it goes on the
                # first VIOS.  Then Fabric B would put its port on a different
                # VIOS.  This is a form of multipathing (so that the paths are
                # not restricted to a single VIOS).
                vios_wraps.reverse()

                # If the fabrics are unmapped, we need to map them temporarily
                # to the management partition.
                LOG.info(_LI("Adding NPIV Mapping with mgmt partition for "
                             "instance %s") % self.instance.name)
                port_maps = pvm_vfcm.add_npiv_port_mappings(
                    self.adapter, self.host_uuid, mgmt_uuid, port_maps)

                # Set the fabric meta (which indicates on the instance how
                # the fabric is mapped to the physical port) and the fabric
                # state.
                self._set_fabric_meta(fabric, port_maps)
                self._set_fabric_state(fabric, FS_MGMT_MAPPED)
            elif self._is_migration_wwpn(fc_state):
                port_maps = self._configure_wwpns_for_migration(fabric)

                # This won't actually get saved by the process.  The save will
                # only occur after the 'post migration'.  But if there are
                # multiple volumes, their WWPNs calls will subsequently see
                # the data saved temporarily here.
                self._set_fabric_meta(fabric, port_maps)
            else:
                # This specific fabric had been previously set.  Just pull
                # from the meta (as it is likely already mapped to the
                # instance)
                port_maps = self._get_fabric_meta(fabric)

            # Port map is set by either conditional, but may be set to None.
            # If not None, then add the WWPNs to the response.
            if port_maps is not None:
                for mapping in port_maps:
                    # Only add the first WWPN.  That is the one that will be
                    # logged into the fabric.
                    resp_wwpns.append(mapping[1].split()[0])

        # The return object needs to be a list for the volume connector.
        return resp_wwpns
Example 9
0
    def pre_live_migration_on_destination(self, src_mig_data, dest_mig_data):
        """Perform pre live migration steps for the volume on the target host.

        This method performs any pre live migration that is needed.

        Certain volume connectors may need to pass data from the source host
        to the target.  This may be required to determine how volumes connect
        through the Virtual I/O Servers.

        This method will be called after the pre_live_migration_on_source
        method.  The data from the pre_live call will be passed in via the
        mig_data.  This method should put its output into the dest_mig_data.

        :param src_mig_data: The migration data from the source server.
        :param dest_mig_data: The migration data for the destination server.
                              If the volume connector needs to provide
                              information to the live_migration command, it
                              should be added to this dictionary.
        """
        vios_wraps = self.stg_ftsk.feed
        mgmt_uuid = mgmt.get_mgmt_partition(self.adapter).uuid

        # Each mapping should attempt to remove itself from the management
        # partition.
        for fabric in self._fabric_names():
            npiv_port_maps = self._get_fabric_meta(fabric)

            # Need to first derive the port mappings that can be passed back
            # to the source system for the live migration call.  This tells
            # the source system what 'vfc mappings' to pass in on the live
            # migration command.
            slots = src_mig_data['npiv_fabric_slots_%s' % fabric]
            fabric_mapping = pvm_vfcm.build_migration_mappings_for_fabric(
                vios_wraps, self._fabric_ports(fabric), slots)
            dest_mig_data['npiv_fabric_mapping_%s' % fabric] = fabric_mapping

            # Next we need to remove the mappings off the mgmt partition.
            for npiv_port_map in npiv_port_maps:
                ls = [LOG.info, _LI("Removing mgmt NPIV mapping for instance "
                                    "%(inst)s for fabric %(fabric)s."),
                      {'inst': self.instance.name, 'fabric': fabric}]
                vios_w, vfc_map = pvm_vfcm.find_vios_for_vfc_wwpns(
                    vios_wraps, npiv_port_map[1].split())

                if vios_w is not None:
                    # Add the subtask to remove the mapping from the management
                    # partition.
                    task_wrapper = self.stg_ftsk.wrapper_tasks[vios_w.uuid]
                    task_wrapper.add_functor_subtask(
                        pvm_vfcm.remove_maps, mgmt_uuid,
                        client_adpt=vfc_map.client_adapter, logspec=ls)
                else:
                    LOG.warn(_LW("No storage connections found between the "
                                 "Virtual I/O Servers and FC Fabric "
                                 "%(fabric)s. The connection might be removed "
                                 "already."), {'fabric': fabric})

        # TODO(thorst) Find a better place for this execute.  Works for now
        # as the stg_ftsk is all local.  Also won't do anything if there
        # happen to be no fabric changes.
        self.stg_ftsk.execute()

        # Collate all of the individual fabric mappings into a single element.
        full_map = []
        for key, value in dest_mig_data.items():
            if key.startswith('npiv_fabric_mapping_'):
                full_map.extend(value)
        dest_mig_data['vfc_lpm_mappings'] = full_map
Example 10
0
    def test_get_mgmt_partition(self):
        self.apt.read.return_value = self.resp
        mp_wrap = mgmt.get_mgmt_partition(self.apt)
        self.assertIsInstance(mp_wrap, pvm_lpar.LPAR)
        self.assertTrue(mp_wrap.is_mgmt_partition)