Example #1
def dlt_lpar(adapter, lpar_uuid):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API
    :param lpar_uuid: The lpar to delete
    """
    # Attempt to delete the VM.
    try:
        LOG.info(_LI('Deleting virtual machine. LPARID: %s'), lpar_uuid)

        # Ensure any vterms are closed.  Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info(_LI('Virtual machine delete status: %d'), resp.status)
        return resp
    except pvm_exc.HttpError as e:
        if e.response and e.response.status == 404:
            LOG.warning(_LW('Virtual Machine not found. LPAR_ID: %s'),
                        lpar_uuid)
        else:
            LOG.error(_LE('HttpError deleting virtual machine. LPARID: %s'),
                      lpar_uuid)
            raise
    except pvm_exc.Error:
        # Attempting to close vterm did not help so raise exception
        LOG.error(_LE('Virtual machine delete failed: LPARID=%s'), lpar_uuid)
        raise
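A minimal calling sketch for the helper above (an illustration, not part of the original example: it assumes a pypowervm Session/Adapter configured for the local management endpoint, and the LPAR UUID is a placeholder):

import pypowervm.adapter as pvm_adpt

# Build the REST session and adapter used by all pypowervm calls.
sess = pvm_adpt.Session()      # credentials/endpoint per local configuration
adpt = pvm_adpt.Adapter(sess)

# Delete the partition; pvm_exc.Error propagates if the delete fails.
resp = dlt_lpar(adpt, '066A4EDB-EE58-4DAA-B03C-000000000000')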
Example #2
    def execute(self, lpar_wrap):
        LOG.info(_LI('Unplugging the Network Interfaces from instance %s'),
                 self.instance.name)

        # If the LPAR is not in a state that allows deletes, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {'inst': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for vif in self.network_info:
            for cna_w in cna_w_list:
                # If the MAC address matched, attempt the delete.
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    LOG.info(_LI('Deleting VIF with mac %(mac)s for instance '
                                 '%(inst)s.'),
                             {'mac': vif['address'],
                              'inst': self.instance.name},
                             instance=self.instance)
                    try:
                        cna_w.delete()
                    except Exception as e:
                        LOG.error(_LE('Unable to unplug VIF with mac %(mac)s '
                                      'for instance %(inst)s.'),
                                  {'mac': vif['address'],
                                   'inst': self.instance.name},
                                  instance=self.instance)
                        LOG.error(e)
                        raise VirtualInterfaceUnplugException()

                    # Break from the loop as we had a successful unplug.
                    # This prevents falling through to the 'else' clause.
                    break
            else:
                LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                                'instance %(inst)s.  The VIF was not found on '
                                'the instance.'),
                            {'mac': vif['address'],
                             'inst': self.instance.name},
                            instance=self.instance)
        return cna_w_list
Example #3
    def execute(self, lpar_wrap):
        LOG.info(_LI('Plugging the Network Interfaces to instance %s'),
                 self.instance.name)

        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Trim the VIFs down to the ones that haven't yet been created.
        crt_vifs = []
        for vif in self.network_info:
            for cna_w in cna_w_list:
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    break
            else:
                crt_vifs.append(vif)

        # If there are no vifs to create, then just exit immediately.
        if len(crt_vifs) == 0:
            return []

        # Check to see if the LPAR is OK to add VIFs to.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s.  The '
                          'VM was in a state where VIF plugging is not '
                          'acceptable.  The reason from the system is: '
                          '%(reason)s'),
                      {'sys': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # For the VIFs, run the creates (and wait for the events back)
        try:
            with self.virt_api.wait_for_instance_event(
                    self.instance, self._get_vif_events(),
                    deadline=CONF.vif_plugging_timeout,
                    error_callback=self._vif_callback_failed):
                for vif in crt_vifs:
                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
                                 '%(sys)s'),
                             {'mac': vif['address'],
                              'sys': self.instance.name},
                             instance=self.instance)
                    vm.crt_vif(self.adapter, self.instance, self.host_uuid,
                               vif)
        except eventlet.timeout.Timeout:
            LOG.error(_LE('Error waiting for VIF to be created for instance '
                          '%(sys)s'), {'sys': self.instance.name},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # Return the list of created VIFs.
        return cna_w_list
Example #4
    def execute(self, lpar_wrap):
        LOG.info(_LI('Unplugging the Network Interfaces from instance %s'),
                 self.instance.name)

        # If the LPAR is not in a state that allows deletes, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {'inst': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for vif in self.network_info:
            for cna_w in cna_w_list:
                # If the MAC address matched, attempt the delete.
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    LOG.info(_LI('Deleting VIF with mac %(mac)s for instance '
                                 '%(inst)s.'), {'mac': vif['address'],
                                                'inst': self.instance.name},
                             instance=self.instance)
                    try:
                        cna_w.delete()
                    except Exception as e:
                        LOG.error(_LE('Unable to unplug VIF with mac %(mac)s '
                                      'for instance %(inst)s.'),
                                  {'mac': vif['address'],
                                   'inst': self.instance.name},
                                  instance=self.instance)
                        LOG.error(e)
                        raise VirtualInterfaceUnplugException()

                    # Break from the loop as we had a successful unplug.
                    # This prevents falling through to the 'else' clause.
                    break
            else:
                LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                                'instance %(inst)s.  The VIF was not found on '
                                'the instance.'),
                            {'mac': vif['address'],
                             'inst': self.instance.name},
                            instance=self.instance)
        return cna_w_list
Example #5
    def live_migration(self, context, migrate_data):
        """Start the live migration.

        :param context: security context
        :param migrate_data: migration data from src and dest host.
        """
        LOG.debug("Starting migration.", instance=self.instance)
        LOG.debug("Migrate data: %s" % migrate_data)

        # Get the vFC and vSCSI live migration mappings
        dest_pre_lm_data = migrate_data.get('pre_live_migration_result', {})
        vfc_mappings = dest_pre_lm_data.get('vfc_lpm_mappings')
        vscsi_mappings = dest_pre_lm_data.get('vscsi_lpm_mappings')

        try:
            # Migrate the LPAR!
            mig.migrate_lpar(self.lpar_w,
                             self.dest_data['dest_sys_name'],
                             validate_only=False,
                             tgt_mgmt_svr=self.dest_data['dest_ip'],
                             tgt_mgmt_usr=self.dest_data.get('dest_user_id'),
                             virtual_fc_mappings=vfc_mappings,
                             virtual_scsi_mappings=vscsi_mappings)

        except Exception:
            LOG.error(_LE("Live migration failed."), instance=self.instance)
            raise
        finally:
            LOG.debug("Finished migration.", instance=self.instance)
    def live_migration(self, context, migrate_data):
        """Start the live migration.

        :param context: security context
        :param migrate_data: migration data from src and dest host.
        """
        LOG.debug("Starting migration.", instance=self.instance)
        LOG.debug("Migrate data: %s" % migrate_data)

        # Get the vFC and vSCSI live migration mappings
        dest_pre_lm_data = migrate_data.get('pre_live_migration_result', {})
        vfc_mappings = dest_pre_lm_data.get('vfc_lpm_mappings')
        vscsi_mappings = dest_pre_lm_data.get('vscsi_lpm_mappings')

        try:
            # Migrate the LPAR!
            mig.migrate_lpar(self.lpar_w, self.dest_data['dest_sys_name'],
                             validate_only=False,
                             tgt_mgmt_svr=self.dest_data['dest_ip'],
                             tgt_mgmt_usr=self.dest_data.get('dest_user_id'),
                             virtual_fc_mappings=vfc_mappings,
                             virtual_scsi_mappings=vscsi_mappings)

        except Exception:
            LOG.error(_LE("Live migration failed."), instance=self.instance)
            raise
        finally:
            LOG.debug("Finished migration.", instance=self.instance)
Example #7
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is None:
                LOG.error(_LE("Mappings were not able to find a proper VIOS. "
                              "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id, instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))

            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
Example #8
def _push_vif_event(adapter, action, vif_w, instance, vif_type):
    """Push a custom event to the REST server for a vif action (plug/unplug).

    This event prompts the neutron agent to mark the port up or down.

    :param adapter: The pypowervm adapter.
    :param action: The action taken on the vif - either 'plug' or 'unplug'
    :param vif_w: The pypowervm wrapper of the affected vif (CNA, VNIC, etc.)
    :param instance: The nova instance for the event
    :param vif_type: The type of event source (pvm_sea, ovs, bridge,
                     pvm_sriov etc)
    """
    data = vif_w.href
    detail = jsonutils.dumps(
        dict(provider=EVENT_PROVIDER_ID,
             action=action,
             mac=vif_w.mac,
             type=vif_type))
    event = pvm_evt.Event.bld(adapter, data, detail)
    try:
        event = event.create()
        LOG.debug('Pushed custom event for consumption by neutron agent: '
                  '%s', str(event), instance=instance)
    except Exception:
        LOG.error(_LE('Custom VIF event push failed.  %s'),
                  str(event),
                  instance=instance)
        raise
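A hedged usage sketch: adapter, cna_w, and instance are assumed to come from the surrounding driver code, and 'pvm_sea' is one of the source types listed in the docstring.

# Notify the neutron agent that a freshly plugged vif is up...
_push_vif_event(adapter, 'plug', cna_w, instance, 'pvm_sea')

# ...and mark the port down again on teardown.
_push_vif_event(adapter, 'unplug', cna_w, instance, 'pvm_sea')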
Example #9
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # This is a default implementation that most implementations will
        # require.

        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'),
                        {'mac': vif['address'], 'inst': self.instance.name})
            return None

        LOG.info(_LI('Deleting VIF with mac %(mac)s for instance %(inst)s.'),
                 {'mac': vif['address'], 'inst': self.instance.name})
        try:
            cna_w.delete()
        except Exception as e:
            LOG.error(_LE('Unable to unplug VIF with mac %(mac)s for instance '
                          '%(inst)s.'), {'mac': vif['address'],
                                         'inst': self.instance.name})
            LOG.exception(e)
            raise VirtualInterfaceUnplugException()
        return cna_w
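Since the method only reads vif['address'], a minimal call needs little more than the MAC; a sketch (vif_drv is an assumed instance of the surrounding driver class):

vif = {'address': 'fa:16:3e:aa:bb:cc'}
cna_w = vif_drv.unplug(vif)    # returns None if the VIF was not found
if cna_w is not None:
    LOG.debug('Removed adapter with mac %s', cna_w.mac)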
Example #10
    def live_migration(self, context, migrate_data):
        """Start the live migration.

        :param context: security context
        :param migrate_data: a PowerVMLiveMigrateData object
        """
        LOG.debug("Starting migration.", instance=self.instance)
        LOG.debug("Migrate data: %s" % migrate_data)

        # The passed in mig data has more info (dest data added), so replace
        self.mig_data = migrate_data
        # Get the vFC and vSCSI live migration mappings
        vol_data = migrate_data.vol_data
        vfc_mappings = vol_data.get('vfc_lpm_mappings')
        if vfc_mappings is not None:
            vfc_mappings = jsonutils.loads(vfc_mappings)
        vscsi_mappings = vol_data.get('vscsi_lpm_mappings')
        if vscsi_mappings is not None:
            vscsi_mappings = jsonutils.loads(vscsi_mappings)

        try:
            # Migrate the LPAR!
            mig.migrate_lpar(
                self.lpar_w, self.mig_data.dest_sys_name,
                validate_only=False, tgt_mgmt_svr=self.mig_data.dest_ip,
                tgt_mgmt_usr=self.mig_data.dest_user_id,
                virtual_fc_mappings=vfc_mappings,
                virtual_scsi_mappings=vscsi_mappings,
                sdn_override=True, vlan_check_override=True)

        except Exception:
            LOG.error(_LE("Live migration failed."), instance=self.instance)
            raise
        finally:
            LOG.debug("Finished migration.", instance=self.instance)
Example #11
    def _vif_callback_failed(self, event_name, instance):
        LOG.error(_LE('VIF Plug failure for callback on event '
                      '%(event)s for instance %(uuid)s'),
                  {'event': event_name, 'uuid': instance.uuid})
        if CONF.vif_plugging_is_fatal:
            raise exception.VirtualInterfaceCreateException()
Example #12
    def migration_abort(self):
        """Abort the migration if the operation exceeds the configured timeout.
        """
        LOG.debug("Abort migration.", instance=self.instance)
        try:
            mig.migrate_abort(self.lpar_w)
        except Exception as ex:
            LOG.error(_LE("Abort of live migration has failed. "
                          "This is non-blocking. "
                          "Exception is logged below."))
            LOG.exception(ex)
Example #13
    def execute_impl(self):
        if self.nvram_mgr is None:
            return

        try:
            self.nvram_mgr.store(self.instance, immediate=self.immediate)
        except Exception as e:
            LOG.exception(_LE('Unable to store NVRAM for instance '
                              '%(name)s. Exception: %(reason)s'),
                          {'name': self.instance.name,
                           'reason': six.text_type(e)},
                          instance=self.instance)
Example #14
    def execute_impl(self):
        if self.nvram_mgr is None:
            return

        try:
            self.nvram_mgr.store(self.instance, immediate=self.immediate)
        except Exception as e:
            LOG.exception(_LE('Unable to store NVRAM for instance '
                              '%(name)s. Exception: %(reason)s'),
                          {'name': self.instance.name,
                           'reason': six.text_type(e)},
                          instance=self.instance)
Example #15
    def live_migration(self, context, migrate_data):
        """Start the live migration.

        :param context: security context
        :param migrate_data: a PowerVMLiveMigrateData object
        """
        LOG.debug("Starting migration.", instance=self.instance)
        LOG.debug("Migrate data: %s", migrate_data, instance=self.instance)

        # The passed in mig data has more info (dest data added), so replace
        self.mig_data = migrate_data

        # Get the vFC and vSCSI live migration mappings
        vol_data = migrate_data.vol_data
        vfc_mappings = vol_data.get('vfc_lpm_mappings')
        if vfc_mappings is not None:
            vfc_mappings = jsonutils.loads(vfc_mappings)
        vscsi_mappings = vol_data.get('vscsi_lpm_mappings')
        if vscsi_mappings is not None:
            vscsi_mappings = jsonutils.loads(vscsi_mappings)

        # Run the pre-live migration on the network objects
        network_infos = self.instance.info_cache.network_info
        trunks_to_del = []
        for network_info in network_infos:
            trunks_to_del.extend(vif.pre_live_migrate_at_source(
                self.drvr.adapter, self.drvr.host_uuid, self.instance,
                network_info))

        # Convert the network mappings into something the API can understand.
        vlan_mappings = self._convert_nl_io_mappings(
            migrate_data.vea_vlan_mappings)

        try:
            # Migrate the LPAR!
            mig.migrate_lpar(
                self.lpar_w, self.mig_data.dest_sys_name,
                validate_only=False, tgt_mgmt_svr=self.mig_data.dest_ip,
                tgt_mgmt_usr=self.mig_data.dest_user_id,
                virtual_fc_mappings=vfc_mappings,
                virtual_scsi_mappings=vscsi_mappings,
                vlan_mappings=vlan_mappings, sdn_override=True,
                vlan_check_override=True)

            # Delete the source side network trunk adapters
            for trunk_to_del in trunks_to_del:
                trunk_to_del.delete()
        except Exception:
            LOG.error(_LE("Live migration failed."), instance=self.instance)
            raise
        finally:
            LOG.debug("Finished migration.", instance=self.instance)
Example #16
    def migration_abort(self):
        """Abort the migration.

        Invoked if the operation exceeds the configured timeout.
        """
        LOG.debug("Abort migration.", instance=self.instance)
        try:
            mig.migrate_abort(self.lpar_w)
        except Exception as ex:
            LOG.error(_LE("Abort of live migration has failed. "
                          "This is non-blocking. Exception is logged below."),
                      instance=self.instance)
            LOG.exception(ex, instance=self.instance)
Example #17
    def fetch(self, instance):
        """Fetch the NVRAM for an instance.

        :param instance: The instance to fetch the NVRAM for.
        :returns: The NVRAM data for the instance.
        """
        try:
            return self._api.fetch(instance)
        except Exception as e:
            LOG.exception(_LE('Could not update NVRAM: %s'), e,
                          instance=instance)
            raise api.NVRAMDownloadException(instance=instance.name,
                                             reason=six.text_type(e))
Example #18
    def fetch(self, instance):
        """Fetch the NVRAM for an instance.

        :param instance: The instance to fetch the NVRAM for.
        :returns: The NVRAM data for the instance.
        """
        try:
            return self._api.fetch(instance)
        except Exception as e:
            LOG.exception(_LE('Could not update NVRAM: %s'), e,
                          instance=instance)
            raise api.NVRAMDownloadException(instance=instance.name,
                                             reason=six.text_type(e))
Example #19
    def execute_impl(self):
        if self.nvram_mgr is None:
            LOG.info(_LI("No op for NVRAM delete."), instance=self.instance)
            return

        LOG.info(_LI('Deleting NVRAM for instance: %s'),
                 self.instance.name, instance=self.instance)
        try:
            self.nvram_mgr.remove(self.instance)
        except Exception as e:
            LOG.exception(_LE('Unable to delete NVRAM for instance '
                              '%(name)s. Exception: %(reason)s'),
                          {'name': self.instance.name,
                           'reason': six.text_type(e)},
                          instance=self.instance)
Example #20
    def execute_impl(self):
        if self.nvram_mgr is None:
            LOG.info(_LI("No op for NVRAM delete."), instance=self.instance)
            return

        LOG.info(_LI('Deleting NVRAM for instance: %s'),
                 self.instance.name, instance=self.instance)
        try:
            self.nvram_mgr.remove(self.instance)
        except Exception as e:
            LOG.exception(_LE('Unable to delete NVRAM for instance '
                              '%(name)s. Exception: %(reason)s'),
                          {'name': self.instance.name,
                           'reason': six.text_type(e)},
                          instance=self.instance)
Example #21
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is None:
                LOG.error(_LE("Mappings were not able to find a proper VIOS. "
                              "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id, instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))

            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
Example #22
    def check_instance_shared_storage_remote(self, context, data):
        """Check if instance files located on shared storage.

        :param context: security context
        :param data: result of check_instance_shared_storage_local
        """

        # Check the data passed and see if we're in the same SSP
        try:
            if data:
                ssp_uuid = data.get('ssp_uuid')
                if ssp_uuid is not None:
                    return ssp_uuid == self._cluster.ssp_uuid
        except Exception as e:
            LOG.exception(_LE(u'Error checking for shared storage. '
                              'exception=%s'), e)
        return False
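The data argument comes from the local half of the check; a sketch of what that counterpart plausibly returns, assuming the SSP UUID is the only datum compared (as the code above implies):

    def check_instance_shared_storage_local(self, context, instance):
        # Hypothetical local half: expose this host's SSP UUID so the
        # remote side can compare it against its own cluster.
        return {'ssp_uuid': self._cluster.ssp_uuid}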
Example #23
    def unplug(self, vif, cna_w_list=None):
        """Unplugs a virtual interface (network) from a VM.

        :param vif: The virtual interface to unplug from the instance.
        :param cna_w_list: (Optional, Default: None) The list of Client Network
                           Adapters from pypowervm.  Providing this input
                           allows for an improvement in operation speed.
        :return cna_w: The deleted Client Network Adapter.
        """
        # This is a default implementation that most implementations will
        # require.

        # Need to find the adapters if they were not provided
        if not cna_w_list:
            cna_w_list = vm.get_cnas(self.adapter, self.instance)

        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                            'instance %(inst)s.  The VIF was not found on '
                            'the instance.'),
                        {'mac': vif['address'],
                         'inst': self.instance.name},
                        instance=self.instance)
            return None

        LOG.info(_LI('Deleting VIF with mac %(mac)s for instance %(inst)s.'),
                 {'mac': vif['address'], 'inst': self.instance.name},
                 instance=self.instance)
        try:
            cna_w.delete()
        except Exception as e:
            LOG.error(_LE('Unable to unplug VIF with mac %(mac)s for instance '
                          '%(inst)s.'),
                      {'mac': vif['address'],
                       'inst': self.instance.name},
                      instance=self.instance)
            LOG.exception(e, instance=self.instance)
            raise exception.VirtualInterfaceUnplugException(
                reason=six.text_type(e))
        return cna_w
Example #24
    def check_instance_shared_storage_remote(self, context, data):
        """Check if instance files located on shared storage.

        :param context: security context
        :param data: result of check_instance_shared_storage_local
        """

        # Check the data passed and see if we're in the same SSP
        try:
            if data:
                ssp_uuid = data.get('ssp_uuid')
                if ssp_uuid is not None:
                    return ssp_uuid == self._cluster.ssp_uuid
        except Exception as e:
            LOG.exception(_LE(u'Error checking for shared storage. '
                              'exception=%s'), e)
        return False
Example #25
    def rollback_live_migration(self, context):
        """Roll back a failed migration.

        :param context: security context
        """
        LOG.debug("Rollback live migration.", instance=self.instance)
        # If an error happened then let's try to recover
        # In most cases the recovery will happen automatically, but if it
        # doesn't, then force it.
        try:
            self.lpar_w.refresh()
            if self.lpar_w.migration_state != 'Not_Migrating':
                self.migration_recover()

        except Exception as ex:
            LOG.error(_LE("Migration recover failed with error: %s"), ex,
                      instance=self.instance)
        finally:
            LOG.debug("Finished migration rollback.", instance=self.instance)
Example #26
    def rollback_live_migration(self, context):
        """Roll back a failed migration.

        :param context: security context
        """
        LOG.debug("Rollback live migration.", instance=self.instance)
        # If an error happened then let's try to recover
        # In most cases the recovery will happen automatically, but if it
        # doesn't, then force it.
        try:
            self.lpar_w.refresh()
            if self.lpar_w.migration_state != 'Not_Migrating':
                self.migration_recover()

        except Exception as ex:
            LOG.error(_LE("Migration recover failed with error: %s"), ex,
                      instance=self.instance)
        finally:
            LOG.debug("Finished migration rollback.", instance=self.instance)
Example #27
        def _extend():
            # Get the volume group
            vg_wrap = self._get_vg_wrap()
            # Find the disk by name
            vdisks = vg_wrap.virtual_disks
            disk_found = None
            for vdisk in vdisks:
                if vdisk.name == vol_name:
                    disk_found = vdisk
                    break

            if not disk_found:
                LOG.error(_LE("Disk %s not found during resize."), vol_name, instance=instance)
                raise nova_exc.DiskNotFound(location=self.vg_name + "/" + vol_name)

            # Set the new size
            disk_found.capacity = size

            # Post it to the VIOS
            vg_wrap.update()
Example #28
    def execute_impl(self, lpar_wrap):
        # If the LPAR is not in a state that allows deletes, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {'inst': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceUnplugException(reason=reason)

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for network_info in self.network_infos:
            vif.unplug(self.adapter, self.host_uuid, self.instance,
                       network_info, self.slot_mgr, cna_w_list=cna_w_list)

        return cna_w_list
Example #29
    def execute_impl(self, lpar_wrap):
        # If the LPAR is not in a state that allows deletes, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {'inst': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for network_info in self.network_infos:
            vif.unplug(self.adapter, self.host_uuid, self.instance,
                       network_info, self.slot_mgr, cna_w_list=cna_w_list)

        return cna_w_list
Example #30
def dlt_lpar(adapter, lpar_uuid):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API
    :param lpar_uuid: The lpar to delete
    """
    # Attempt to delete the VM.  Any open vterm is closed first so the
    # delete does not fail because of an active console.
    try:
        LOG.info(_LI('Deleting virtual machine. LPARID: %s'), lpar_uuid)
        # Ensure any vterms are closed.  Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info(_LI('Virtual machine delete status: %d'), resp.status)
        return resp
    except pvm_exc.Error:
        # Attempting to close vterm did not help so raise exception
        LOG.error(_LE('Virtual machine delete failed: LPARID=%s'),
                  lpar_uuid)
        raise
Example #31
def dlt_lpar(adapter, lpar_uuid):
    """Delete an LPAR

    :param adapter: The adapter for the pypowervm API
    :param lpar_uuid: The lpar to delete
    """
    # Attempt to delete the VM.  Any open vterm is closed first so the
    # delete does not fail because of an active console.
    try:
        LOG.info(_LI('Deleting virtual machine. LPARID: %s'), lpar_uuid)
        # Ensure any vterms are closed.  Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)

        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info(_LI('Virtual machine delete status: %d'), resp.status)
        return resp
    except pvm_exc.Error:
        # Attempting to close vterm did not help so raise exception
        LOG.error(_LE('Virtual machine delete failed: LPARID=%s'),
                  lpar_uuid)
        raise
Example #32
        def _extend():
            # Get the volume group
            vg_wrap = self._get_vg_wrap()
            # Find the disk by name
            vdisks = vg_wrap.virtual_disks
            disk_found = None
            for vdisk in vdisks:
                if vdisk.name == vol_name:
                    disk_found = vdisk
                    break

            if not disk_found:
                LOG.error(_LE('Disk %s not found during resize.'), vol_name,
                          instance=instance)
                raise nova_exc.DiskNotFound(
                    location=self.vg_name + '/' + vol_name)

            # Set the new size
            disk_found.capacity = size

            # Post it to the VIOS
            vg_wrap.update()
Example #33
    def _update_nvram(self, instance=None):
        """Perform an update of NVRAM for instance.

        :param instance: The instance to update, or None to pull the next
                         pending instance off the update list.
        """
        if instance is None:
            uuid, instance = self._pop_from_list()
            if uuid is None:
                return
        else:
            # Remove any pending updates
            self._pop_from_list(uuid=instance.uuid)

        try:
            LOG.debug('Updating NVRAM for instance: %s', instance.uuid)
            data = self._get_data(instance)
            if data is not None:
                self._api.store(instance, data)
        except Exception as e:
            # Update exceptions should not end the operation.
            LOG.exception(_LE('Could not update NVRAM: %s'), e,
                          instance=instance)
Example #34
    def _update_nvram(self, instance=None):
        """Perform an update of NVRAM for instance.

        :param instance: The instance to update, or None to pull the next
                         pending instance off the update list.
        """
        if instance is None:
            uuid, instance = self._pop_from_list()
            if uuid is None:
                return
        else:
            # Remove any pending updates
            self._pop_from_list(uuid=instance.uuid)

        try:
            LOG.debug('Updating NVRAM for instance: %s', instance.uuid)
            data = self._get_data(instance)
            if data is not None:
                self._api.store(instance, data)
        except Exception as e:
            # Update exceptions should not end the operation.
            LOG.exception(_LE('Could not update NVRAM: %s'), e,
                          instance=instance)
Example #35
    def _add_maps_for_fabric(self, fabric, slot_mgr):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM
        """
        vios_wraps = self.stg_ftsk.feed
        # Ensure the physical ports in the metadata are not for a different
        # host (stale). If so, rebuild the maps with current info.
        npiv_port_maps = self._ensure_phys_ports_for_system(
            self._get_fabric_meta(fabric), vios_wraps, fabric)
        volume_id = self.connection_info['serial']

        # This loop adds the maps from the appropriate VIOS to the client VM
        slot_ids = copy.deepcopy(slot_mgr.build_map.get_vfc_slots(
            fabric, len(npiv_port_maps)))
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            if vios_w is None:
                LOG.error(_LE("Mappings were not able to find a proper VIOS. "
                              "The port mappings were %s."), npiv_port_maps,
                          instance=self.instance)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id, instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))
            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            slot_num = slot_ids.pop()
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                lpar_slot_num=slot_num, logspec=ls)

        # Store the client slot number for the NPIV mapping (for rebuild
        # scenarios)
        def set_vol_meta():
            vios_wraps = self.stg_ftsk.feed
            port_maps = self._get_fabric_meta(fabric)
            for port_map in port_maps:
                # The port map is [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]
                # We only need one of the two client wwpns.
                vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, port_map)
                c_wwpns = port_map[1].split()
                vfc_mapping = pvm_c_stor.c_wwpn_to_vfc_mapping(vios_w,
                                                               c_wwpns[0])

                # If there is no mapping, then don't add it.  It means that
                # the client WWPN is hosted on a different VIOS.
                if vfc_mapping is None:
                    continue

                # However, by this point we know that it is hosted on this
                # VIOS, so the vfc_mapping carries the client adapter slot
                # to register.
                slot_mgr.register_vfc_mapping(vfc_mapping, fabric)

        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_vol_meta, name='fab_slot_%s_%s' % (fabric, volume_id)))

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
Example #36
    def _vif_callback_failed(self, event_name, instance):
        LOG.error(_LE('VIF Plug failure for callback on event '
                      '%(event)s for instance %(uuid)s'),
                  {'event': event_name, 'uuid': instance.uuid})
        if CONF.vif_plugging_is_fatal:
            raise exception.VirtualInterfaceCreateException()
Example #37
    def execute_impl(self, lpar_wrap):
        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance)

        # Trim the VIFs down to the ones that haven't yet been created.
        crt_network_infos = []
        for network_info in self.network_infos:
            for cna_w in cna_w_list:
                if vm.norm_mac(cna_w.mac) == network_info['address']:
                    break
            else:
                crt_network_infos.append(network_info)

        # If there are no vifs to create, then just exit immediately.
        if len(crt_network_infos) == 0:
            return []

        # Check to see if the LPAR is OK to add VIFs to.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s.  The '
                          'VM was in a state where VIF plugging is not '
                          'acceptable.  The reason from the system is: '
                          '%(reason)s'),
                      {'sys': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # TODO(KYLEH): We're setting up to wait for an instance event.  The
        # event needs to come back to our compute manager so we need to ensure
        # the instance.host is set to our host.  We shouldn't need to do this
        # but in the evacuate/recreate case it may reflect the old host.
        # See: https://bugs.launchpad.net/nova/+bug/1535918
        undo_host_change = False
        if self.instance.host != CONF.host:
            LOG.warning(_LW('Instance was not assigned to this host. '
                            'It was assigned to: %s'), self.instance.host,
                        instance=self.instance)
            # Update the instance...
            old_host = self.instance.host
            self.instance.host = CONF.host
            self.instance.save()
            undo_host_change = True

        # For the VIFs, run the creates (and wait for the events back)
        try:
            with self.virt_api.wait_for_instance_event(
                    self.instance, self._get_vif_events(),
                    deadline=CONF.vif_plugging_timeout,
                    error_callback=self._vif_callback_failed):
                for network_info in crt_network_infos:
                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
                                 '%(sys)s'),
                             {'mac': network_info['address'],
                              'sys': self.instance.name},
                             instance=self.instance)
                    vif.plug(self.adapter, self.host_uuid, self.instance,
                             network_info, self.slot_mgr)
        except eventlet.timeout.Timeout:
            LOG.error(_LE('Error waiting for VIF to be created for instance '
                          '%(sys)s'), {'sys': self.instance.name},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()
        finally:
            if undo_host_change:
                LOG.info(_LI('Undoing temporary host assignment to instance.'),
                         instance=self.instance)
                self.instance.host = old_host
                self.instance.save()

        # Return the list of created VIFs.
        return cna_w_list
Example #38
    def execute_impl(self, lpar_wrap):
        # We will have two types of network infos.  One is for newly created
        # vifs.  The others already exist, but should be re-plugged.
        for network_info in self.network_infos:
            if self._vif_exists(network_info):
                self.update_network_infos.append(network_info)
            else:
                self.crt_network_infos.append(network_info)

        # If there are no vifs to create or update, then just exit immediately.
        if not self.crt_network_infos and not self.update_network_infos:
            return []

        # Check to see if the LPAR is OK to add VIFs to.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable and self.crt_network_infos:
            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s.  The '
                          'VM was in a state where VIF plugging is not '
                          'acceptable.  The reason from the system is: '
                          '%(reason)s'),
                      {'sys': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # TODO(KYLEH): We're setting up to wait for an instance event.  The
        # event needs to come back to our compute manager so we need to ensure
        # the instance.host is set to our host.  We shouldn't need to do this
        # but in the evacuate/recreate case it may reflect the old host.
        # See: https://bugs.launchpad.net/nova/+bug/1535918
        undo_host_change = False
        if self.instance.host != CONF.host:
            LOG.warning(_LW('Instance was not assigned to this host. '
                            'It was assigned to: %s'), self.instance.host,
                        instance=self.instance)
            # Update the instance...
            old_host = self.instance.host
            self.instance.host = CONF.host
            self.instance.save()
            undo_host_change = True

        # For existing VIFs that we just need to update, run the plug but do
        # not wait for the neutron event as that likely won't be sent (it was
        # already done).
        for network_info in self.update_network_infos:
            LOG.info(_LI("Updating VIF with mac %(mac)s for instance %(sys)s"),
                     {'mac': network_info['address'],
                      'sys': self.instance.name}, instance=self.instance)
            vif.plug(self.adapter, self.host_uuid, self.instance,
                     network_info, self.slot_mgr, new_vif=False)

        # For the VIFs, run the creates (and wait for the events back)
        try:
            with self.virt_api.wait_for_instance_event(
                    self.instance, self._get_vif_events(),
                    deadline=CONF.vif_plugging_timeout,
                    error_callback=self._vif_callback_failed):
                for network_info in self.crt_network_infos:
                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
                                 '%(sys)s'),
                             {'mac': network_info['address'],
                              'sys': self.instance.name},
                             instance=self.instance)
                    new_vif = vif.plug(
                        self.adapter, self.host_uuid, self.instance,
                        network_info, self.slot_mgr, new_vif=True)
                    if self.cnas is not None and isinstance(new_vif,
                                                            pvm_net.CNA):
                        self.cnas.append(new_vif)
        except eventlet.timeout.Timeout:
            LOG.error(_LE('Error waiting for VIF to be created for instance '
                          '%(sys)s'), {'sys': self.instance.name},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()
        finally:
            if undo_host_change:
                LOG.info(_LI('Undoing temporary host assignment to instance.'),
                         instance=self.instance)
                self.instance.host = old_host
                self.instance.save()

        return self.cnas
Example #39
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()

                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # If we have a device name, but not a udid, at this point
                    # we should not continue.  The hdisk is in a bad state
                    # in the I/O Server.  Subsequent scrub code on future
                    # deploys will clean this up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(_LW(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  This may be the result of "
                            "an evacuate."),
                            {'volume_id': self.volume_id,
                             'vios': vios_w.name},
                            instance=self.instance)
                        return False

            except Exception as e:
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.  Error: %(error)s"),
                    {'error': e, 'volume_uid': udid,
                     'vios_name': vios_w.name,
                     'volume_id': self.volume_id},
                    instance=self.instance)
                return False

            # We have found the device name
            LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                         "on Virtual I/O Server %(vios_name)s for volume "
                         "%(volume_id)s.  Volume UDID: %(volume_uid)s."), {
                             'volume_uid': udid,
                             'volume_id': self.volume_id,
                             'vios_name': vios_w.name,
                             'hdisk': device_name
                         },
                     instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any(result['vio_modified']
                       for result in ret['wrapper_task_rets'].values()):
                LOG.warning(_LW("Disconnect Volume: Failed to disconnect the "
                                "volume %(volume_id)s on ANY of the Virtual "
                                "I/O Servers for instance %(inst)s."),
                            {'inst': self.instance.name,
                             'volume_id': self.volume_id},
                            instance=self.instance)

        except Exception as e:
            LOG.error(_LE('Cannot detach volumes from virtual machine'),
                      instance=self.instance)
            LOG.exception(_LE('Error: %s'), e)
            ex_args = {'volume_id': self.volume_id,
                       'reason': six.text_type(e),
                       'instance_name': self.instance.name}
            raise p_exc.VolumeDetachFailed(**ex_args)
Example #40
    def execute(self, lpar_wrap):
        LOG.info(_LI('Plugging the Network Interfaces to instance %s'),
                 self.instance.name)

        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Trim the VIFs down to the ones that haven't yet been created.
        crt_vifs = []
        for vif in self.network_info:
            for cna_w in cna_w_list:
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    break
            else:
                crt_vifs.append(vif)

        # If there are no vifs to create, then just exit immediately.
        if len(crt_vifs) == 0:
            return []

        # Check to see if the LPAR is OK to add VIFs to.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s.  The '
                          'VM was in a state where VIF plugging is not '
                          'acceptable.  The reason from the system is: '
                          '%(reason)s'),
                      {'sys': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # TODO(KYLEH): We're setting up to wait for an instance event.  The
        # event needs to come back to our compute manager so we need to ensure
        # the instance.host is set to our host.  We shouldn't need to do this
        # but in the evacuate/recreate case it may reflect the old host.
        # See: https://bugs.launchpad.net/nova/+bug/1535918
        undo_host_change = False
        if self.instance.host != CONF.host:
            LOG.warning(_LW('Instance was not assigned to this host. '
                            'It was assigned to: %s'),
                        self.instance.host,
                        instance=self.instance)
            # Update the instance...
            old_host = self.instance.host
            self.instance.host = CONF.host
            self.instance.save()
            undo_host_change = True

        # For the VIFs, run the creates (and wait for the events back)
        try:
            with self.virt_api.wait_for_instance_event(
                    self.instance,
                    self._get_vif_events(),
                    deadline=CONF.vif_plugging_timeout,
                    error_callback=self._vif_callback_failed):
                for vif in crt_vifs:
                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
                                 '%(sys)s'), {
                                     'mac': vif['address'],
                                     'sys': self.instance.name
                                 },
                             instance=self.instance)
                    vm.crt_vif(self.adapter, self.instance, self.host_uuid,
                               vif)
        except eventlet.timeout.Timeout:
            LOG.error(_LE('Error waiting for VIF to be created for instance '
                          '%(sys)s'), {'sys': self.instance.name},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()
        finally:
            if undo_host_change:
                LOG.info(_LI('Undoing temporary host assignment to instance.'),
                         instance=self.instance)
                self.instance.host = old_host
                self.instance.save()

        # Return the adapter list.  Note that it was fetched before the
        # plugs, so it does not include the newly created VIFs; re-query via
        # vm.get_cnas if those are needed.
        return cna_w_list
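The trim loop near the top of this example relies on Python's for/else: the else arm runs only when the inner loop finishes without a break, i.e. when no existing client network adapter owned the VIF's MAC. A standalone sketch of just that idiom, using plain dicts and strings instead of pypowervm wrappers:

def vifs_without_cna(network_info, cna_macs):
    """Return the VIFs whose MAC has no client network adapter yet."""
    to_create = []
    for vif in network_info:
        for mac in cna_macs:
            if mac == vif['address']:
                break            # already plugged; skip this VIF
        else:                    # no break: nothing owns this MAC yet
            to_create.append(vif)
    return to_create

# e.g. vifs_without_cna([{'address': 'aa:bb'}], ['aa:bb']) returns []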
Example #41
    def _add_maps_for_fabric(self, fabric, slot_mgr):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM
        """
        vios_wraps = self.stg_ftsk.feed
        # Ensure the physical ports in the metadata are not for a different
        # host (stale). If so, rebuild the maps with current info.
        npiv_port_maps = self._ensure_phys_ports_for_system(
            self._get_fabric_meta(fabric), vios_wraps, fabric)
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        slot_ids = copy.deepcopy(slot_mgr.build_map.get_vfc_slots(
            fabric, len(npiv_port_maps)))
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            if vios_w is None:
                LOG.error(_LE("Mappings were not able to find a proper VIOS. "
                              "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id, instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))
            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            slot_num = slot_ids.pop()
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                lpar_slot_num=slot_num, logspec=ls)

        # Store the client slot number for the NPIV mapping (for rebuild
        # scenarios)
        def set_vol_meta():
            vios_wraps = self.stg_ftsk.feed
            port_maps = self._get_fabric_meta(fabric)
            for port_map in port_maps:
                # The port map is [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]
                # We only need one of the two client wwpns.
                vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, port_map)
                c_wwpns = port_map[1].split()
                vfc_mapping = pvm_c_stor.c_wwpn_to_vfc_mapping(vios_w,
                                                               c_wwpns[0])

                # If there is no mapping, then don't add it.  It means that
                # the client WWPN is hosted on a different VIOS.
                if vfc_mapping is None:
                    continue

                # However, by this point we know that it is hosted on this
                # VIOS, so the vfc_mapping carries the client adapter (and
                # its slot) that needs to be registered for rebuild.
                slot_mgr.register_vfc_mapping(vfc_mapping, fabric)

        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_vol_meta, name='fab_slot_%s_%s' % (fabric, volume_id)))

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
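As the inline comment in set_vol_meta notes, each fabric port map is a two-element list of the form [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]. A small illustrative helper (not part of the driver) showing how the first client WWPN is pulled out of each map:

def first_client_wwpns(port_maps):
    """Yield (phys_wwpn, first_client_wwpn) for each NPIV port map."""
    for phys_wwpn, client_wwpns in port_maps:
        # The second element packs both client WWPNs into one
        # space-separated string; one of the pair is enough to look up
        # the vFC mapping.
        yield phys_wwpn, client_wwpns.split()[0]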
Example #42
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            udid, device_name = None, None
            try:
                udid = self._get_udid()

                if udid:
                    # This will only work if vios_w has the Storage XAG.
                    device_name = vios_w.hdisk_from_uuid(udid)

                if not udid or not device_name:
                    # We lost our bdm data. We'll need to discover it.
                    status, device_name, udid = self._discover_volume_on_vios(
                        vios_w, self.volume_id)

                    # If we have a device name, but not a udid, at this point
                    # we should not continue.  The hdisk is in a bad state
                    # in the I/O Server.  Subsequent scrub code on future
                    # deploys will clean this up.
                    if not hdisk.good_discovery(status, device_name):
                        LOG.warning(_LW(
                            "Disconnect Volume: The backing hdisk for volume "
                            "%(volume_id)s on Virtual I/O Server %(vios)s is "
                            "not in a valid state.  No disconnect "
                            "actions to be taken as volume is not healthy."),
                            {'volume_id': self.volume_id, 'vios': vios_w.name})
                        return False

            except Exception as e:
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s. Volume "
                    "UDID: %(volume_uid)s.  Error: %(error)s"),
                    {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
                     'volume_id': self.volume_id})
                return False

            # We have found the device name
            LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                         "on Virtual I/O Server %(vios_name)s for volume "
                         "%(volume_id)s.  Volume UDID: %(volume_uid)s."),
                     {'volume_uid': udid, 'volume_id': self.volume_id,
                      'vios_name': vios_w.name, 'hdisk': device_name})

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)

                # Add a step to also remove the hdisk
                self._add_remove_hdisk(vios_w, device_name)

            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio', pvm_vios.VIOS.getter(
                    self.adapter, xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(
                discon_vol_for_vio, provides='vio_modified', flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any(result['vio_modified']
                       for result in ret['wrapper_task_rets'].values()):
                LOG.warning(_LW("Disconnect Volume: Failed to disconnect the "
                                "volume %(volume_id)s on ANY of the Virtual "
                                "I/O Servers for instance %(inst)s."),
                            {'inst': self.instance.name,
                             'volume_id': self.volume_id})

        except Exception as e:
            LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
                      self.vm_uuid)
            LOG.exception(_LE('Error: %s'), e)
            ex_args = {'volume_id': self.volume_id, 'reason': six.text_type(e),
                       'instance_name': self.instance.name}
            raise p_exc.VolumeDetachFailed(**ex_args)
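The lookup order inside discon_vol_for_vio (saved UDID first, active rediscovery as the fallback, health gate last) can be read as one small decision function. A sketch under the assumption that the injected callables behave like the example's hdisk_from_uuid, _discover_volume_on_vios and hdisk.good_discovery; resolve_hdisk itself is illustrative, not driver API:

def resolve_hdisk(vios_w, volume_id, saved_udid, discover, good_discovery):
    """Return (device_name, udid), or (None, None) if the disk is bad."""
    device_name = vios_w.hdisk_from_uuid(saved_udid) if saved_udid else None
    if device_name:
        return device_name, saved_udid
    # BDM data was lost; fall back to discovering the disk on this VIOS.
    status, device_name, udid = discover(vios_w, volume_id)
    if not good_discovery(status, device_name):
        # The hdisk is in a bad state on the I/O Server; leave cleanup
        # to the scrub code that runs on future deploys.
        return None, None
    return device_name, udid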
Example #43
    def _disconnect_volume(self, slot_mgr):
        """Disconnect the volume.

        This is the actual method to implement within the subclass.  Some
        transaction maintenance is done by the parent class.

        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM
        """
        def discon_vol_for_vio(vios_w):
            """Removes the volume from a specific Virtual I/O Server.

            :param vios_w: The VIOS wrapper.
            :return: True if a remove action was done against this VIOS.  False
                     otherwise.
            """
            LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            device_name = None
            try:
                device_name = self._get_devname()

                if not device_name:
                    # We lost our bdm data.  With no device name at this
                    # point we should not continue; subsequent scrub code on
                    # future deploys will clean this up.
                    LOG.warning(_LW(
                        "Disconnect Volume: The backing hdisk for volume "
                        "%(volume_id)s on Virtual I/O Server %(vios)s is "
                        "not in a valid state.  No disconnect "
                        "actions to be taken as volume is not healthy."), {
                            'volume_id': self.volume_id,
                            'vios': vios_w.name
                        }, instance=self.instance)
                    return False

            except Exception as e:
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to find disk on Virtual I/O "
                    "Server %(vios_name)s for volume %(volume_id)s."
                    " Error: %(error)s"), {
                        'error': e,
                        'vios_name': vios_w.name,
                        'volume_id': self.volume_id
                    }, instance=self.instance)
                return False

            # We have found the device name
            LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                         "on Virtual I/O Server %(vios_name)s for volume "
                         "%(volume_id)s."), {
                             'volume_id': self.volume_id,
                             'vios_name': vios_w.name,
                             'hdisk': device_name
                         },
                     instance=self.instance)

            # Add the action to remove the mapping when the stg_ftsk is run.
            partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)

            with lockutils.lock(hash(self)):
                self._add_remove_mapping(partition_id, vios_w.uuid,
                                         device_name, slot_mgr)
                target_iqn = self.connection_info["data"]["target_iqn"]

                def logout():
                    hdisk.remove_iscsi(self.adapter, target_iqn, vios_w.uuid)

                self.stg_ftsk.add_post_execute(
                    task.FunctorTask(logout,
                                     name='remove_iSCSI_%s' % target_iqn))
            # Found a valid element to remove
            return True

        try:
            # See logic in _connect_volume for why this new FeedTask is here.
            discon_ftsk = tx.FeedTask(
                'discon_volume_from_vio',
                pvm_vios.VIOS.getter(self.adapter,
                                     xag=[pvm_const.XAG.VIO_STOR]))
            # Find hdisks to disconnect
            discon_ftsk.add_functor_subtask(discon_vol_for_vio,
                                            provides='vio_modified',
                                            flag_update=False)
            ret = discon_ftsk.execute()

            # Warn if no hdisks disconnected.
            if not any(result['vio_modified']
                       for result in ret['wrapper_task_rets'].values()):
                LOG.warning(_LW(
                    "Disconnect Volume: Failed to disconnect the volume "
                    "%(volume_id)s on ANY of the Virtual I/O Servers for "
                    "instance %(inst)s."), {
                        'inst': self.instance.name,
                        'volume_id': self.volume_id
                    }, instance=self.instance)

        except Exception as e:
            LOG.error(_LE('Cannot detach volumes from virtual machine: %s'),
                      self.vm_uuid,
                      instance=self.instance)
            LOG.exception(_LE('Error: %s'), e, instance=self.instance)
            ex_args = {
                'volume_id': self.volume_id,
                'reason': six.text_type(e),
                'instance_name': self.instance.name
            }
            raise p_exc.VolumeDetachFailed(**ex_args)
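The iSCSI variant's main twist is the deferred logout: a closure is wrapped in a taskflow FunctorTask and registered as a post-execute step, so the session is torn down only after every mapping removal has run. A minimal sketch of that registration, assuming taskflow's FunctorTask and the pypowervm hdisk.remove_iscsi helper used above:

from pypowervm.tasks import hdisk
from taskflow import task

def defer_iscsi_logout(stg_ftsk, adapter, target_iqn, vios_uuid):
    """Queue an iSCSI logout to run after the feed task's main steps."""
    def logout():
        # Same call the example makes once the mappings are removed.
        hdisk.remove_iscsi(adapter, target_iqn, vios_uuid)
    stg_ftsk.add_post_execute(
        task.FunctorTask(logout, name='remove_iSCSI_%s' % target_iqn))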