Example #1
    def _validate_vios_on_connection(self, num_vioses_found):
        """Validates that the correct number of VIOSes were discovered.

        Certain environments may have redundancy requirements.  For PowerVM
        this is achieved by having multiple Virtual I/O Servers.  This method
        will check to ensure that the operator's requirements for redundancy
        have been met.  If not, a specific error message will be raised.

        :param num_vioses_found: The number of VIOSes the hdisk was found on.
        """
        # Valid as long as the VIOS count meets or exceeds the conf value.
        if num_vioses_found >= CONF.powervm.vscsi_vios_connections_required:
            return

        # Build a custom message based on whether zero, or some but not
        # enough, I/O Servers were found.
        if num_vioses_found == 0:
            msg = (_('Failed to discover valid hdisk on any Virtual I/O '
                     'Server for volume %(volume_id)s.') %
                   {'volume_id': self.volume_id})
        else:
            msg = (_('Failed to discover the hdisk on the required number of '
                     'Virtual I/O Servers.  Volume %(volume_id)s required '
                     '%(vios_req)d Virtual I/O Servers, but the disk was only '
                     'found on %(vios_act)d Virtual I/O Servers.') %
                   {'volume_id': self.volume_id, 'vios_act': num_vioses_found,
                    'vios_req': CONF.powervm.vscsi_vios_connections_required})
        LOG.error(msg)
        ex_args = {'volume_id': self.volume_id, 'reason': msg,
                   'instance_name': self.instance.name}
        raise p_exc.VolumeAttachFailed(**ex_args)
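A note on the configuration option used above: CONF.powervm.vscsi_vios_connections_required is an oslo.config integer option. As a hedged sketch (only the option name comes from the example; the default value and help text are illustrative assumptions), such an option could be registered like this:

from oslo_config import cfg

CONF = cfg.CONF

# Assumed registration; only the option name is taken from the example above.
vscsi_opts = [
    cfg.IntOpt('vscsi_vios_connections_required',
               default=1,
               help='Minimum number of Virtual I/O Servers on which the '
                    'hdisk must be discovered for the attach to be valid.'),
]
CONF.register_opts(vscsi_opts, group='powervm')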
Example #2
File: vm.py Project: adreznec/nova-powervm
    def _spp_pool_id(self, pool_name):
        """Returns the shared proc pool id for a given pool name.

        :param pool_name: The shared proc pool name.
        :return: The internal API id for the shared proc pool.
        """
        if (pool_name is None or
                pool_name == pvm_spp.DEFAULT_POOL_DISPLAY_NAME):
            # The default pool is 0
            return 0

        # Search for the pool with this name
        pool_wraps = pvm_spp.SharedProcPool.search(
            self.adapter, name=pool_name, parent=self.host_w)

        # Check to make sure there is a pool with the name, and only one pool.
        if len(pool_wraps) > 1:
            msg = (_('Multiple Shared Processing Pools with name %(pool)s.') %
                   {'pool': pool_name})
            raise exception.ValidationError(msg)
        elif len(pool_wraps) == 0:
            msg = (_('Unable to find Shared Processing Pool %(pool)s') %
                   {'pool': pool_name})
            raise exception.ValidationError(msg)

        # Return the singular pool id.
        return pool_wraps[0].id
Example #3
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is None:
                LOG.error(_LE("Mappings were not able to find a proper VIOS. "
                              "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id, instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))

            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
Example #4
    def _fetch(self, object_key):
        # Check if the object exists.  If not, return a result accordingly.
        if not self._exists(object_key):
            return None, _('Object does not exist in Swift.')

        try:
            # Create a temp file to download into.
            with tempfile.NamedTemporaryFile(delete=False) as f:
                options = {
                    'out_file': f.name
                }
            # The file is now created and closed for the swift client to use.
            results = self._run_operation(
                'download', container=self.container, objects=[object_key],
                options=options)
            for result in results:
                if result['success']:
                    # Read the downloaded contents back from the temp file.
                    with open(f.name, 'r') as out_file:
                        return out_file.read(), result
                else:
                    return None, result
        finally:
            try:
                os.remove(f.name)
            except Exception:
                LOG.warning(_LW('Could not remove temporary file: %s'), f.name)
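The create-then-close idiom above (NamedTemporaryFile with delete=False) exists so the swift client can write to a plain file path after the Python handle is closed. A self-contained sketch of the same pattern, assuming a hypothetical external writer:

import os
import tempfile


def fetch_via_temp_file(external_write):
    # 'external_write' is a hypothetical callable that writes to a path,
    # standing in for the swift client's download operation.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        path = f.name
    # The file now exists and is closed, ready for the external writer.
    try:
        external_write(path)
        with open(path, 'r') as out_file:
            return out_file.read()
    finally:
        # Always clean up the temp file, even on failure.
        os.remove(path)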
Example #5
    def validate(self, disk_info):
        """Validate the disk information is compatible with this driver.

        This method is called during cold migration to ensure the disk
        drivers on the destination host is compatible with the source host.

        :param disk_info: disk information dictionary
        :returns: None if compatible, otherwise a reason for incompatibility
        """
        return _("The configured disk driver does not support migration " "or resize.")
Example #6
    def _store(self, inst_key, inst_name, data, exists=None):
        """Store the NVRAM into the storage service.

        :param inst_key: The key by which to store the data in the repository.
        :param inst_name: The name of the instance, used for error reporting.
        :param data: the NVRAM data base64 encoded string
        :param exists: (Optional, Default: None) If specified, tells the upload
                       whether or not the object exists.  Should be a boolean
                       or None.  If left as None, the method will look up
                       whether or not it exists.
        """
        source = six.StringIO(data)

        # If the object doesn't exist, we tell the upload to 'leave_segments'.
        # This prevents a lookup and spares the logs an ERROR from the swift
        # client (that really isn't an error...sigh).  The options should be
        # None if the object already exists (upload defaults to
        # leave_segments=False) so that it overrides the existing element on
        # a subsequent upload.
        if exists is None:
            exists = self._exists(inst_key)
        options = dict(leave_segments=True) if not exists else None
        obj = swft_srv.SwiftUploadObject(source, object_name=inst_key)

        # The swift client already has a retry operation.  The retry method
        # takes a 'reset' function as a parameter.  This parameter is 'None'
        # for all operations except upload.  For upload, it's set to a default
        # method that throws a ClientException if the object to upload doesn't
        # implement tell/seek/reset.  If an authentication error occurs during
        # upload, this ClientException is raised with no retry.  For any other
        # operation, the swift client will retry and succeed.
        @retrying.retry(retry_on_result=lambda result: not result,
                        wait_fixed=250, stop_max_attempt_number=2)
        def _run_upload_operation():
            try:
                return self._run_operation('upload', self.container,
                                           [obj], options=options)
            except swft_exc.ClientException:
                # Upload operation failed due to expired Keystone token.
                # Retry SwiftClient operation to allow regeneration of token.
                return None

        try:
            results = _run_upload_operation()
        except retrying.RetryError as re:
            # The upload failed.
            reason = (_('Unable to store NVRAM after %d attempts') %
                      re.last_attempt.attempt_number)
            raise api.NVRAMUploadException(instance=inst_name, reason=reason)

        for result in results:
            if not result['success']:
                # The upload failed.
                raise api.NVRAMUploadException(instance=inst_name,
                                               reason=result)
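The retry-on-result pattern above can be reduced to a minimal sketch. The decorator arguments (retry_on_result, wait_fixed, stop_max_attempt_number) and RetryError are the retrying library's actual API; the operation being retried is a hypothetical stand-in:

import retrying


@retrying.retry(retry_on_result=lambda result: not result,
                wait_fixed=250, stop_max_attempt_number=2)
def _attempt(operation):
    # 'operation' is a hypothetical callable returning results on success
    # and None on a retryable failure.  A falsy return value triggers one
    # more attempt after a 250 ms wait.
    return operation()


def run_with_retry(operation):
    try:
        return _attempt(operation)
    except retrying.RetryError as re:
        raise RuntimeError('Gave up after %d attempts' %
                           re.last_attempt.attempt_number)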
Example #7
    def _check_migration_ready(self, lpar_w, host_w):
        """See if the lpar is ready for LPM.

        :param lpar_w: LogicalPartition wrapper
        :param host_w: ManagedSystem wrapper
        """
        ready, msg = lpar_w.can_lpm(host_w,
                                    migr_data=self.mig_data.host_mig_data)
        if not ready:
            msg = (_("Live migration of instance '%(name)s' failed because it "
                     "is not ready. Reason: %(reason)s") %
                   dict(name=self.instance.name, reason=msg))
            raise exception.MigrationPreCheckError(reason=msg)
Example #8
def _verify_migration_capacity(host_w, instance):
    """Check that the counts are valid for in progress and supported."""
    mig_stats = host_w.migration_data
    if (mig_stats['active_migrations_in_progress'] >=
            mig_stats['active_migrations_supported']):

        msg = (_("Cannot migrate %(name)s because the host %(host)s only "
                 "allows %(allowed)s concurrent migrations and "
                 "%(running)s migrations are currently running.") %
               dict(name=instance.name, host=host_w.system_name,
                    running=mig_stats['active_migrations_in_progress'],
                    allowed=mig_stats['active_migrations_supported']))
        raise exception.MigrationPreCheckError(reason=msg)
Example #9
def _build_vif_driver(adapter, host_uuid, instance, vif):
    """Returns the appropriate VIF Driver for the given VIF.

    :param adapter: The pypowervm adapter API interface.
    :param host_uuid: The host system UUID.
    :param instance: The nova instance.
    :param vif: The virtual interface from Nova.
    :return: The appropriate PvmVifDriver for the VIF.
    """
    if vif.get('type') is None:
        raise exception.VirtualInterfacePlugException(
            _("vif_type parameter must be present for this vif_driver "
              "implementation"))

    # Match the VIF type to an implementation
    if VIF_MAPPING.get(vif['type']):
        return importutils.import_object(
            VIF_MAPPING.get(vif['type']), adapter, host_uuid, instance)

    # No matching implementation, raise error.
    raise exception.VirtualInterfacePlugException(
        _("Unable to find appropriate PowerVM VIF Driver for VIF type "
          "%(vif_type)s on instance %(instance)s") %
        {'vif_type': vif['type'], 'instance': instance.name})
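For context: VIF_MAPPING is a dict from Neutron vif_type strings to driver class paths, and importutils.import_object (a real oslo.utils helper) imports the named class and instantiates it with the remaining positional arguments. A hedged sketch with illustrative entries (the real table and class paths live in the nova-powervm vif module):

from oslo_utils import importutils

# Hypothetical entries for illustration only.
VIF_MAPPING = {
    'pvm_sea': 'nova_powervm.virt.powervm.vif.PvmSeaVifDriver',
    'ovs': 'nova_powervm.virt.powervm.vif.PvmOvsVifDriver',
}

# import_object loads the class at the dotted path and calls it with the
# remaining arguments (adapter, host_uuid and instance are assumed to be
# in scope, as in _build_vif_driver above).
driver = importutils.import_object(
    VIF_MAPPING['ovs'], adapter, host_uuid, instance)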
Example #10
    def validate(self, disk_info):
        """Validate the disk information is compatible with this driver.

        This method is called during cold migration to ensure the disk
        drivers on the destination host is compatible with the source host.

        :param disk_info: disk information dictionary
        :returns: None if compatible, otherwise a reason for incompatibility
        """
        if disk_info.get('ssp_uuid') != self._ssp.uuid:
            return (_('The host is not a member of the same SSP cluster. '
                      'The source host cluster: %(source_clust_name)s. '
                      'The source host SSP: %(source_ssp_name)s.') % {
                          'source_clust_name': disk_info.get('cluster_name'),
                          'source_ssp_name': disk_info.get('ssp_name')
                      })
Example #11
    def validate(self, disk_info):
        """Validate the disk information is compatible with this driver.

        This method is called during cold migration to ensure the disk
        drivers on the destination host is compatible with the source host.

        :param disk_info: disk information dictionary
        :returns: None if compatible, otherwise a reason for incompatibility
        """
        if disk_info.get('ssp_uuid') != self._ssp.uuid:
            return (_('The host is not a member of the same SSP cluster. '
                      'The source host cluster: %(source_clust_name)s. '
                      'The source host SSP: %(source_ssp_name)s.') %
                    {'source_clust_name': disk_info.get('cluster_name'),
                     'source_ssp_name': disk_info.get('ssp_name')}
                    )
Example #12
    def _add_maps_for_fabric(self, fabric):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        """
        npiv_port_maps = self._get_fabric_meta(fabric)
        vios_wraps = self.stg_ftsk.feed
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

            if vios_w is None:
                LOG.error(
                    _LE("Mappings were not able to find a proper VIOS. "
                        "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id,
                    instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))

            ls = [
                LOG.info,
                _LI("Adding NPIV mapping for instance %(inst)s "
                    "for Virtual I/O Server %(vios)s."), {
                        'inst': self.instance.name,
                        'vios': vios_w.name
                    }
            ]

            # Add the subtask to add the specific map.
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                logspec=ls)

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
Example #13
    def check_destination(self, context, src_compute_info, dst_compute_info):
        """Check the destination host

        Here we check the destination host to see if it's capable of migrating
        the instance to this host.

        :param context: security context
        :param src_compute_info: Info about the sending machine
        :param dst_compute_info: Info about the receiving machine
        :returns: a dict containing migration info
        """

        # Refresh the host wrapper since we're pulling values that may change
        self.drvr.host_wrapper.refresh()

        src_stats = src_compute_info['stats']
        dst_stats = dst_compute_info['stats']
        # Check the lmb sizes for compatibility
        if (src_stats['memory_region_size'] !=
                dst_stats['memory_region_size']):
            msg = (_("Cannot migrate instance '%(name)s' because the "
                     "memory region size of the source (%(source_mrs)d MB) "
                     "does not match the memory region size of the target "
                     "(%(target_mrs)d MB).") %
                   dict(name=self.instance.name,
                        source_mrs=src_stats['memory_region_size'],
                        target_mrs=dst_stats['memory_region_size']))

            raise exception.MigrationPreCheckError(reason=msg)

        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.dest_data['dest_host_migr_data'] = (self.drvr.host_wrapper.
                                                 migration_data)
        self.dest_data['dest_ip'] = CONF.my_ip
        self.dest_data['dest_user_id'] = self._get_dest_user_id()
        self.dest_data['dest_sys_name'] = self.drvr.host_wrapper.system_name
        self.dest_data['dest_proc_compat'] = (
            ','.join(self.drvr.host_wrapper.proc_compat_modes))

        LOG.debug('src_compute_info: %s', src_compute_info)
        LOG.debug('dst_compute_info: %s', dst_compute_info)
        LOG.debug('Migration data: %s', self.dest_data)

        return self.dest_data
Example #14
    def check_destination(self, context, src_compute_info, dst_compute_info):
        """Check the destination host

        Here we check the destination host to see if it's capable of migrating
        the instance to this host.

        :param context: security context
        :param src_compute_info: Info about the sending machine
        :param dst_compute_info: Info about the receiving machine
        :returns: a dict containing migration info
        """

        # Refresh the host wrapper since we're pulling values that may change
        self.drvr.host_wrapper.refresh()

        src_stats = src_compute_info['stats']
        dst_stats = dst_compute_info['stats']
        # Check the lmb sizes for compatibility
        if (src_stats['memory_region_size'] !=
                dst_stats['memory_region_size']):
            msg = (_("Cannot migrate instance '%(name)s' because the "
                     "memory region size of the source (%(source_mrs)d MB) "
                     "does not match the memory region size of the target "
                     "(%(target_mrs)d MB).") %
                   dict(name=self.instance.name,
                        source_mrs=src_stats['memory_region_size'],
                        target_mrs=dst_stats['memory_region_size']))

            raise exception.MigrationPreCheckError(reason=msg)

        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.dest_data['dest_host_migr_data'] = (
            self.drvr.host_wrapper.migration_data)
        self.dest_data['dest_ip'] = CONF.my_ip
        self.dest_data['dest_user_id'] = self._get_dest_user_id()
        self.dest_data['dest_sys_name'] = self.drvr.host_wrapper.system_name
        self.dest_data['dest_proc_compat'] = (','.join(
            self.drvr.host_wrapper.proc_compat_modes))

        LOG.debug('src_compute_info: %s', src_compute_info)
        LOG.debug('dst_compute_info: %s', dst_compute_info)
        LOG.debug('Migration data: %s', self.dest_data)

        return self.dest_data
Example #15
    def _flavor_bool(self, val, key):
        """Will validate and return the boolean for a given value.

        :param val: The value to parse into a boolean.
        :param key: The flavor key.
        :return: The boolean value for the attribute.  If the value is not
                 well formed, a ValidationError is raised.
        """
        trues = ['true', 't', 'yes', 'y']
        falses = ['false', 'f', 'no', 'n']
        if val.lower() in trues:
            return True
        elif val.lower() in falses:
            return False
        else:
            msg = (_('Flavor attribute %(attr)s must be either True or '
                     'False.  Current value %(val)s is not allowed.') %
                   {'attr': key, 'val': val})
            raise exception.ValidationError(msg)
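A short usage sketch for the parser above. Flavor extra-spec values arrive as strings; the key name shown is illustrative only:

self._flavor_bool('Yes', 'powervm:example_attr')     # -> True
self._flavor_bool('f', 'powervm:example_attr')       # -> False
self._flavor_bool('maybe', 'powervm:example_attr')   # raises ValidationError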
Example #16
File: vm.py Project: adreznec/nova-powervm
    def _flavor_bool(self, val, key):
        """Will validate and return the boolean for a given value.

        :param val: The value to parse into a boolean.
        :param key: The flavor key.
        :return: The boolean value for the attribute.  If the value is not
                 well formed, a ValidationError is raised.
        """
        trues = ['true', 't', 'yes', 'y']
        falses = ['false', 'f', 'no', 'n']
        if val.lower() in trues:
            return True
        elif val.lower() in falses:
            return False
        else:
            msg = (_('Flavor attribute %(attr)s must be either True or '
                     'False.  Current value %(val)s is not allowed.') %
                   {'attr': key, 'val': val})
            raise exception.ValidationError(msg)
Example #17
class LiveMigrationFailed(exception.NovaException):
    msg_fmt = _("Live migration of instance '%(name)s' failed for reason: "
                "%(reason)s")
Example #18
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a PowerVMLiveMigrateData object
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode
                not in self.mig_data.dest_proc_compat.split(',')):
            msg = (_("Cannot migrate %(name)s because its "
                     "processor compatibility mode %(mode)s "
                     "is not in the list of modes \"%(modes)s\" "
                     "supported by the target host.") %
                   dict(name=self.instance.name,
                        mode=lpar_w.proc_compat_mode,
                        modes=', '.join(
                            self.mig_data.dest_proc_compat.split(','))))

            raise exception.MigrationPreCheckError(reason=msg)

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            msg = (_("Live migration of instance '%(name)s' failed because "
                     "the migration state is: %(state)s") %
                   dict(name=self.instance.name, state=lpar_w.migration_state))
            raise exception.MigrationPreCheckError(reason=msg)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

        # Get the 'source' pre-migration data for the volume drivers.
        vol_data = {}
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(vol_data)
        self.mig_data.vol_data = vol_data

        LOG.debug('Source migration data: %s',
                  self.mig_data,
                  instance=self.instance)

        # Create a FeedTask to scrub any orphaned mappings/storage associated
        # with this LPAR.  (Don't run it yet - we want to do the VOpt removal
        # within the same FeedTask.)
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                       lpar_w.id)
        # Add subtasks to remove the VOpt devices under the same FeedTask.
        media.ConfigDrivePowerVM(self.drvr.adapter).dlt_vopt(
            lpar_w.uuid, stg_ftsk=stg_ftsk, remove_mappings=False)
        # Now execute the FeedTask, performing both scrub and VOpt removal.
        stg_ftsk.execute()

        # Ensure the vterm is not active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.mig_data
Example #19
class VolumePreMigrationFailed(nex.NovaException):
    msg_fmt = _("Unable to perform pre live migration steps on volume (id: "
                "%(volume_id)s) from virtual machine %(instance_name)s.")
Example #20
class VolumeAttachFailed(nex.NovaException):
    msg_fmt = _("Unable to attach storage (id: %(volume_id)s) to virtual "
                "machine %(instance_name)s.  %(reason)s")
Example #21
class InvalidRebuild(nex.NovaException):
    msg_fmt = _("Unable to rebuild virtual machine on new host.  Error is "
                "%(error)s")
Example #22
class NoDiskDiscoveryException(nex.NovaException):
    """Failed to discover any disk."""
    msg_fmt = _("Having scanned SCSI bus %(bus)x on the management partition, "
                "disk with UDID %(udid)s failed to appear after %(polls)d "
                "polls over %(timeout)d seconds.")
Example #23
    def _add_maps_for_fabric(self, fabric, slot_mgr):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM
        """
        vios_wraps = self.stg_ftsk.feed
        # Ensure the physical ports in the metadata are not for a different
        # host (stale). If so, rebuild the maps with current info.
        npiv_port_maps = self._ensure_phys_ports_for_system(
            self._get_fabric_meta(fabric), vios_wraps, fabric)
        volume_id = self.connection_info['data']['volume_id']

        # This loop adds the maps from the appropriate VIOS to the client VM
        slot_ids = copy.deepcopy(slot_mgr.build_map.get_vfc_slots(
            fabric, len(npiv_port_maps)))
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            if vios_w is None:
                LOG.error(_LE("Mappings were not able to find a proper VIOS. "
                              "The port mappings were %s."), npiv_port_maps)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id, instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))
            ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
                                "for Virtual I/O Server %(vios)s."),
                  {'inst': self.instance.name, 'vios': vios_w.name}]

            # Add the subtask to add the specific map.
            slot_num = slot_ids.pop()
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map, self.host_uuid, self.vm_uuid, npiv_port_map,
                lpar_slot_num=slot_num, logspec=ls)

        # Store the client slot number for the NPIV mapping (for rebuild
        # scenarios)
        def set_vol_meta():
            vios_wraps = self.stg_ftsk.feed
            port_maps = self._get_fabric_meta(fabric)
            for port_map in port_maps:
                # The port map is [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]
                # We only need one of the two client wwpns.
                vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, port_map)
                c_wwpns = port_map[1].split()
                vfc_mapping = pvm_c_stor.c_wwpn_to_vfc_mapping(vios_w,
                                                               c_wwpns[0])

                # If there is no mapping, then don't add it.  It means that
                # the client WWPN is hosted on a different VIOS.
                if vfc_mapping is None:
                    continue

                # However, by this point we know that it is hosted on this
                # VIOS, so the vfc_mapping will have the client adapter.
                # Register its slot with the slot manager.
                slot_mgr.register_vfc_mapping(vfc_mapping, fabric)

        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_vol_meta, name='fab_slot_%s_%s' % (fabric, volume_id)))

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)
        self.stg_ftsk.add_post_execute(task.FunctorTask(
            set_state, name='fab_%s_%s' % (fabric, volume_id)))
Example #24
class PowerVMAPIFailed(nex.NovaException):
    msg_fmt = _("PowerVM API Failed to complete for instance=%(inst_name)s."
                "%(reason)s")
Example #25
    def _add_maps_for_fabric(self, fabric, slot_mgr):
        """Adds the vFC storage mappings to the VM for a given fabric.

        :param fabric: The fabric to add the mappings to.
        :param slot_mgr: A NovaSlotManager.  Used to store/retrieve the client
                         slots used when a volume is attached to the VM
        """
        vios_wraps = self.stg_ftsk.feed
        # Ensure the physical ports in the metadata are not for a different
        # host (stale). If so, rebuild the maps with current info.
        npiv_port_maps = self._ensure_phys_ports_for_system(
            self._get_fabric_meta(fabric), vios_wraps, fabric)
        volume_id = self.connection_info['serial']

        # This loop adds the maps from the appropriate VIOS to the client VM
        slot_ids = copy.deepcopy(
            slot_mgr.build_map.get_vfc_slots(fabric, len(npiv_port_maps)))
        for npiv_port_map in npiv_port_maps:
            vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
            if vios_w is None:
                LOG.error(
                    "Mappings were not able to find a proper VIOS. "
                    "The port mappings were %s.",
                    npiv_port_maps,
                    instance=self.instance)
                raise exc.VolumeAttachFailed(
                    volume_id=volume_id,
                    instance_name=self.instance.name,
                    reason=_("Unable to find a Virtual I/O Server that "
                             "hosts the NPIV port map for the server."))
            ls = [
                LOG.info, "Adding NPIV mapping for instance %(inst)s "
                "for Virtual I/O Server %(vios)s.", {
                    'inst': self.instance.name,
                    'vios': vios_w.name
                }
            ]

            # Add the subtask to add the specific map.
            slot_num = slot_ids.pop()
            self.stg_ftsk.wrapper_tasks[vios_w.uuid].add_functor_subtask(
                pvm_vfcm.add_map,
                self.host_uuid,
                self.vm_uuid,
                npiv_port_map,
                lpar_slot_num=slot_num,
                logspec=ls)

        # Store the client slot number for the NPIV mapping (for rebuild
        # scenarios)
        def set_vol_meta():
            vios_wraps = self.stg_ftsk.feed
            port_maps = self._get_fabric_meta(fabric)
            for port_map in port_maps:
                # The port map is [ 'phys_wwpn', 'client_wwpn1 client_wwpn2' ]
                # We only need one of the two client wwpns.
                vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, port_map)
                c_wwpns = port_map[1].split()
                vfc_mapping = pvm_c_stor.c_wwpn_to_vfc_mapping(
                    vios_w, c_wwpns[0])

                # If there is no mapping, then don't add it.  It means that
                # the client WWPN is hosted on a different VIOS.
                if vfc_mapping is None:
                    continue

                # However, by this point we know that it is hosted on this
                # VIOS, so the vfc_mapping will have the client adapter.
                # Register its slot with the slot manager.
                slot_mgr.register_vfc_mapping(vfc_mapping, fabric)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_vol_meta,
                             name='fab_slot_%s_%s' % (fabric, volume_id)))

        # After all the mappings, make sure the fabric state is updated.
        def set_state():
            self._set_fabric_state(fabric, FS_INST_MAPPED)

        self.stg_ftsk.add_post_execute(
            task.FunctorTask(set_state,
                             name='fab_%s_%s' % (fabric, volume_id)))
Example #26
class NVRAMConfigOptionNotSet(nex.NovaException):
    msg_fmt = _("The configuration option '%(option)s' must be set.")
Example #27
class NVRAMDownloadException(nex.NovaException):
    msg_fmt = _("The NVRAM could not be fetched for instance %(instance)s. "
                "Reason: %(reason)s")
Example #28
class DeviceDeletionException(nex.NovaException):
    """Expected to delete a disk, but the disk is still present afterward."""
    msg_fmt = _("Device %(devpath)s is still present on the management "
                "partition after attempting to delete it.  Polled %(polls)d "
                "times over %(timeout)d seconds.")
Example #29
class UniqueDiskDiscoveryException(nex.NovaException):
    """Expected to discover exactly one disk, but discovered >1."""
    msg_fmt = _("Expected to find exactly one disk on the management "
                "partition at %(path_pattern)s; found %(count)d.")
Example #30
class LiveMigrationVolume(exception.NovaException):
    msg_fmt = _("Cannot migrate %(name)s because the volume %(volume)s "
                "cannot be attached on the destination host %(host)s.")
Example #31
class OptRequiredIfOtherOptValue(nex.NovaException):
    msg_fmt = _("The %(then_opt)s option is required if %(if_opt)s is "
                "specified as '%(if_value)s'.")
Example #32
    def check_source(self, context, block_device_info, vol_drvs):
        """Check the source host

        Here we check the source host to see if it's capable of migrating
        the instance to the destination host.  There may be conditions
        that can only be checked on the source side.

        Also, get the instance ready for the migration by removing any
        virtual optical devices attached to the LPAR.

        :param context: security context
        :param block_device_info: result of _get_instance_block_device_info
        :param vol_drvs: volume drivers for the attached volumes
        :returns: a PowerVMLiveMigrateData object
        """

        lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
        self.lpar_w = lpar_w

        LOG.debug('Dest Migration data: %s', self.mig_data)

        # Check proc compatibility modes
        if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
                self.mig_data.dest_proc_compat.split(',')):
            msg = (_("Cannot migrate %(name)s because its "
                     "processor compatibility mode %(mode)s "
                     "is not in the list of modes \"%(modes)s\" "
                     "supported by the target host.") %
                   dict(name=self.instance.name,
                        mode=lpar_w.proc_compat_mode,
                        modes=', '.join(
                            self.mig_data.dest_proc_compat.split(','))))

            raise exception.MigrationPreCheckError(reason=msg)

        # Check if VM is ready for migration
        self._check_migration_ready(lpar_w, self.drvr.host_wrapper)

        if lpar_w.migration_state != 'Not_Migrating':
            msg = (_("Live migration of instance '%(name)s' failed because "
                     "the migration state is: %(state)s") %
                   dict(name=self.instance.name,
                        state=lpar_w.migration_state))
            raise exception.MigrationPreCheckError(reason=msg)

        # Check the number of migrations for capacity
        _verify_migration_capacity(self.drvr.host_wrapper, self.instance)

        self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter)

        # Get the 'source' pre-migration data for the volume drivers.
        vol_data = {}
        for vol_drv in vol_drvs:
            vol_drv.pre_live_migration_on_source(vol_data)
        self.mig_data.vol_data = vol_data

        LOG.debug('Src Migration data: %s', self.mig_data)

        # Create a FeedTask to scrub any orphaned mappings/storage associated
        # with this LPAR.  (Don't run it yet - we want to do the VOpt removal
        # within the same FeedTask.)
        stg_ftsk = stor_task.ScrubOrphanStorageForLpar(self.drvr.adapter,
                                                       lpar_w.id)
        # Add subtasks to remove the VOpt devices under the same FeedTask.
        cfg_drv = media.ConfigDrivePowerVM(self.drvr.adapter,
                                           self.drvr.host_uuid)
        cfg_drv.dlt_vopt(lpar_w.uuid, stg_ftsk=stg_ftsk,
                         remove_mappings=False)
        # Now execute the FeedTask, performing both scrub and VOpt removal.
        stg_ftsk.execute()

        # Ensure the vterm is not active
        vterm.close_vterm(self.drvr.adapter, lpar_w.uuid)

        return self.mig_data
Example #33
class NoConfigNoClusterFound(AbstractDiskException):
    msg_fmt = _('Unable to locate any Cluster for this operation.')
Example #34
class NoMediaRepoVolumeGroupFound(AbstractMediaException):
    msg_fmt = _("Unable to locate the volume group %(vol_grp)s to store the "
                "virtual optical media within.  Unable to create the "
                "media repository.")
Example #35
class TooManyClustersFound(AbstractDiskException):
    msg_fmt = _("Unexpectedly found %(clust_count)d Clusters "
                "matching name '%(clust_name)s'.")
Example #36
class NoConfigTooManyClusters(AbstractDiskException):
    msg_fmt = _("No cluster_name specified.  Refusing to select one of the "
                "%(clust_count)d Clusters found.")
Example #37
class InstanceDiskMappingFailed(AbstractDiskException):
    msg_fmt = _("Failed to map boot disk of instance %(instance_name)s to "
                "the management partition from any Virtual I/O Server.")
Example #38
class NoActiveViosForFeedTask(nex.NovaException):
    msg_fmt = _("There are no active Virtual I/O Servers available.")
Example #39
class NVRAMDeleteException(nex.NovaException):
    msg_fmt = _("The NVRAM could not be deleted for instance %(instance)s. "
                "Reason: %(reason)s")
Example #40
class NewMgmtMappingNotFoundException(nex.NovaException):
    """Just created a mapping to the mgmt partition, but can't find it."""
    msg_fmt = _("Failed to find newly-created mapping of storage element "
                "%(stg_name)s from Virtual I/O Server %(vios_name)s to the "
                "management partition.")
Example #41
    def _store(self, inst_key, data, exists=None):
        """Store the NVRAM into the storage service.

        :param inst_key: The key by which to store the data in the repository.
        :param data: the NVRAM data base64 encoded string
        :param exists: (Optional, Default: None) If specified, tells the upload
                       whether or not the object exists.  Should be a boolean
                       or None.  If left as None, the method will look up
                       whether or not it exists.
        """

        # If the object doesn't exist, we tell the upload to 'leave_segments'.
        # This prevents a lookup and spares the logs an ERROR from the swift
        # client (that really isn't an error...sigh).  The options should be
        # None if the object already exists (upload defaults to
        # leave_segments=False) so that it overrides the existing element on
        # a subsequent upload.
        if exists is None:
            exists = self._exists(inst_key)
        options = dict(leave_segments=True) if not exists else None

        # The swift client already has a retry operation.  The retry method
        # takes a 'reset' function as a parameter.  This parameter is 'None'
        # for all operations except upload.  For upload, it's set to a default
        # method that throws a ClientException if the object to upload doesn't
        # implement tell/seek/reset.  If an authentication error occurs during
        # upload, this ClientException is raised with no retry.  For any other
        # operation, the swift client will retry and succeed.
        @retrying.retry(retry_on_result=lambda result: result,
                        wait_fixed=250,
                        stop_max_attempt_number=2)
        def _run_upload_operation():
            """Run the upload operation

            Attempts retry for a maximum number of two times. The upload
            operation will fail with ClientException, if there is an
            authentication error. The second attempt only happens if the
            first attempt failed with ClientException. A return value of
            True means we should retry, and False means no failure during
            upload, thus no retry is required.

            Raises RetryError if the upload failed during second attempt,
            as the number of attempts for retry is reached.

            """
            source = six.StringIO(data)
            obj = swft_srv.SwiftUploadObject(source, object_name=inst_key)

            results = self._run_operation('upload',
                                          self.container, [obj],
                                          options=options)
            for result in results:
                if not result['success']:
                    # TODO(arun-mani - Bug 1611011): Filed for updating swift
                    # client to return http status code in case of failure
                    if isinstance(result['error'], swft_exc.ClientException):
                        # If upload failed during nvram/slot_map update due to
                        # expired keystone token, retry swift-client operation
                        # to allow regeneration of token
                        LOG.warning('NVRAM upload failed due to invalid '
                                    'token. Retrying upload.')
                        return True
                    # The upload failed.
                    raise api.NVRAMUploadException(instance=inst_key,
                                                   reason=result)
            return False

        try:
            _run_upload_operation()
        except retrying.RetryError as re:
            # The upload failed.
            reason = (_('Unable to store NVRAM after %d attempts') %
                      re.last_attempt.attempt_number)
            raise api.NVRAMUploadException(instance=inst_key, reason=reason)
Example #42
class VGNotFound(AbstractDiskException):
    msg_fmt = _("Unable to locate the volume group '%(vg_name)s' for this "
                "operation.")
Example #43
    def plug(self, vif, slot_num, new_vif=True):
        if not new_vif:
            return None

        physnet = vif.get_physical_network()
        if not physnet:
            # Get physnet from the neutron network if not present in the vif.
            # TODO(svenkat): This section of code will be eliminated in the
            # Pike release. A design will be in place to fix any vif that is
            # missing physical_network. The fix will be in the compute
            # startup code.
            net_id = vif['network']['id']
            admin_context = ctx.get_admin_context()
            napi = net_api.API()
            network = napi.get(admin_context, net_id)
            physnet = network.physical_network

        LOG.debug("Plugging vNIC SR-IOV vif for physical network %(physnet)s.",
                  {'physnet': physnet},
                  instance=self.instance)

        # Get the msys
        msys = pvm_ms.System.get(self.adapter)[0]
        # Physical ports for the given port label
        pports_w = sriovtask.find_pports_for_portlabel(physnet, self.adapter,
                                                       msys)
        pports = [pport.loc_code for pport in pports_w]

        if not pports:
            raise exception.VirtualInterfacePlugException(
                _("Unable to find acceptable Ethernet ports on physical "
                  "network '%(physnet)s' for instance %(inst)s for SRIOV "
                  "based VIF with MAC address %(vif_mac)s.") % {
                      'physnet': physnet,
                      'inst': self.instance.name,
                      'vif_mac': vif['address']
                  })

        # MAC
        mac_address = pvm_util.sanitize_mac_for_api(vif['address'])

        # vlan id
        vlan_id = int(vif['details']['vlan'])

        # Redundancy: plugin sets from binding:profile, then conf, then default
        redundancy = int(vif['details']['redundancy'])

        # Capacity: plugin sets from binding:profile, then conf, then default
        capacity = vif['details']['capacity']

        vnic = pvm_card.VNIC.bld(self.adapter,
                                 vlan_id,
                                 slot_num=slot_num,
                                 mac_addr=mac_address,
                                 allowed_vlans=pvm_util.VLANList.NONE,
                                 allowed_macs=pvm_util.MACList.NONE)

        sriovtask.set_vnic_back_devs(vnic,
                                     pports,
                                     sys_w=msys,
                                     redundancy=redundancy,
                                     capacity=capacity,
                                     check_port_status=True)

        return vnic.create(parent_type=pvm_lpar.LPAR,
                           parent_uuid=vm.get_pvm_uuid(self.instance))
Example #44
class ClusterNotFoundByName(AbstractDiskException):
    msg_fmt = _("Unable to locate the Cluster '%(clust_name)s' for this "
                "operation.")