Example #1
    def manage_existing(self,
                        context,
                        topic,
                        volume_id,
                        request_spec,
                        filter_properties=None,
                        volume=None):
        """Ensure that the host exists and can accept the volume."""

        self._wait_for_scheduler()

        # FIXME(mdulko): Remove this in v3.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        def _manage_existing_set_error(self, context, ex, request_spec):
            volume_state = {'volume_state': {'status': 'error'}}
            self._set_volume_state_and_notify('manage_existing', volume_state,
                                              context, ex, request_spec)

        try:
            self.driver.host_passes_filters(context, volume.host, request_spec,
                                            filter_properties)
        except exception.NoValidHost as ex:
            _manage_existing_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _manage_existing_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().manage_existing(context, volume,
                                                      request_spec.get('ref'))
Example #2
    def manage_existing_snapshot(self,
                                 context,
                                 volume,
                                 snapshot,
                                 ref,
                                 request_spec,
                                 filter_properties=None):
        """Ensure that the host exists and can accept the snapshot."""

        self._wait_for_scheduler()

        try:
            backend = self.driver.backend_passes_filters(
                context, volume.service_topic_queue, request_spec,
                filter_properties)
            backend.consume_from_volume({'size': volume.size})

        except exception.NoValidBackend as ex:
            self._set_snapshot_state_and_notify('manage_existing_snapshot',
                                                snapshot,
                                                fields.SnapshotStatus.ERROR,
                                                context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().manage_existing_snapshot(
                context, snapshot, ref, volume.service_topic_queue)
Example #3
    def _detach_volume(self, context, attach_info, volume, properties,
                       force=False, remote=False):
        """Disconnect the volume from the host."""
        # Use Brick's code to do attach/detach
        connector = attach_info['connector']
        connector.disconnect_volume(attach_info['conn']['data'],
                                    attach_info['device'])

        if remote:
            # Call remote manager's terminate_connection which includes
            # driver's terminate_connection and remove export
            rpcapi = volume_rpcapi.VolumeAPI()
            rpcapi.terminate_connection(context, volume, properties,
                                        force=force)
        else:
            # Call local driver's terminate_connection and remove export.
            # NOTE(avishay) This is copied from the manager's code - need to
            # clean this up in the future.
            try:
                self.terminate_connection(volume, properties, force=force)
            except Exception as err:
                err_msg = (_('Unable to terminate volume connection: %(err)s')
                           % {'err': err})
                LOG.error(err_msg)
                raise exception.VolumeBackendAPIException(data=err_msg)

            try:
                LOG.debug(_("volume %s: removing export"), volume['id'])
                self.remove_export(context, volume)
            except Exception as ex:
                LOG.exception(_("Error detaching volume %(volume)s, "
                                "due to remove export failure."),
                              {"volume": volume['id']})
                raise exception.RemoveExportException(volume=volume['id'],
                                                      reason=ex)
Example #4
    def manage_existing(self,
                        context,
                        topic,
                        volume_id,
                        request_spec,
                        filter_properties=None):
        """Ensure that the host exists and can accept the volume."""

        self._wait_for_scheduler()

        def _manage_existing_set_error(self, context, ex, request_spec):
            volume_state = {'volume_state': {'status': 'error'}}
            self._set_volume_state_and_notify('manage_existing', volume_state,
                                              context, ex, request_spec)

        volume_ref = db.volume_get(context, volume_id)
        try:
            self.driver.host_passes_filters(context, volume_ref['host'],
                                            request_spec, filter_properties)
        except exception.NoValidHost as ex:
            _manage_existing_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _manage_existing_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
                                                      request_spec.get('ref'))
Example #5
    def migrate_volume(self, context, volume, backend, force_copy,
                       request_spec, filter_properties):
        """Ensure that the backend exists and can accept the volume."""
        self._wait_for_scheduler()

        def _migrate_volume_set_error(self, context, ex, request_spec):
            if volume.status == 'maintenance':
                previous_status = (volume.previous_status or 'maintenance')
                volume_state = {
                    'volume_state': {
                        'migration_status': 'error',
                        'status': previous_status
                    }
                }
            else:
                volume_state = {'volume_state': {'migration_status': 'error'}}
            self._set_volume_state_and_notify('migrate_volume_to_host',
                                              volume_state, context, ex,
                                              request_spec)

        try:
            tgt_backend = self.driver.backend_passes_filters(
                context, backend, request_spec, filter_properties)
        except exception.NoValidBackend as ex:
            _migrate_volume_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_volume_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().migrate_volume(context, volume,
                                                     tgt_backend, force_copy)
Example #6
    def retype(self,
               context,
               topic,
               volume_id,
               request_spec,
               filter_properties=None):
        """Schedule the modification of a volume's type.

        :param context: the request context
        :param topic: the topic listened on
        :param volume_id: the ID of the volume to retype
        :param request_spec: parameters for this retype request
        :param filter_properties: parameters to filter by
        """

        self._wait_for_scheduler()

        def _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations):
            if reservations:
                QUOTAS.rollback(context, reservations)
            previous_status = (volume_ref.previous_status or volume_ref.status)
            volume_state = {'volume_state': {'status': previous_status}}
            self._set_volume_state_and_notify('retype', volume_state, context,
                                              ex, request_spec, msg)

        volume_ref = db.volume_get(context, volume_id)
        reservations = request_spec.get('quota_reservations')
        new_type = request_spec.get('volume_type')
        if new_type is None:
            msg = _('New volume type not specified in request_spec.')
            ex = exception.ParameterNotFound(param='volume_type')
            _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations)

        # Default migration policy is 'never'
        migration_policy = request_spec.get('migration_policy')
        if not migration_policy:
            migration_policy = 'never'

        try:
            tgt_host = self.driver.find_retype_host(context, request_spec,
                                                    filter_properties,
                                                    migration_policy)
        except exception.NoValidHost as ex:
            msg = (_("Could not find a host for volume %(volume_id)s with "
                     "type %(type_id)s.") % {
                         'type_id': new_type['id'],
                         'volume_id': volume_id
                     })
            _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _retype_volume_set_error(self, context, ex, request_spec,
                                         volume_ref, None, reservations)
        else:
            volume_rpcapi.VolumeAPI().retype(context, volume_ref,
                                             new_type['id'], tgt_host,
                                             migration_policy, reservations)
Example #7
    def _attach_volume(self, context, volume, properties, remote=False):
        """Attach the volume."""
        if remote:
            rpcapi = volume_rpcapi.VolumeAPI()
            conn = rpcapi.initialize_connection(context, volume, properties)
        else:
            conn = self.initialize_connection(volume, properties)

        # Use Brick's code to do attach/detach
        use_multipath = self.configuration.use_multipath_for_image_xfer
        protocol = conn['driver_volume_type']
        connector = initiator.InitiatorConnector.factory(
            protocol, use_multipath=use_multipath)
        device = connector.connect_volume(conn['data'])
        host_device = device['path']

        if not connector.check_valid_device(host_device):
            raise exception.DeviceUnavailable(path=host_device,
                                              reason=(_("Unable to access "
                                                        "the backend storage "
                                                        "via the path "
                                                        "%(path)s.") % {
                                                            'path': host_device
                                                        }))
        return {'conn': conn, 'device': device, 'connector': connector}
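
The dict returned by _attach_volume above is the attach_info that the
_detach_volume helper in Example #3 consumes. A minimal sketch of the pairing,
assuming the signatures shown in Examples #3 and #7; the method name and the
copy step in the middle are hypothetical, not verbatim Cinder code:

    def _copy_with_attach_detach(self, context, volume, properties):
        # Attach, work against the exposed block device, then always detach
        # with the same attach_info dict so the connector and connection
        # info are available for disconnect_volume()/terminate_connection().
        attach_info = self._attach_volume(context, volume, properties,
                                          remote=True)
        try:
            device_path = attach_info['device']['path']
            # ... hypothetical copy step reading from/writing to device_path
        finally:
            self._detach_volume(context, attach_info, volume, properties,
                                force=False, remote=True)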
Example #8
    def manage_existing(self,
                        context,
                        volume,
                        request_spec,
                        filter_properties=None):
        """Ensure that the host exists and can accept the volume."""

        self._wait_for_scheduler()

        def _manage_existing_set_error(self, context, ex, request_spec):
            volume_state = {'volume_state': {'status': 'error_managing'}}
            self._set_volume_state_and_notify('manage_existing', volume_state,
                                              context, ex, request_spec)

        try:
            backend = self.driver.backend_passes_filters(
                context, volume.service_topic_queue, request_spec,
                filter_properties)

            # At the API we didn't have the pool info, so the volume DB entry
            # was created without it; now we add it.
            volume.host = backend.host
            volume.cluster_name = backend.cluster_name
            volume.save()

        except exception.NoValidBackend as ex:
            _manage_existing_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _manage_existing_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().manage_existing(context, volume,
                                                      request_spec.get('ref'))
Example #9
    def create_snapshot(self,
                        ctxt,
                        volume,
                        snapshot,
                        backend,
                        request_spec=None,
                        filter_properties=None):
        """Create snapshot for a volume.

        The main purpose of this method is to check whether the target
        backend (of the volume and snapshot) has sufficient capacity to
        host the to-be-created snapshot.
        """
        self._wait_for_scheduler()

        try:
            tgt_backend = self.driver.backend_passes_filters(
                ctxt, backend, request_spec, filter_properties)
            tgt_backend.consume_from_volume(
                {'size': request_spec['volume_properties']['size']})
        except exception.NoValidBackend as ex:
            self._set_snapshot_state_and_notify('create_snapshot', snapshot,
                                                'error', ctxt, ex,
                                                request_spec)
        else:
            volume_rpcapi.VolumeAPI().create_snapshot(ctxt, volume, snapshot)
Example #10
    def migrate_volume_completion(self,
                                  ctxt,
                                  volume_id,
                                  new_volume_id,
                                  error=False):
        msg = _("migrate_volume_completion: completing migration for "
                "volume %(vol1)s (temporary volume %(vol2)s)")
        LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
        volume = self.db.volume_get(ctxt, volume_id)
        new_volume = self.db.volume_get(ctxt, new_volume_id)
        rpcapi = volume_rpcapi.VolumeAPI()

        if error:
            msg = _("migrate_volume_completion is cleaning up an error "
                    "for volume %(vol1)s (temporary volume %(vol2)s)")
            LOG.info(msg % {'vol1': volume['id'], 'vol2': new_volume['id']})
            new_volume['migration_status'] = None
            rpcapi.delete_volume(ctxt, new_volume)
            self.db.volume_update(ctxt, volume_id, {'migration_status': None})
            return volume_id

        self.db.volume_update(ctxt, volume_id,
                              {'migration_status': 'completing'})

        # Delete the source volume (if it fails, don't fail the migration)
        try:
            self.delete_volume(ctxt, volume_id)
        except Exception as ex:
            msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
            LOG.error(msg % {'vol': volume_id, 'err': ex})

        self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
        self.db.volume_destroy(ctxt, new_volume_id)
        self.db.volume_update(ctxt, volume_id, {'migration_status': None})
        return volume['id']
Example #11
    def extend_volume(self,
                      context,
                      volume,
                      new_size,
                      reservations,
                      request_spec=None,
                      filter_properties=None):
        def _extend_volume_set_error(self, context, ex, request_spec):
            volume_state = {
                'volume_state': {
                    'status': volume.previous_status,
                    'previous_status': None
                }
            }
            self._set_volume_state_and_notify('extend_volume', volume_state,
                                              context, ex, request_spec)

        if not filter_properties:
            filter_properties = {}

        filter_properties['new_size'] = new_size
        try:
            backend_state = self.driver.backend_passes_filters(
                context, volume.service_topic_queue, request_spec,
                filter_properties)
            backend_state.consume_from_volume({'size': new_size - volume.size})
            volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size,
                                                    reservations)
        except exception.NoValidBackend as ex:
            QUOTAS.rollback(context,
                            reservations,
                            project_id=volume.project_id)
            _extend_volume_set_error(self, context, ex, request_spec)
Example #12
 def __init__(self, *args, **kwargs):
     self.service = importutils.import_module(self.driver_name)
     self.az = CONF.storage_availability_zone
     self.backup_rpcapi = backup_rpcapi.BackupAPI()
     self.volume_rpcapi = volume_rpcapi.VolumeAPI()
     super(BackupManager, self).__init__(*args, **kwargs)
     self.is_initialized = False
Example #13
    def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
                                  error=False):
        volume = self.db.volume_get(ctxt, volume_id)
        new_volume = self.db.volume_get(ctxt, new_volume_id)
        rpcapi = volume_rpcapi.VolumeAPI()

        if error:
            new_volume['migration_status'] = None
            rpcapi.delete_volume(ctxt, new_volume)
            self.db.volume_update(ctxt, volume_id, {'migration_status': None})
            return volume_id

        self.db.volume_update(ctxt, volume_id,
                              {'migration_status': 'completing'})

        # Delete the source volume (if it fails, don't fail the migration)
        try:
            self.delete_volume(ctxt, volume_id)
        except Exception as ex:
            msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
            LOG.error(msg % {'vol': volume_id, 'err': ex})

        self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
        self.db.volume_update(ctxt, volume_id, {'migration_status': None})
        return volume['id']
Example #14
    def __init__(self, db_driver=None):
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        self.availability_zone_names = ()
        self.volume_api = volume_api.API()

        super(API, self).__init__(db_driver)
Example #15
    def migrate_volume_to_host(self, context, topic, volume_id, host,
                               force_host_copy, request_spec,
                               filter_properties=None):
        """Ensure that the host exists and can accept the volume."""

        def _migrate_volume_set_error(self, context, ex, request_spec):
            volume_state = {'volume_state': {'migration_status': None}}
            self._set_volume_state_and_notify('migrate_volume_to_host',
                                              volume_state,
                                              context, ex, request_spec)

        try:
            tgt_host = self.driver.host_passes_filters(context, host,
                                                       request_spec,
                                                       filter_properties)
        except exception.NoValidHost as ex:
            _migrate_volume_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_volume_set_error(self, context, ex, request_spec)
        else:
            volume_ref = db.volume_get(context, volume_id)
            volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
                                                     tgt_host,
                                                     force_host_copy)
Example #16
 def __init__(self, db_driver=None, image_service=None):
     self.image_service = (image_service
                           or glance.get_default_image_service())
     self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
     self.volume_rpcapi = volume_rpcapi.VolumeAPI()
     self.availability_zone_names = ()
     super(API, self).__init__(db_driver)
Example #17
 def __init__(self, service_name=None, *args, **kwargs):
     self.service = importutils.import_module(self.driver_name)
     self.az = CONF.storage_availability_zone
     self.volume_managers = {}
     self.backup_rpcapi = backup_rpcapi.BackupAPI()
     self.volume_rpcapi = volume_rpcapi.VolumeAPI()
     super(BackupManager, self).__init__(service_name='backup',
                                         *args, **kwargs)
Example #18
 def test_do_cleanup_too_old(self):
     cleanup_request = objects.CleanupRequest(self.context)
     rpcapi = volume_rpcapi.VolumeAPI()
     with mock.patch.object(rpcapi.client,
                            'can_send_version',
                            return_value=False) as can_send_mock:
         self.assertRaises(exception.ServiceTooOld, rpcapi.do_cleanup,
                           self.context, cleanup_request)
         can_send_mock.assert_called_once_with('3.7')
Example #19
 def _copy_volume_data_cleanup(self, context, volume, properties,
                               attach_info, remote, force=False):
     self._detach_volume(attach_info)
     if remote:
         rpcapi = volume_rpcapi.VolumeAPI()
         rpcapi.terminate_connection(context, volume, properties,
                                     force=force)
     else:
         self.terminate_connection(volume, properties, force=False)
Example #20
 def __init__(self, ext_mgr=None):
     self.ext_mgr = ext_mgr
     super(ServiceController, self).__init__()
     self.volume_api = volume.API()
     self.rpc_apis = {
         constants.SCHEDULER_BINARY: scheduler_rpcapi.SchedulerAPI(),
         constants.VOLUME_BINARY: volume_rpcapi.VolumeAPI(),
         constants.BACKUP_BINARY: backup_rpcapi.BackupAPI(),
     }
Example #21
 def __init__(self, db_driver=None, image_service=None):
     self.image_service = (image_service
                           or glance.get_default_image_service())
     self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
     self.volume_rpcapi = volume_rpcapi.VolumeAPI()
     self.availability_zones = []
     self.availability_zones_last_fetched = None
     self.key_manager = keymgr.API()
     super(API, self).__init__(db_driver)
Example #22
 def test_do_cleanup(self, host, cluster, get_cctxt_mock):
     cleanup_request = objects.CleanupRequest(self.context,
                                              host=host,
                                              cluster_name=cluster)
     rpcapi = volume_rpcapi.VolumeAPI()
     rpcapi.do_cleanup(self.context, cleanup_request)
     get_cctxt_mock.assert_called_once_with(
         cleanup_request.service_topic_queue, '3.7')
     get_cctxt_mock.return_value.cast.assert_called_once_with(
         self.context, 'do_cleanup', cleanup_request=cleanup_request)
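
The host, cluster, and get_cctxt_mock parameters in the test above imply
decorators that are not part of this excerpt. A hedged sketch of what the
missing setup could look like on a @ddt.ddt-decorated test class, assuming a
ddt data decorator and a mock.patch of VolumeAPI._get_cctxt (both inferred
from the test body, not taken from the excerpt):

    @ddt.data(('host@backend#pool', None), (None, 'cluster'))
    @ddt.unpack
    @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt')
    def test_do_cleanup(self, host, cluster, get_cctxt_mock):
        # ddt unpacks each (host, cluster) pair into positional arguments;
        # mock.patch then appends the _get_cctxt mock as the last argument.
        ...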
Example #23
 def __init__(self, scheduler_driver=None, service_name=None,
              *args, **kwargs):
     if not scheduler_driver:
         scheduler_driver = CONF.scheduler_driver
     self.driver = importutils.import_object(scheduler_driver)
     super(SchedulerManager, self).__init__(*args, **kwargs)
     self._startup_delay = True
     self.volume_api = volume_rpcapi.VolumeAPI()
     self.sch_api = scheduler_rpcapi.SchedulerAPI()
     self.rpc_api_version = versionutils.convert_version_to_int(
         self.RPC_API_VERSION)
Example #24
 def request_service_capabilities(self, context):
     volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
     try:
         self.backup_api.publish_service_capabilities(context)
     except exception.ServiceTooOld as e:
         # cinder-backup only has publish_service_capabilities starting
         # with the Stein release.
         msg = ("Failed to notify about cinder-backup service "
                "capabilities for host %(host)s. This is normal "
                "during a live upgrade. Error: %(e)s")
         LOG.warning(msg, {'host': self.host, 'e': e})
Example #25
    def retype(self, context, volume, request_spec, filter_properties=None):
        """Schedule the modification of a volume's type.

        :param context: the request context
        :param volume: the volume object to retype
        :param request_spec: parameters for this retype request
        :param filter_properties: parameters to filter by
        """

        self._wait_for_scheduler()

        def _retype_volume_set_error(self,
                                     context,
                                     ex,
                                     request_spec,
                                     volume_ref,
                                     reservations,
                                     msg=None):
            if reservations:
                QUOTAS.rollback(context, reservations)
            previous_status = (volume_ref.previous_status or volume_ref.status)
            volume_state = {'volume_state': {'status': previous_status}}
            self._set_volume_state_and_notify('retype', volume_state, context,
                                              ex, request_spec, msg)

        reservations = request_spec.get('quota_reservations')
        old_reservations = request_spec.get('old_reservations', None)
        new_type = request_spec.get('volume_type')
        if new_type is None:
            msg = _('New volume type not specified in request_spec.')
            ex = exception.ParameterNotFound(param='volume_type')
            _retype_volume_set_error(self, context, ex, request_spec, volume,
                                     reservations, msg)

        # Default migration policy is 'never'
        migration_policy = request_spec.get('migration_policy')
        if not migration_policy:
            migration_policy = 'never'

        try:
            tgt_backend = self.driver.find_retype_backend(
                context, request_spec, filter_properties, migration_policy)
        except Exception as ex:
            # Not having a valid host is an expected exception, so we don't
            # reraise on it.
            reraise = not isinstance(ex, exception.NoValidBackend)
            with excutils.save_and_reraise_exception(reraise=reraise):
                _retype_volume_set_error(self, context, ex, request_spec,
                                         volume, reservations)
        else:
            volume_rpcapi.VolumeAPI().retype(context, volume, new_type['id'],
                                             tgt_backend, migration_policy,
                                             reservations, old_reservations)
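
Example #25 treats NoValidBackend as expected and swallows it, re-raising
anything else through excutils.save_and_reraise_exception(reraise=...). A
standalone sketch of that pattern using oslo_utils directly; the _risky helper
and ValueError are illustrative stand-ins, not Cinder code:

from oslo_utils import excutils

def _risky():
    raise ValueError("expected failure")

try:
    _risky()
except Exception as ex:
    # Swallow only the expected exception type; anything else is re-raised
    # once the cleanup/notification block below has run.
    reraise = not isinstance(ex, ValueError)
    with excutils.save_and_reraise_exception(reraise=reraise):
        print("cleanup and notification run in both cases")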
Example #26
 def __init__(self, *args, **kwargs):
     self.service = importutils.import_module(self.driver_name)
     self.az = CONF.storage_availability_zone
     self.volume_managers = {}
     # TODO(xyang): If backup_use_same_host is True, we'll find
     # the volume backend on the backup node. This allows us
     # to use a temp snapshot to backup an in-use volume if the
     # driver supports it. This code should go away when we add
     # support for backing up in-use volume using a temp snapshot
     # on a remote node.
     if CONF.backup_use_same_host:
         self._setup_volume_drivers()
     self.backup_rpcapi = backup_rpcapi.BackupAPI()
     self.volume_rpcapi = volume_rpcapi.VolumeAPI()
     super(BackupManager, self).__init__(*args, **kwargs)
Example #27
def _get_non_shared_target_hosts(ctxt):
    hosts = []
    numvols_needing_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()

    services = objects.ServiceList.get_all_by_topic(ctxt, 'cinder-volume')
    for service in services:
        capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
        if not capabilities.get('shared_targets', True):
            hosts.append(service.host)
            numvols_needing_update += db_api.model_query(
                ctxt,
                models.Volume).filter_by(shared_targets=True,
                                         service_uuid=service.uuid).count()
    return hosts, numvols_needing_update
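
A short sketch of how the (hosts, count) pair returned above might be
consumed, for instance to report whether any shared_targets rows still need
updating; the log wording is illustrative, not Cinder code:

hosts, numvols_needing_update = _get_non_shared_target_hosts(ctxt)
if numvols_needing_update:
    # These hosts do not use shared targets, yet some of their volumes are
    # still flagged shared_targets=True in the DB and must be updated.
    LOG.info("%(count)d volumes on %(num_hosts)d non-shared-target hosts "
             "still need a shared_targets update.",
             {'count': numvols_needing_update, 'num_hosts': len(hosts)})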
Example #28
 def __init__(self, *args, **kwargs):
     self.az = CONF.storage_availability_zone
     self.backup_rpcapi = backup_rpcapi.BackupAPI()
     self.volume_rpcapi = volume_rpcapi.VolumeAPI()
     super(BackupManager, self).__init__(*args, **kwargs)
     self.is_initialized = False
     self._set_tpool_size(CONF.backup_native_threads_pool_size)
     self._process_number = kwargs.get('process_number', 1)
     self.driver_name = CONF.backup_driver
     if self.driver_name in MAPPING:
         new_name = MAPPING[self.driver_name]
         LOG.warning(
             'Backup driver path %s is deprecated, update your '
             'configuration to the new path %s', self.driver_name, new_name)
         self.driver_name = new_name
     self.service = importutils.import_class(self.driver_name)
Example #29
    def migrate_volume_to_host(self,
                               context,
                               topic,
                               volume_id,
                               host,
                               force_host_copy,
                               request_spec,
                               filter_properties=None,
                               volume=None):
        """Ensure that the host exists and can accept the volume."""

        self._wait_for_scheduler()

        # FIXME(thangp): Remove this in v2.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        def _migrate_volume_set_error(self, context, ex, request_spec):
            if volume.status == 'maintenance':
                previous_status = (volume.previous_status or 'maintenance')
                volume_state = {
                    'volume_state': {
                        'migration_status': 'error',
                        'status': previous_status
                    }
                }
            else:
                volume_state = {'volume_state': {'migration_status': 'error'}}
            self._set_volume_state_and_notify('migrate_volume_to_host',
                                              volume_state, context, ex,
                                              request_spec)

        try:
            tgt_host = self.driver.host_passes_filters(context, host,
                                                       request_spec,
                                                       filter_properties)
        except exception.NoValidHost as ex:
            _migrate_volume_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_volume_set_error(self, context, ex, request_spec)
        else:
            volume_rpcapi.VolumeAPI().migrate_volume(context, volume, tgt_host,
                                                     force_host_copy)
Example #30
def _get_non_shared_target_hosts(ctxt):
    hosts = []
    numvols_needing_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()

    services = objects.ServiceList.get_all_by_topic(ctxt,
                                                    constants.VOLUME_TOPIC)
    for service in services:
        capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
        # Select only non-iSCSI connections, plus iSCSI connections that
        # explicitly report no shared targets
        if (capabilities.get('storage_protocol') != 'iSCSI'
                or not capabilities.get('shared_targets', True)):
            hosts.append(service.host)
            numvols_needing_update += db_api.model_query(
                ctxt,
                models.Volume).filter_by(shared_targets=True,
                                         service_uuid=service.uuid).count()
    return hosts, numvols_needing_update