Example #1
    def find_retype_host(self,
                         context,
                         request_spec,
                         filter_properties=None,
                         migration_policy='never'):
        """Find a host that can accept the volume with its new type."""
        filter_properties = filter_properties or {}
        current_host = request_spec['volume_properties']['host']

        # The volume already exists on this host, and so we shouldn't check if
        # it can accept the volume again in the CapacityFilter.
        filter_properties['vol_exists_on'] = current_host

        weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                      filter_properties)
        if not weighed_hosts:
            raise exception.NoValidHost(
                reason=_('No valid hosts for volume '
                         '%(id)s with type %(type)s') % {
                             'id': request_spec['volume_id'],
                             'type': request_spec['volume_type']
                         })

        for weighed_host in weighed_hosts:
            host_state = weighed_host.obj
            if host_state.host == current_host:
                return host_state

        if utils.extract_host(current_host, 'pool') is None:
            # legacy volumes created before pool is introduced has no pool
            # info in host.  But host_state.host always include pool level
            # info. In this case if above exact match didn't work out, we
            # find host_state that are of the same host of volume being
            # retyped. In other words, for legacy volumes, retyping could
            # cause migration between pools on same host, which we consider
            # it is different from migration between hosts thus allow that
            # to happen even migration policy is 'never'.
            for weighed_host in weighed_hosts:
                host_state = weighed_host.obj
                backend = utils.extract_host(host_state.host, 'backend')
                if backend == current_host:
                    return host_state

        if migration_policy == 'never':
            raise exception.NoValidHost(
                reason=_('Current host not valid for '
                         'volume %(id)s with type '
                         '%(type)s, migration not '
                         'allowed') % {
                             'id': request_spec['volume_id'],
                             'type': request_spec['volume_type']
                         })

        top_host = self._choose_top_host(weighed_hosts, request_spec)
        return top_host.obj
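
The least obvious part of the method above is the legacy-volume branch: a pre-pool current_host has no '#pool' suffix, so extract_host(current_host, 'pool') returns None, while every host_state.host reported by the scheduler does carry a pool. Reducing a candidate to 'backend' level drops that suffix, so a candidate on the same backend can still match. A rough, self-contained illustration with made-up host strings (behaviour as asserted by the extract_host test in Example #22 below):

    # Hypothetical host strings, for illustration only.
    current_host = 'node1@lvm'            # legacy volume: no '#pool' suffix
    host_state_host = 'node1@lvm#pool_a'  # scheduler state always carries the pool

    # 'pool'-level extraction of the legacy host yields None, which is what
    # triggers the legacy branch; 'backend'-level extraction of the candidate
    # drops '#pool_a', so it matches current_host and the retype stays on the
    # same backend even when migration_policy is 'never'.
    assert host_state_host.split('#')[0] == current_host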
Example #2
    def find_retype_host(self, context, request_spec, filter_properties=None,
                         migration_policy='never'):
        """Find a host that can accept the volume with its new type."""
        filter_properties = filter_properties or {}
        current_host = request_spec['volume_properties']['host']

        # The volume already exists on this host, and so we shouldn't check if
        # it can accept the volume again in the CapacityFilter.
        filter_properties['vol_exists_on'] = current_host

        weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                      filter_properties)
        if not weighed_hosts:
            raise exception.NoValidHost(reason=_('No valid hosts for volume '
                                                 '%(id)s with type %(type)s') %
                                        {'id': request_spec['volume_id'],
                                         'type': request_spec['volume_type']})

        for weighed_host in weighed_hosts:
            host_state = weighed_host.obj
            if host_state.host == current_host:
                return host_state

        if utils.extract_host(current_host, 'pool') is None:
            # legacy volumes created before pool is introduced has no pool
            # info in host.  But host_state.host always include pool level
            # info. In this case if above exact match didn't work out, we
            # find host_state that are of the same host of volume being
            # retyped. In other words, for legacy volumes, retyping could
            # cause migration between pools on same host, which we consider
            # it is different from migration between hosts thus allow that
            # to happen even migration policy is 'never'.
            for weighed_host in weighed_hosts:
                host_state = weighed_host.obj
                backend = utils.extract_host(host_state.host, 'backend')
                if backend == current_host:
                    return host_state

        if migration_policy == 'never':
            raise exception.NoValidHost(reason=_('Current host not valid for '
                                                 'volume %(id)s with type '
                                                 '%(type)s, migration not '
                                                 'allowed') %
                                        {'id': request_spec['volume_id'],
                                         'type': request_spec['volume_type']})

        top_host = self._choose_top_host(weighed_hosts, request_spec)
        return top_host.obj
Example #3
    def _detach_device(self,
                       context,
                       attach_info,
                       device,
                       properties,
                       is_snapshot=False,
                       force=False):
        """Disconnect the volume or snapshot from the host. """
        connector = attach_info['connector']
        connector.disconnect_volume(attach_info['conn']['data'],
                                    attach_info['device'])

        rpcapi = self.jacket_rpcapi
        if not is_snapshot:
            rpcapi.terminate_connection(context,
                                        device,
                                        properties,
                                        force=force)
            rpcapi.remove_export(context, device)
        else:
            volume = self.db.volume_get(context, device.volume_id)
            host = volume_utils.extract_host(volume['host'], 'backend')
            backend = self._get_volume_backend(host=host)
            self._get_driver(backend)._detach_snapshot(context, attach_info,
                                                       device, properties,
                                                       force)
Example #4
    def _get_available_backup_service_host(self, host, az, volume_host=None):
        """Return an appropriate backup service host."""

        # FIXME(dulek): We need to keep compatibility with Liberty, where c-bak
        # were coupled with c-vol. If we're running in mixed Liberty-Mitaka
        # environment we will be scheduling backup jobs the old way.
        #
        # This snippet should go away in Newton. Note that volume_host
        # parameter will also be unnecessary then.
        if not self._is_scalable_only():
            if volume_host:
                volume_host = volume_utils.extract_host(volume_host,
                                                        level='host')
            if volume_host and self._is_backup_service_enabled(
                    az, volume_host):
                return volume_host
            elif host and self._is_backup_service_enabled(az, host):
                return host
            else:
                raise exception.ServiceNotFound(service_id='storage-backup')

        backup_host = None
        if (not host or not CONF.backup_use_same_host):
            backup_host = self._get_any_available_backup_service(az)
        elif self._is_backup_service_enabled(az, host):
            backup_host = host
        if not backup_host:
            raise exception.ServiceNotFound(service_id='storage-backup')
        return backup_host
Example #5
    def _get_available_backup_service_host(self, host, az, volume_host=None):
        """Return an appropriate backup service host."""

        # FIXME(dulek): We need to keep compatibility with Liberty, where c-bak
        # were coupled with c-vol. If we're running in mixed Liberty-Mitaka
        # environment we will be scheduling backup jobs the old way.
        #
        # This snippet should go away in Newton. Note that volume_host
        # parameter will also be unnecessary then.
        if not self._is_scalable_only():
            if volume_host:
                volume_host = volume_utils.extract_host(volume_host,
                                                        level='host')
            if volume_host and self._is_backup_service_enabled(az,
                                                               volume_host):
                return volume_host
            elif host and self._is_backup_service_enabled(az, host):
                return host
            else:
                raise exception.ServiceNotFound(service_id='storage-backup')

        backup_host = None
        if (not host or not CONF.backup_use_same_host):
            backup_host = self._get_any_available_backup_service(az)
        elif self._is_backup_service_enabled(az, host):
            backup_host = host
        if not backup_host:
            raise exception.ServiceNotFound(service_id='storage-backup')
        return backup_host
Example #6
    def update_pools(self, capability, service):
        """Update storage pools information from backend reported info."""
        if not capability:
            return

        pools = capability.get('pools', None)
        active_pools = set()
        if pools and isinstance(pools, list):
            # Update all pools stats according to information from list
            # of pools in volume capacity
            for pool_cap in pools:
                pool_name = pool_cap['pool_name']
                self._append_backend_info(pool_cap)
                cur_pool = self.pools.get(pool_name, None)
                if not cur_pool:
                    # Add new pool
                    cur_pool = PoolState(self.host, pool_cap, pool_name)
                    self.pools[pool_name] = cur_pool
                cur_pool.update_from_volume_capability(pool_cap, service)

                active_pools.add(pool_name)
        elif pools is None:
            # To handle legacy driver that doesn't report pool
            # information in the capability, we have to prepare
            # a pool from backend level info, or to update the one
            # we created in self.pools.
            pool_name = self.volume_backend_name
            if pool_name is None:
                # To get DEFAULT_POOL_NAME
                pool_name = vol_utils.extract_host(self.host, 'pool', True)

            if len(self.pools) == 0:
                # No pool was there
                single_pool = PoolState(self.host, capability, pool_name)
                self._append_backend_info(capability)
                self.pools[pool_name] = single_pool
            else:
                # this is an update from legacy driver
                try:
                    single_pool = self.pools[pool_name]
                except KeyError:
                    single_pool = PoolState(self.host, capability, pool_name)
                    self._append_backend_info(capability)
                    self.pools[pool_name] = single_pool

            single_pool.update_from_volume_capability(capability, service)
            active_pools.add(pool_name)

        # remove non-active pools from self.pools
        nonactive_pools = set(self.pools.keys()) - active_pools
        for pool in nonactive_pools:
            LOG.debug(
                "Removing non-active pool %(pool)s @ %(host)s "
                "from scheduler cache.", {
                    'pool': pool,
                    'host': self.host
                })
            del self.pools[pool]
Example #7
    def update_pools(self, capability, service):
        """Update storage pools information from backend reported info."""
        if not capability:
            return

        pools = capability.get('pools', None)
        active_pools = set()
        if pools and isinstance(pools, list):
            # Update all pools stats according to information from list
            # of pools in volume capacity
            for pool_cap in pools:
                pool_name = pool_cap['pool_name']
                self._append_backend_info(pool_cap)
                cur_pool = self.pools.get(pool_name, None)
                if not cur_pool:
                    # Add new pool
                    cur_pool = PoolState(self.host, pool_cap, pool_name)
                    self.pools[pool_name] = cur_pool
                cur_pool.update_from_volume_capability(pool_cap, service)

                active_pools.add(pool_name)
        elif pools is None:
            # To handle legacy driver that doesn't report pool
            # information in the capability, we have to prepare
            # a pool from backend level info, or to update the one
            # we created in self.pools.
            pool_name = self.volume_backend_name
            if pool_name is None:
                # To get DEFAULT_POOL_NAME
                pool_name = vol_utils.extract_host(self.host, 'pool', True)

            if len(self.pools) == 0:
                # No pool was there
                single_pool = PoolState(self.host, capability, pool_name)
                self._append_backend_info(capability)
                self.pools[pool_name] = single_pool
            else:
                # this is an update from legacy driver
                try:
                    single_pool = self.pools[pool_name]
                except KeyError:
                    single_pool = PoolState(self.host, capability, pool_name)
                    self._append_backend_info(capability)
                    self.pools[pool_name] = single_pool

            single_pool.update_from_volume_capability(capability, service)
            active_pools.add(pool_name)

        # remove non-active pools from self.pools
        nonactive_pools = set(self.pools.keys()) - active_pools
        for pool in nonactive_pools:
            LOG.debug("Removing non-active pool %(pool)s @ %(host)s "
                      "from scheduler cache.", {'pool': pool,
                                                'host': self.host})
            del self.pools[pool]
Example #8
 def _attach_device(self, context, backup_device,
                    properties, is_snapshot=False):
     """Attach backup device."""
     if not is_snapshot:
         return self._attach_volume(context, backup_device, properties)
     else:
         volume = self.db.volume_get(context, backup_device.volume_id)
         host = volume_utils.extract_host(volume['host'], 'backend')
         backend = self._get_volume_backend(host=host)
         rc = self._get_driver(backend)._attach_snapshot(
             context, backup_device, properties)
         return rc
Example #9
 def test_host_passes_filters_happy_day(self, _mock_service_get_topic):
     """Do a successful pass through of with host_passes_filters()."""
     sched, ctx = self._host_passes_filters_setup(
         _mock_service_get_topic)
     request_spec = {'volume_id': 1,
                     'volume_type': {'name': 'LVM_iSCSI'},
                     'volume_properties': {'project_id': 1,
                                           'size': 1}}
     ret_host = sched.host_passes_filters(ctx, 'host1#lvm1',
                                          request_spec, {})
     self.assertEqual('host1', utils.extract_host(ret_host.host))
     self.assertTrue(_mock_service_get_topic.called)
Example #10
    def host_passes(self, host_state, filter_properties):
        context = filter_properties['context']
        host = volume_utils.extract_host(host_state.host, 'host')

        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)

        # Without 'local_to_instance' hint
        if not instance_uuid:
            return True

        if not uuidutils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        # TODO(adrienverge): Currently it is not recommended to allow instance
        # migrations for hypervisors where this hint will be used. In case of
        # instance migration, a previously locally-created volume will not be
        # automatically migrated. Also in case of instance migration during the
        # volume's scheduling, the result is unpredictable. A future
        # enhancement would be to subscribe to Nova migration events (e.g. via
        # Ceilometer).

        # First, lookup for already-known information in local cache
        if instance_uuid in self._cache:
            return self._cache[instance_uuid] == host

        if not self._nova_has_extended_server_attributes(context):
            LOG.warning(
                _LW('Hint "%s" dropped because '
                    'ExtendedServerAttributes not active in Nova.'),
                HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        server = nova.API().get_server(context,
                                       instance_uuid,
                                       privileged_user=True,
                                       timeout=REQUESTS_TIMEOUT)

        if not hasattr(server, INSTANCE_HOST_PROP):
            LOG.warning(
                _LW('Hint "%s" dropped because Nova did not return '
                    'enough information. Either Nova policy needs to '
                    'be changed or a privileged account for Nova '
                    'should be specified in conf.'), HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

        # Match if given instance is hosted on host
        return self._cache[instance_uuid] == host
Example #11
    def test_default_of_spreading_first(self):
        hostinfo_list = self._get_all_hosts()

        # host1: allocated_capacity_gb=0, weight=0        Norm=0.0
        # host2: allocated_capacity_gb=1748, weight=-1748
        # host3: allocated_capacity_gb=256, weight=-256
        # host4: allocated_capacity_gb=1848, weight=-1848 Norm=-1.0
        # host5: allocated_capacity_gb=1548, weight=-1540

        # so, host1 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(0.0, weighed_host.weight)
        self.assertEqual(
            'host1', utils.extract_host(weighed_host.obj.host))
Example #12
    def test_capacity_weight_multiplier2(self):
        self.flags(allocated_capacity_weight_multiplier=-2.0)
        hostinfo_list = self._get_all_hosts()

        # host1: allocated_capacity_gb=0, weight=0        Norm=0.0
        # host2: allocated_capacity_gb=1748, weight=-3496
        # host3: allocated_capacity_gb=256, weight=-512
        # host4: allocated_capacity_gb=1848, weight=-3696 Norm=-2.0
        # host5: allocated_capacity_gb=1548, weight=-3080

        # so, host1 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(0.0, weighed_host.weight)
        self.assertEqual(
            'host1', utils.extract_host(weighed_host.obj.host))
Example #13
    def test_capacity_weight_multiplier1(self):
        self.flags(allocated_capacity_weight_multiplier=1.0)
        hostinfo_list = self._get_all_hosts()

        # host1: allocated_capacity_gb=0, weight=0          Norm=0.0
        # host2: allocated_capacity_gb=1748, weight=1748
        # host3: allocated_capacity_gb=256, weight=256
        # host4: allocated_capacity_gb=1848, weight=1848    Norm=1.0
        # host5: allocated_capacity_gb=1548, weight=1540

        # so, host4 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(1.0, weighed_host.weight)
        self.assertEqual(
            'host4', utils.extract_host(weighed_host.obj.host))
Example #14
 def _attach_device(self,
                    context,
                    backup_device,
                    properties,
                    is_snapshot=False):
     """Attach backup device."""
     if not is_snapshot:
         return self._attach_volume(context, backup_device, properties)
     else:
         volume = self.db.volume_get(context, backup_device.volume_id)
         host = volume_utils.extract_host(volume['host'], 'backend')
         backend = self._get_volume_backend(host=host)
         rc = self._get_driver(backend)._attach_snapshot(
             context, backup_device, properties)
         return rc
Example #15
 def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic):
     # Retype should pass if current host fails filters but another host
     # is suitable when policy=on-demand.
     sched, ctx = self._host_passes_filters_setup(
         _mock_service_get_topic)
     extra_specs = {'volume_backend_name': 'lvm1'}
     request_spec = {'volume_id': 1,
                     'volume_type': {'name': 'LVM_iSCSI',
                                     'extra_specs': extra_specs},
                     'volume_properties': {'project_id': 1,
                                           'size': 200,
                                           'host': 'host4'}}
     host_state = sched.find_retype_host(ctx, request_spec,
                                         filter_properties={},
                                         migration_policy='on-demand')
     self.assertEqual('host1', utils.extract_host(host_state.host))
Example #16
    def _detach_device(self, context, attach_info, device,
                       properties, is_snapshot=False, force=False):
        """Disconnect the volume or snapshot from the host. """
        connector = attach_info['connector']
        connector.disconnect_volume(attach_info['conn']['data'],
                                    attach_info['device'])

        rpcapi = self.jacket_rpcapi
        if not is_snapshot:
            rpcapi.terminate_connection(context, device, properties,
                                        force=force)
            rpcapi.remove_export(context, device)
        else:
            volume = self.db.volume_get(context, device.volume_id)
            host = volume_utils.extract_host(volume['host'], 'backend')
            backend = self._get_volume_backend(host=host)
            self._get_driver(backend)._detach_snapshot(
                context, attach_info, device, properties, force)
Example #17
 def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic):
     # Retype should pass if current host passes filters and
     # policy=never. host4 doesn't have enough space to hold an additional
     # 200GB, but it is already the host of this volume and should not be
     # counted twice.
     sched, ctx = self._host_passes_filters_setup(
         _mock_service_get_topic)
     extra_specs = {'volume_backend_name': 'lvm4'}
     request_spec = {'volume_id': 1,
                     'volume_type': {'name': 'LVM_iSCSI',
                                     'extra_specs': extra_specs},
                     'volume_properties': {'project_id': 1,
                                           'size': 200,
                                           'host': 'host4#lvm4'}}
     host_state = sched.find_retype_host(ctx, request_spec,
                                         filter_properties={},
                                         migration_policy='never')
     self.assertEqual('host4', utils.extract_host(host_state.host))
Example #18
 def _schedule(self, context, request_spec, filter_properties=None):
     weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                   filter_properties)
     # When we get the weighed_hosts, we clear those hosts whose backend
     # is not same as consistencygroup's backend.
     CG_backend = request_spec.get('CG_backend')
     if weighed_hosts and CG_backend:
         # Get host name including host@backend#pool info from
         # weighed_hosts.
         for host in weighed_hosts[::-1]:
             backend = utils.extract_host(host.obj.host)
             if backend != CG_backend:
                 weighed_hosts.remove(host)
     if not weighed_hosts:
         LOG.warning(_LW('No weighed hosts found for volume '
                         'with properties: %s'),
                     filter_properties['request_spec']['volume_type'])
         return None
     return self._choose_top_host(weighed_hosts, request_spec)
Example #19
 def _schedule(self, context, request_spec, filter_properties=None):
     weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                   filter_properties)
     # When we get the weighed_hosts, we clear those hosts whose backend
     # is not same as consistencygroup's backend.
     CG_backend = request_spec.get('CG_backend')
     if weighed_hosts and CG_backend:
         # Get host name including host@backend#pool info from
         # weighed_hosts.
         for host in weighed_hosts[::-1]:
             backend = utils.extract_host(host.obj.host)
             if backend != CG_backend:
                 weighed_hosts.remove(host)
     if not weighed_hosts:
         LOG.warning(
             _LW('No weighed hosts found for volume '
                 'with properties: %s'),
             filter_properties['request_spec']['volume_type'])
         return None
     return self._choose_top_host(weighed_hosts, request_spec)
Example #20
 def test_get_volume_rpc_host(self):
     host = 'Host@backend'
     # default level is 'backend'
     # check if host with backend is returned
     self.assertEqual(volume_utils.extract_host(host),
                      volume_utils.get_volume_rpc_host(host))
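
Judging from this assertion, get_volume_rpc_host presumably just returns the default ('backend') level of extract_host, i.e. 'host@backend' with any '#pool' suffix stripped. A minimal sketch of that assumed behaviour, not the actual implementation:

    def get_volume_rpc_host(host):
        # Assumed behaviour, inferred from the test above: the RPC target is
        # the 'backend'-level portion of a 'host@backend#pool' string.
        return host.split('#')[0] if host else host

    # e.g. get_volume_rpc_host('Host@backend#pool') == 'Host@backend'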
Example #21
    def _cast_create_volume(self, context, request_spec, filter_properties):
        source_volid = request_spec['source_volid']
        source_replicaid = request_spec['source_replicaid']
        volume_id = request_spec['volume_id']
        volume = request_spec['volume']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']
        cgroup_id = request_spec['consistencygroup_id']
        host = None
        cgsnapshot_id = request_spec['cgsnapshot_id']

        if cgroup_id:
            # If cgroup_id existed, we should cast volume to the scheduler
            # to choose a proper pool whose backend is same as CG's backend.
            cgroup = storage.ConsistencyGroup.get_by_id(context, cgroup_id)
            # FIXME(wanghao): CG_backend got added before request_spec was
            # converted to versioned storage. We should make sure that this
            # will be handled by object version translations once we add
            # RequestSpec object.
            request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
        elif snapshot_id and CONF.snapshot_same_host:
            # NOTE(Rongze Zhu): A simple solution for bug 1008866.
            #
            # If snapshot_id is set and CONF.snapshot_same_host is True, make
            # the call create volume directly to the volume host where the
            # snapshot resides instead of passing it through the scheduler, so
            # snapshot can be copied to the new volume.
            snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
            source_volume_ref = storage.Volume.get_by_id(context,
                                                         snapshot.volume_id)
            host = source_volume_ref.host
        elif source_volid:
            source_volume_ref = storage.Volume.get_by_id(context,
                                                         source_volid)
            host = source_volume_ref.host
        elif source_replicaid:
            source_volume_ref = storage.Volume.get_by_id(context,
                                                         source_replicaid)
            host = source_volume_ref.host

        # if not host:
        #     # Cast to the scheduler and let it handle whatever is needed
        #     # to select the target host for this volume.
        #     self.scheduler_rpcapi.create_volume(
        #         context,
        #         CONF.volume_topic,
        #         volume_id,
        #         snapshot_id=snapshot_id,
        #         image_id=image_id,
        #         request_spec=request_spec,
        #         filter_properties=filter_properties,
        #         volume=volume)
        # else:
        #     # Bypass the scheduler and send the request directly to the volume
        #     # manager.
        #     volume.host = host
        #     volume.scheduled_at = timeutils.utcnow()
        #     volume.save()
        #     if not cgsnapshot_id:
        #         self.compute_rpcapi.create_volume(
        #             context,
        #             volume,
        #             volume.host,
        #             request_spec,
        #             filter_properties,
        #             allow_reschedule=False)

        #by luorui : no scheduler,need host for rpcapi and others
        host = None
        volume.host = host
        volume.scheduled_at = timeutils.utcnow()
        volume.save()
        if not cgsnapshot_id:
            self.jacket_rpcapi.create_volume(
                context,
                volume,
                volume.host,
                request_spec,
                filter_properties,
                allow_reschedule=False)
Example #22
    def test_extract_host(self):
        host = 'Host'
        # default level is 'backend'
        self.assertEqual(host,
                         volume_utils.extract_host(host))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'host'))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'backend'))
        # default_pool_name doesn't work for level other than 'pool'
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'host', True))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'host', False))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'backend', True))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'backend', False))
        self.assertIsNone(volume_utils.extract_host(host, 'pool'))
        self.assertEqual('_pool0',
                         volume_utils.extract_host(host, 'pool', True))

        host = 'Host@Backend'
        self.assertEqual('Host@Backend',
                         volume_utils.extract_host(host))
        self.assertEqual('Host',
                         volume_utils.extract_host(host, 'host'))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'backend'))
        self.assertIsNone(volume_utils.extract_host(host, 'pool'))
        self.assertEqual('_pool0',
                         volume_utils.extract_host(host, 'pool', True))

        host = 'Host@Backend#Pool'
        pool = 'Pool'
        self.assertEqual('Host@Backend',
                         volume_utils.extract_host(host))
        self.assertEqual('Host',
                         volume_utils.extract_host(host, 'host'))
        self.assertEqual('Host@Backend',
                         volume_utils.extract_host(host, 'backend'))
        self.assertEqual(pool,
                         volume_utils.extract_host(host, 'pool'))
        self.assertEqual(pool,
                         volume_utils.extract_host(host, 'pool', True))

        host = 'Host#Pool'
        self.assertEqual('Host',
                         volume_utils.extract_host(host))
        self.assertEqual('Host',
                         volume_utils.extract_host(host, 'host'))
        self.assertEqual('Host',
                         volume_utils.extract_host(host, 'backend'))
        self.assertEqual(pool,
                         volume_utils.extract_host(host, 'pool'))
        self.assertEqual(pool,
                         volume_utils.extract_host(host, 'pool', True))
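
The assertions above pin down the full contract of extract_host. The sketch below is a minimal reconstruction that satisfies this test, included only as a reading aid for the other examples; the real helper in the cinder/jacket volume_utils module may differ in details such as input validation:

    def extract_host(host, level='backend', default_pool_name=False):
        """Return the requested level of a 'host@backend#pool' string.

        'host'    -> 'host'
        'backend' -> 'host@backend' (the default)
        'pool'    -> 'pool', or None when there is no '#pool' suffix
                     ('_pool0' if default_pool_name is True).
        """
        if host is None:
            return None
        if level == 'host':
            # Strip both the '@backend' and the '#pool' suffixes.
            return host.split('#')[0].split('@')[0]
        elif level == 'backend':
            # Keep 'host@backend', drop the '#pool' suffix if present.
            return host.split('#')[0]
        elif level == 'pool':
            if '#' in host:
                return host.split('#', 1)[1]
            return '_pool0' if default_pool_name else None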
Example #23
    def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
        add_volumes_new = ""
        for volume in volumes:
            if volume['id'] in add_volumes_list:
                # Volume already in CG. Remove from add_volumes.
                add_volumes_list.remove(volume['id'])

        for add_vol in add_volumes_list:
            try:
                add_vol_ref = self.db.volume_get(context, add_vol)
            except exception.VolumeNotFound:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume cannot be "
                         "found.") % {
                             'volume_id': add_vol,
                             'group_id': group.id
                         })
                raise exception.InvalidVolume(reason=msg)
            orig_group = add_vol_ref.get('consistencygroup_id', None)
            if orig_group:
                # If volume to be added is already in the group to be updated,
                # it should have been removed from the add_volumes_list in the
                # beginning of this function. If we are here, it means it is
                # in a different group.
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because it is already in "
                         "consistency group %(orig_group)s.") % {
                             'volume_id': add_vol_ref['id'],
                             'group_id': group.id,
                             'orig_group': orig_group
                         })
                raise exception.InvalidVolume(reason=msg)
            if add_vol_ref:
                add_vol_type_id = add_vol_ref.get('volume_type_id', None)
                if not add_vol_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because it has no volume "
                             "type.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id
                             })
                    raise exception.InvalidVolume(reason=msg)
                if add_vol_type_id not in group.volume_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume type "
                             "%(volume_type)s is not supported by the "
                             "group.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'volume_type': add_vol_type_id
                             })
                    raise exception.InvalidVolume(reason=msg)
                if (add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS):
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume is in an "
                             "invalid state: %(status)s. Valid states are: "
                             "%(valid)s.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'status': add_vol_ref['status'],
                                 'valid': VALID_ADD_VOL_TO_CG_STATUS
                             })
                    raise exception.InvalidVolume(reason=msg)

                # group.host and add_vol_ref['host'] are in this format:
                # 'host@backend#pool'. Extract host (host@backend) before
                # doing comparison.
                vol_host = vol_utils.extract_host(add_vol_ref['host'])
                group_host = vol_utils.extract_host(group.host)
                if group_host != vol_host:
                    raise exception.InvalidVolume(
                        reason=_("Volume is not local to this node."))

                # Volume exists. It will be added to CG.
                if add_volumes_new:
                    add_volumes_new += ","
                add_volumes_new += add_vol_ref['id']

            else:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume does not exist.") %
                       {
                           'volume_id': add_vol_ref['id'],
                           'group_id': group.id
                       })
                raise exception.InvalidVolume(reason=msg)

        return add_volumes_new
Example #24
    def _get_weighted_candidates_group(self,
                                       context,
                                       request_spec_list,
                                       filter_properties_list=None):
        """Finds hosts that supports the consistencygroup.

        Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()

        weighed_hosts = []
        index = 0
        for request_spec in request_spec_list:
            volume_properties = request_spec['volume_properties']
            # Since Cinder is using mixed filters from Oslo and it's own, which
            # takes 'resource_XX' and 'volume_XX' as input respectively,
            # copying 'volume_XX' to 'resource_XX' will make both filters
            # happy.
            resource_properties = volume_properties.copy()
            volume_type = request_spec.get("volume_type", None)
            resource_type = request_spec.get("volume_type", None)
            request_spec.update({'resource_properties': resource_properties})

            config_options = self._get_configuration_options()

            filter_properties = {}
            if filter_properties_list:
                filter_properties = filter_properties_list[index]
                if filter_properties is None:
                    filter_properties = {}
            self._populate_retry(filter_properties, resource_properties)

            # Add consistencygroup_support in extra_specs if it is not there.
            # Make sure it is populated in filter_properties
            if 'consistencygroup_support' not in resource_type.get(
                    'extra_specs', {}):
                resource_type['extra_specs'].update(
                    consistencygroup_support='<is> True')

            filter_properties.update({
                'context': context,
                'request_spec': request_spec,
                'config_options': config_options,
                'volume_type': volume_type,
                'resource_type': resource_type
            })

            self.populate_filter_properties(request_spec, filter_properties)

            # Find our local list of acceptable hosts by filtering and
            # weighing our options. we virtually consume resources on
            # it so subsequent selections can adjust accordingly.

            # Note: remember, we are using an iterator here. So only
            # traverse this list once.
            all_hosts = self.host_manager.get_all_host_states(elevated)
            if not all_hosts:
                return []

            # Filter local hosts based on requirements ...
            hosts = self.host_manager.get_filtered_hosts(
                all_hosts, filter_properties)

            if not hosts:
                return []

            LOG.debug("Filtered %s", hosts)

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            temp_weighed_hosts = self.host_manager.get_weighed_hosts(
                hosts, filter_properties)
            if not temp_weighed_hosts:
                return []
            if index == 0:
                weighed_hosts = temp_weighed_hosts
            else:
                new_weighed_hosts = []
                for host1 in weighed_hosts:
                    for host2 in temp_weighed_hosts:
                        # Should schedule creation of CG on backend level,
                        # not pool level.
                        if (utils.extract_host(
                                host1.obj.host) == utils.extract_host(
                                    host2.obj.host)):
                            new_weighed_hosts.append(host1)
                weighed_hosts = new_weighed_hosts
                if not weighed_hosts:
                    return []

            index += 1

        return weighed_hosts
Example #25
    def create(self, context, name, description, volume_id,
               container, incremental=False, availability_zone=None,
               force=False, snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.')
                       % {'vol1': volume_id,
                          'vol2': snapshot.volume_id})
                raise exception.InvalidVolume(reason=msg)
        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".')
                   % volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use '
                    'the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".')
                   % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        host = self._get_available_backup_service_host(
            None, volume.availability_zone,
            volume_utils.extract_host(volume.host, 'host'))

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(msg, {'s_pid': context.project_id,
                                      's_size': volume['size'],
                                      'd_consumed': _consumed(over),
                                      'd_quota': quotas[over]})
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {'s_pid': context.project_id,
                                      'd_consumed': _consumed(over)})
                    raise exception.BackupLimitExceeded(
                        allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = storage.BackupList.get_all_by_volume(context.elevated(),
                                                           volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp']
                                         < snapshot['created_at']))
                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id,
                              {'status': 'backing-up',
                               'previous_status': previous_status})

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = storage.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
Example #26
    def _get_weighted_candidates_group(self, context, request_spec_list,
                                       filter_properties_list=None):
        """Finds hosts that supports the consistencygroup.

        Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()

        weighed_hosts = []
        index = 0
        for request_spec in request_spec_list:
            volume_properties = request_spec['volume_properties']
            # Since Cinder is using mixed filters from Oslo and it's own, which
            # takes 'resource_XX' and 'volume_XX' as input respectively,
            # copying 'volume_XX' to 'resource_XX' will make both filters
            # happy.
            resource_properties = volume_properties.copy()
            volume_type = request_spec.get("volume_type", None)
            resource_type = request_spec.get("volume_type", None)
            request_spec.update({'resource_properties': resource_properties})

            config_options = self._get_configuration_options()

            filter_properties = {}
            if filter_properties_list:
                filter_properties = filter_properties_list[index]
                if filter_properties is None:
                    filter_properties = {}
            self._populate_retry(filter_properties, resource_properties)

            # Add consistencygroup_support in extra_specs if it is not there.
            # Make sure it is populated in filter_properties
            if 'consistencygroup_support' not in resource_type.get(
                    'extra_specs', {}):
                resource_type['extra_specs'].update(
                    consistencygroup_support='<is> True')

            filter_properties.update({'context': context,
                                      'request_spec': request_spec,
                                      'config_options': config_options,
                                      'volume_type': volume_type,
                                      'resource_type': resource_type})

            self.populate_filter_properties(request_spec,
                                            filter_properties)

            # Find our local list of acceptable hosts by filtering and
            # weighing our options. we virtually consume resources on
            # it so subsequent selections can adjust accordingly.

            # Note: remember, we are using an iterator here. So only
            # traverse this list once.
            all_hosts = self.host_manager.get_all_host_states(elevated)
            if not all_hosts:
                return []

            # Filter local hosts based on requirements ...
            hosts = self.host_manager.get_filtered_hosts(all_hosts,
                                                         filter_properties)

            if not hosts:
                return []

            LOG.debug("Filtered %s", hosts)

            # weighted_host = WeightedHost() ... the best
            # host for the job.
            temp_weighed_hosts = self.host_manager.get_weighed_hosts(
                hosts,
                filter_properties)
            if not temp_weighed_hosts:
                return []
            if index == 0:
                weighed_hosts = temp_weighed_hosts
            else:
                new_weighed_hosts = []
                for host1 in weighed_hosts:
                    for host2 in temp_weighed_hosts:
                        # Should schedule creation of CG on backend level,
                        # not pool level.
                        if (utils.extract_host(host1.obj.host) ==
                                utils.extract_host(host2.obj.host)):
                            new_weighed_hosts.append(host1)
                weighed_hosts = new_weighed_hosts
                if not weighed_hosts:
                    return []

            index += 1

        return weighed_hosts
Example #27
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use ' 'the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".') % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        host = self._get_available_backup_service_host(
            None, volume.availability_zone,
            volume_utils.extract_host(volume.host, 'host'))

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = storage.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id, {
            'status': 'backing-up',
            'previous_status': previous_status
        })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = storage.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup