Example #1
 def _schedule(self, context, request_spec, filter_properties=None):
     weighed_backends = self._get_weighted_candidates(
         context, request_spec, filter_properties)
     # When we get the weighed_backends, we filter out the backends that
     # don't match the resource's backend (it could be assigned from a
     # group, snapshot or volume).
     resource_backend = request_spec.get('resource_backend')
     if weighed_backends and resource_backend:
         resource_backend_has_pool = bool(
             volume_utils.extract_host(resource_backend, 'pool'))
         # Get host name including host@backend#pool info from
         # weighed_backends.
         for backend in weighed_backends[::-1]:
             backend_id = (
                 backend.obj.backend_id if resource_backend_has_pool else
                 volume_utils.extract_host(backend.obj.backend_id))
             if backend_id != resource_backend:
                 weighed_backends.remove(backend)
     if not weighed_backends:
         LOG.warning(
             'No weighed backend found for volume '
             'with properties: %s',
             filter_properties['request_spec'].get('volume_type'))
         return None
     return self._choose_top_backend(weighed_backends, request_spec)
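Every example in this collection leans on cinder's volume_utils.extract_host, which picks apart a host string of the form host@backend#pool. The sketch below is a minimal re-implementation for orientation only: it assumes that three-part format and omits extras such as the default_pool_name flag of the real helper; the host strings in the asserts are hypothetical.

# A minimal sketch (not the real implementation) of extract_host semantics,
# assuming host strings of the form 'host@backend#pool'.
def extract_host_sketch(host, level='backend'):
    if host is None:
        return None
    backend = host.split('#')[0]          # drop any '#pool' suffix
    if level == 'host':
        return backend.split('@')[0]      # 'host@backend' -> 'host'
    if level == 'backend':
        return backend                    # 'host@backend#pool' -> 'host@backend'
    if level == 'pool':
        # The pool part, or None when no pool is encoded in the host string.
        return host.split('#')[1] if '#' in host else None

# Hypothetical host strings, purely for illustration:
assert extract_host_sketch('node1@lvm#pool_a', 'host') == 'node1'
assert extract_host_sketch('node1@lvm#pool_a') == 'node1@lvm'
assert extract_host_sketch('node1@lvm#pool_a', 'pool') == 'pool_a'
assert extract_host_sketch('node1@lvm', 'pool') is None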
Example #2
    def create_cloned_volume(self, volume, src_vref):
        """Clone a volume."""
        if src_vref.size > volume.size:
            msg = (_("create_cloned_volume: source volume %(src_vol)s "
                     "size is %(src_size)dGB and doesn't fit in target "
                     "volume %(tgt_vol)s of size %(tgt_size)dGB.") % {
                         'src_vol': src_vref.name,
                         'src_size': src_vref.size,
                         'tgt_vol': volume.name,
                         'tgt_size': volume.size
                     })
            LOG.error(msg)
            raise exception.InvalidInput(message=msg)
        dest_pool = volume_utils.extract_host(volume.host, level='pool')
        dest_vol_name = self._trans_name_down(volume.name)
        src_pool = volume_utils.extract_host(src_vref.host, level='pool')
        src_vol_name = self._trans_name_down(src_vref.name)

        method = 'block/lvm/clone'
        request_type = 'post'
        params = {
            'srcVolumeName': src_vol_name,
            'srcPoolName': src_pool,
            'destVolumeName': dest_vol_name,
            'destPoolName': dest_pool
        }
        self._rest.send_rest_api(method=method,
                                 params=params,
                                 request_type=request_type)

        if volume.size > src_vref.size:
            self.extend_volume(volume, volume.size)
Example #3
    def backend_passes_filters(self, context, backend, request_spec,
                               filter_properties):
        """Check if the specified backend passes the filters."""
        weighed_backends = self._get_weighted_candidates(
            context, request_spec, filter_properties)
        # If backend has no pool defined we will ignore it in the comparison
        ignore_pool = not bool(volume_utils.extract_host(backend, 'pool'))
        for weighed_backend in weighed_backends:
            backend_id = weighed_backend.obj.backend_id
            if ignore_pool:
                backend_id = volume_utils.extract_host(backend_id)
            if backend_id == backend:
                return weighed_backend.obj

        reason_param = {
            'resource': 'volume',
            'id': '??id missing??',
            'backend': backend
        }
        for resource in ['volume', 'group', 'snapshot']:
            resource_id = request_spec.get('%s_id' % resource, None)
            if resource_id:
                reason_param.update({'resource': resource, 'id': resource_id})
                break
        raise exception.NoValidBackend(
            _('Cannot place %(resource)s %(id)s '
              'on %(backend)s.') % reason_param)
Example #4
    def test_capacity_weight_no_unknown_or_infinite(self):
        self.flags(capacity_weight_multiplier=-1.0)
        del self.host_manager.service_states['host5']
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=-0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host2 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host2',
                         volume_utils.extract_host(worst_host.obj.host))
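The Norm figures in these comments follow from min-max normalizing the raw per-host free values (before the multiplier) and then applying capacity_weight_multiplier. Here is a small worked sketch that reproduces the numbers above; the normalization formula is inferred from how the expected weights line up, not taken verbatim from the weigher.

# Worked sketch of the arithmetic in the comments above. The raw per-host
# free values come straight from those comments; the min-max normalization
# step is an inference, not the weigher's actual code.
import math

free = {
    'host1': 1024 - math.floor(1024 * 0.1),                # 922
    'host2': 2048 * 1.5 - 1748 - math.floor(2048 * 0.1),   # 1120
    'host3': 256 - 512 * 0,                                # 256
    'host4': 2048 * 1.0 - 2047 - math.floor(2048 * 0.05),  # -101
}
lo, hi = min(free.values()), max(free.values())
multiplier = -1.0
weights = {h: multiplier * (v - lo) / (hi - lo) for h, v in free.items()}

expected = {'host1': -0.837837837838, 'host2': -1.0,
            'host3': -0.292383292383, 'host4': 0.0}
for h, norm in expected.items():
    assert abs(weights[h] - norm) < 1e-9   # host4 is best, host2 is worst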
Example #5
    def find_retype_backend(self,
                            context,
                            request_spec,
                            filter_properties=None,
                            migration_policy='never'):
        """Find a backend that can accept the volume with its new type."""
        filter_properties = filter_properties or {}
        backend = (request_spec['volume_properties'].get('cluster_name')
                   or request_spec['volume_properties']['host'])

        # The volume already exists on this backend, and so we shouldn't check
        # if it can accept the volume again in the CapacityFilter.
        filter_properties['vol_exists_on'] = backend

        weighed_backends = self._get_weighted_candidates(
            context, request_spec, filter_properties)
        if not weighed_backends:
            raise exception.NoValidBackend(
                reason=_('No valid backends for volume %(id)s with type '
                         '%(type)s') % {
                             'id': request_spec['volume_id'],
                             'type': request_spec['volume_type']
                         })

        for weighed_backend in weighed_backends:
            backend_state = weighed_backend.obj
            if backend_state.backend_id == backend:
                return backend_state

        if volume_utils.extract_host(backend, 'pool') is None:
            # Legacy volumes created before pools were introduced have no
            # pool info in the host field, but host_state.host always
            # includes pool-level info. In that case, if the exact match
            # above didn't work out, we look for host_states on the same
            # host as the volume being retyped. In other words, for legacy
            # volumes retyping may move the volume between pools on the
            # same host, which we consider different from migration between
            # hosts, so we allow it even when the migration policy is
            # 'never'.
            for weighed_backend in weighed_backends:
                backend_state = weighed_backend.obj
                new_backend = volume_utils.extract_host(
                    backend_state.backend_id, 'backend')
                if new_backend == backend:
                    return backend_state

        if migration_policy == 'never':
            raise exception.NoValidBackend(
                reason=_('Current backend not valid for volume %(id)s with '
                         'type %(type)s, migration not allowed') % {
                             'id': request_spec['volume_id'],
                             'type': request_spec['volume_type']
                         })

        top_backend = self._choose_top_backend(weighed_backends, request_spec)
        return top_backend.obj
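The legacy branch above works because a pool-less host string compares equal to pool-qualified backend IDs once they are reduced to the backend level, so every pool on that backend matches. A tiny illustration, using a hedged stand-in for extract_host and hypothetical host strings:

# Hedged stand-in for extract_host(backend_id, 'backend'); assumes the
# 'host@backend#pool' format.
def backend_level(backend_id):
    return backend_id.split('#')[0]

legacy_volume_host = 'node1@lvm'  # created before pools existed: no '#pool'
candidates = ['node1@lvm#pool_a', 'node1@lvm#pool_b', 'node2@lvm#pool_a']
matches = [c for c in candidates if backend_level(c) == legacy_volume_host]
assert matches == ['node1@lvm#pool_a', 'node1@lvm#pool_b']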
Example #6
 def _find_valid_backends(self, backend_list1, backend_list2):
     new_backends = []
     for backend1 in backend_list1:
         for backend2 in backend_list2:
             # Should schedule creation of group on backend level,
             # not pool level.
             if (volume_utils.extract_host(
                     backend1.obj.backend_id) == volume_utils.extract_host(
                         backend2.obj.backend_id)):
                 new_backends.append(backend1)
     if not new_backends:
         return []
     return new_backends
Example #7
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a new volume base on a specific snapshot."""
        if snapshot.volume_size > volume.size:
            msg = (_("create_volume_from_snapshot: snapshot %(snapshot_name)s "
                     "size is %(snapshot_size)dGB and doesn't fit in target "
                     "volume %(volume_name)s of size %(volume_size)dGB.") % {
                         'snapshot_name': snapshot.name,
                         'snapshot_size': snapshot.volume_size,
                         'volume_name': volume.name,
                         'volume_size': volume.size
                     })
            LOG.error(msg)
            raise exception.InvalidInput(message=msg)
        src_vol_name = self._trans_name_down(snapshot.volume_name)
        source_vol = snapshot.volume
        src_pool = volume_utils.extract_host(source_vol['host'], level='pool')
        dest_name = self._trans_name_down(volume.name)
        dest_pool = volume_utils.extract_host(volume.host, level='pool')
        snap_name = self._trans_name_down(snapshot.name)

        # lock the snapshot before clone from it
        self._snapshot_lock_op('lock', src_vol_name, snap_name, src_pool)

        # do clone from snap to a volume
        method = 'snapshot/volume/cloneLvm'
        request_type = 'post'
        params = {
            'originalLvm': src_vol_name,
            'originalPool': src_pool,
            'originalSnap': snap_name,
            'name': dest_name,
            'pool': dest_pool
        }
        self._rest.send_rest_api(method=method,
                                 params=params,
                                 request_type=request_type)

        # do filling the cloned volume
        self._filling_volume(dest_name, dest_pool)

        # wait until the cloned volume has been filled
        self._wait_volume_filled(dest_name, dest_pool)

        # unlock the original snapshot
        self._snapshot_lock_op('unlock', src_vol_name, snap_name, src_pool)

        if volume.size > snapshot.volume_size:
            self.extend_volume(volume, volume.size)
Example #8
    def _is_flexgroup(self, vol_id=None, host=None):
        """Discover if a volume is a FlexGroup or not"""
        if host is None:
            host = self._get_volume_host(vol_id)

        pool_name = volume_utils.extract_host(host, level='pool')
        return self.ssc_library.is_flexgroup(pool_name)
Example #9
 def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic):
     # Retype should pass if current host passes filters and
     # policy=never. host4 doesn't have enough space to hold an additional
     # 200GB, but it is already the host of this volume and should not be
     # counted twice.
     sched, ctx = self._backend_passes_filters_setup(
         _mock_service_get_topic)
     extra_specs = {'volume_backend_name': 'lvm4'}
     request_spec = {
         'volume_id': fake.VOLUME_ID,
         'volume_type': {
             'name': 'LVM_iSCSI',
             'extra_specs': extra_specs
         },
         'volume_properties': {
             'project_id': 1,
             'size': 200,
             'host': 'host4#lvm4'
         }
     }
     request_spec = objects.RequestSpec.from_primitives(request_spec)
     host_state = sched.find_retype_backend(ctx,
                                            request_spec,
                                            filter_properties={},
                                            migration_policy='never')
     self.assertEqual('host4', volume_utils.extract_host(host_state.host))
Example #10
    def _get_pool_id(self, volume):
        pool_id_list = []
        pool_name = volume_utils.extract_host(volume.host, level='pool')
        all_pools = self.client.query_pool_info()
        for pool in all_pools:
            if pool_name == pool['poolName']:
                pool_id_list.append(pool['poolId'])
            if pool_name.isdigit() and int(pool_name) == int(pool['poolId']):
                pool_id_list.append(pool['poolId'])

        if not pool_id_list:
            msg = _('Storage pool %(pool)s does not exist on the array. '
                    'Please check.') % {
                        "pool": pool_name
                    }
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        # Guard against a pool tag that matches both one pool's name and
        # another pool's ID.
        if len(pool_id_list) > 1:
            msg = _('Storage pool tag %(pool)s exists in multiple storage '
                    'pools %(pool_list)s. Please check.') % {
                        "pool": pool_name,
                        "pool_list": pool_id_list
                    }
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        return pool_id_list[0]
Example #11
 def migrate_volume(self, ctxt, volume, host):
     LOG.debug('enter: migrate_volume id %(id)s, host %(host)s',
               {'id': volume['id'], 'host': host['host']})
     pool = volume_utils.extract_host(volume['host'], 'pool')
     if 'system_id' not in host['capabilities']:
         LOG.error('Target host has no system_id')
         return (False, None)
     if host['capabilities']['system_id'] != self._state['system_id']:
         LOG.info('The target host does not belong to the same '
                  'storage system as the current volume')
         return (False, None)
     if host['capabilities']['pool_name'] == pool:
         LOG.info('The target host belongs to the same storage system '
                  'and pool as the current volume.')
         return (True, None)
     LOG.info('The target host belongs to the same storage system '
              'as the current volume but to a different pool. '
              'The volume will be cloned into the new pool.')
     volume_name = VOLUME_PREFIX + volume['id'][-12:]
     tmp_name = VOLUME_PREFIX + 'tmp'
     tmp_name += str(random.randint(0, 999999)).zfill(8)
     self._cmd.create_volume(tmp_name,
                             str(volume['size']),
                             host['capabilities']['pool_name'],
                             '10')
     self._local_clone_copy(
         volume_name, tmp_name, 'migrate_volume')
     self._cmd.delete_volume(volume_name)
     self._cmd.set_volume_property(tmp_name,
                                   {'type': '"RAID Volume"',
                                    'new_name': volume_name})
     return (True, None)
Example #12
 def create_volume(self, volume):
     LOG.debug('create_volume, volume %s.', volume['id'])
     volume_name = VOLUME_PREFIX + volume['id'][-12:]
     pool_name = volume_utils.extract_host(volume['host'], 'pool')
     ret = self._cmd.create_volume(
         volume_name,
         str(volume['size']),
         pool_name)
     if ret['key'] == 310:
         msg = _('Volume: %s with same name '
                 'already exists on the system.') % volume_name
         raise exception.VolumeBackendAPIException(data=msg)
     elif ret['key'] == 102:
         allow_size = 0
         for p in self._stats['pools']:
             if p['pool_name'] == pool_name:
                 allow_size = p['free_capacity_gb']
                 break
         raise exception.VolumeSizeExceedsLimit(size=int(volume['size']),
                                                limit=allow_size)
     elif ret['key'] == 307:
         raise exception.VolumeLimitExceeded(allowed=96,
                                             name=volume_name)
     elif ret['key'] == 308:
         raise exception.VolumeLimitExceeded(allowed=4096,
                                             name=volume_name)
     model_update = None
     return model_update
Example #13
    def _test_failover_model_updates(self, in_volumes, in_snapshots,
                                     driver_volumes, driver_result,
                                     out_volumes, out_snapshots,
                                     in_groups=None, out_groups=None,
                                     driver_group_result=None,
                                     secondary_id=None):
        host = volume_utils.extract_host(self.manager.host)
        utils.create_service(self.context, {'host': host,
                                            'binary': constants.VOLUME_BINARY})
        for volume in in_volumes:
            utils.create_volume(self.context, self.manager.host, **volume)

        for snapshot in in_snapshots:
            utils.create_snapshot(self.context, **snapshot)

        # in_groups defaults to None, so guard the iteration.
        for group in in_groups or []:
            utils.create_group(self.context, self.manager.host, **group)

        with mock.patch.object(
                self.manager.driver, 'failover_host',
                return_value=(secondary_id, driver_result,
                              driver_group_result)) as driver_mock:
            self.manager.failover_host(self.context, secondary_id)

            self.assertSetEqual(driver_volumes,
                                {v.id for v in driver_mock.call_args[0][1]})

        self._check_failover_db(objects.VolumeList, out_volumes)
        self._check_failover_db(objects.SnapshotList, out_snapshots)
        self._check_failover_db(objects.GroupList, out_groups)
Example #14
 def get_backup_host(self, volume, driver=None):
     if volume:
         volume_host = volume_utils.extract_host(volume.host, 'host')
     else:
         volume_host = None
     az = volume.availability_zone if volume else None
     return self._get_available_backup_service_host(volume_host, az, driver)
Example #15
def mock_host_manager_db_calls(mock_obj, backends_with_pools=False,
                               disabled=None):
    service_states = (
        SERVICE_STATES_WITH_POOLS if backends_with_pools else SERVICE_STATES
    )
    services = []
    az_map = {
        'host1': 'zone1',
        'host2': 'zone1',
        'host3': 'zone2',
        'host4': 'zone3',
        'host5': 'zone3',
    }
    sid = 0
    for svc, state in service_states.items():
        sid += 1
        services.append(
            {
                'id': sid,
                'host': svc,
                'availability_zone': az_map[volume_utils.extract_host(svc,
                                                                      'host')],
                'topic': 'volume',
                'disabled': False,
                'updated_at': timeutils.utcnow(),
                'uuid': state.get('uuid', uuidutils.generate_uuid()),
            }
        )

    if disabled is None:
        mock_obj.return_value = services
    else:
        mock_obj.return_value = [service for service in services
                                 if service['disabled'] == disabled]
Example #16
 def extend_volume(self, volume, new_size):
     volume_name = self._convert_name(volume.name)
     ret = self._cmd.extend_volume(volume_name, int(new_size))
     if ret['key'] == 303:
         raise exception.VolumeNotFound(volume_id=volume_name)
     elif ret['key'] == 321:
         msg = _('Volume capacity shall not be '
                 'less than the current size %sG.') % volume['size']
         raise exception.VolumeBackendAPIException(data=msg)
     elif ret['key'] == 102:
         pool_name = volume_utils.extract_host(volume['host'], 'pool')
         allow_size = 0
         for p in self._stats['pools']:
             if p['pool_name'] == pool_name:
                 allow_size = p['free_capacity_gb']
                 break
         raise exception.VolumeSizeExceedsLimit(size=int(new_size),
                                                limit=allow_size)
     elif ret['key'] != 0:
         msg = (_('Failed to extend_volume %(vol)s to size %(size)s, '
                  'code=%(ret)s, error=%(msg)s.') % {
                      'vol': volume_name,
                      'size': new_size,
                      'ret': ret['key'],
                      'msg': ret['msg']
                  })
         raise exception.VolumeBackendAPIException(data=msg)
Example #17
    def test_default_of_spreading_first(self, volume_type, winner):
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=1024-math.floor(1024*0.1)=922
        #        Norm=0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=2048*1.5-1748-math.floor(2048*0.1)=1120
        #        Norm=1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=256-512*0=256
        #        Norm=0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=2048*1.0-2047-math.floor(2048*0.05)=-101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-1
        #        Norm=0.0819000819001

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list, weight_properties=weight_properties)[0]
        self.assertEqual(1.0, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))
Example #18
    def test_capacity_weight_multiplier2(self, volume_type, winner):
        self.flags(capacity_weight_multiplier=2.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))*2=1844
        #        Norm=1.67567567568
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
        #        Norm=2.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)*2=512
        #        Norm=0.584766584767
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-2
        #        Norm=0.1638001638

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list, weight_properties=weight_properties)[0]
        self.assertEqual(1.0 * 2, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))
Example #19
 def create_volume(self, volume):
     LOG.debug('create_volume, volume %s.', volume['id'])
     volume_name = self._convert_name(volume.name)
     pool_name = volume_utils.extract_host(volume['host'], 'pool')
     ret = self._cmd.create_volume(volume_name, str(volume['size']),
                                   pool_name)
     if ret['key'] == 310:
         msg = _('Volume: %s with same name '
                 'already exists on the system.') % volume_name
         raise exception.VolumeBackendAPIException(data=msg)
     elif ret['key'] == 102:
         allow_size = 0
         for p in self._stats['pools']:
             if p['pool_name'] == pool_name:
                 allow_size = p['free_capacity_gb']
                 break
         raise exception.VolumeSizeExceedsLimit(size=int(volume['size']),
                                                limit=allow_size)
     elif ret['key'] == 307:
         raise exception.VolumeLimitExceeded(allowed=96, name=volume_name)
     elif ret['key'] == 308:
         raise exception.VolumeLimitExceeded(allowed=4096, name=volume_name)
     elif ret['key'] != 0:
         msg = (_('Failed to create_volume %(vol)s on pool %(pool)s, '
                  'code=%(ret)s, error=%(msg)s.') % {
                      'vol': volume_name,
                      'pool': pool_name,
                      'ret': ret['key'],
                      'msg': ret['msg']
                  })
         raise exception.VolumeBackendAPIException(data=msg)
     return None
Example #20
    def _get_destination_ip_and_path(self, volume):
        share = volume_utils.extract_host(volume['host'], level='pool')
        share_ip, share_path = na_utils.get_export_host_junction_path(share)
        dest_ip = self._get_ip_verify_on_cluster(share_ip)
        dest_path = os.path.join(share_path, volume['name'])

        return dest_ip, dest_path
Example #21
    def test_capacity_weight_multiplier1(self, volume_type, winner):
        self.flags(capacity_weight_multiplier=-1.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-float('inf')
        #        Norm=-1.0

        # so, host4 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list, weight_properties=weight_properties)
        weighed_host = weighed_host[0]
        self.assertEqual(0.0, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))
Example #22
    def create_snapshot(self, snapshot):
        """Create snapshot of volume in backend.

        The snapshot type of AS13000 is copy-on-write.
        """
        source_volume = snapshot.volume
        volume_name = self._trans_name_down(source_volume.name)
        if not self._check_volume(source_volume):
            msg = (_('create_snapshot: Source_volume %s does not exist.') %
                   volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        pool = volume_utils.extract_host(source_volume.host, level='pool')
        snapshot_name = self._trans_name_down(snapshot.name)

        method = 'snapshot/volume'
        request_type = 'post'
        params = {
            'snapName': snapshot_name,
            'volumeName': volume_name,
            'poolName': pool,
            'snapType': 'r'
        }
        self._rest.send_rest_api(method=method,
                                 params=params,
                                 request_type=request_type)
Example #23
 def fake_driver_create_grp(context, group):
     """Make sure that the pool is part of the host."""
     self.assertIn('host', group)
     host = group.host
     pool = volume_utils.extract_host(host, level='pool')
     self.assertEqual('fakepool', pool)
     return {'status': fields.GroupStatus.AVAILABLE}
Example #24
 def create_volume(self, volume):
     appliance_name = volume_utils.extract_host(volume.host, "pool")
     appliance_id = self.appliances_to_ids_map[appliance_name]
     LOG.debug(
         "Create PowerStore volume %(volume_name)s of size "
         "%(volume_size)s GiB with id %(volume_id)s on appliance "
         "%(appliance_name)s.", {
             "volume_name": volume.name,
             "volume_size": volume.size,
             "volume_id": volume.id,
             "appliance_name": appliance_name,
         })
     size_in_bytes = utils.gib_to_bytes(volume.size)
     provider_id = self.client.create_volume(appliance_id, volume.name,
                                             size_in_bytes)
     LOG.debug(
         "Successfully created PowerStore volume %(volume_name)s of "
         "size %(volume_size)s GiB with id %(volume_id)s on "
         "appliance %(appliance_name)s. "
         "PowerStore volume id: %(volume_provider_id)s.", {
             "volume_name": volume.name,
             "volume_size": volume.size,
             "volume_id": volume.id,
             "appliance_name": appliance_name,
             "volume_provider_id": provider_id,
         })
     return {
         "provider_id": provider_id,
     }
Example #25
    def _cast_create_volume(self, context: context.RequestContext,
                            request_spec: dict[str, Any],
                            filter_properties: dict) -> None:
        source_volid = request_spec['source_volid']
        volume = request_spec['volume']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']
        cgroup_id = request_spec['consistencygroup_id']
        group_id = request_spec['group_id']
        backup_id = request_spec['backup_id']
        if cgroup_id:
            # If cgroup_id exists, we should cast the volume to the
            # scheduler to choose a proper pool whose backend is the same
            # as the CG's backend.
            cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
            request_spec['resource_backend'] = volume_utils.extract_host(
                cgroup.resource_backend)
        elif group_id:
            # If group_id exists, we should cast the volume to the scheduler
            # to choose a proper pool whose backend is the same as the
            # group's backend.
            group = objects.Group.get_by_id(context, group_id)
            request_spec['resource_backend'] = volume_utils.extract_host(
                group.resource_backend)
        elif snapshot_id and CONF.snapshot_same_host:
            # NOTE(Rongze Zhu): A simple solution for bug 1008866.
            #
            # If snapshot_id is set and CONF.snapshot_same_host is True, make
            # the call create volume directly to the volume host where the
            # snapshot resides instead of passing it through the scheduler, so
            # snapshot can be copied to the new volume.
            # NOTE(tommylikehu): In order to check the backend's capacity
            # before creating volume, we schedule this request to scheduler
            # service with the desired backend information.
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            request_spec['resource_backend'] = snapshot.volume.resource_backend
        elif source_volid:
            source_volume_ref = objects.Volume.get_by_id(context, source_volid)
            request_spec['resource_backend'] = (
                source_volume_ref.resource_backend)

        self.scheduler_rpcapi.create_volume(
            context,
            volume,
            snapshot_id=snapshot_id,
            image_id=image_id,
            request_spec=request_spec,
            filter_properties=filter_properties,
            backup_id=backup_id)
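Note the asymmetry in the branches above: for consistency groups and groups the backend is pinned via extract_host (pool stripped), while for snapshots and source volumes resource_backend can keep the pool of the existing resource. A hedged sketch of how Example #1's _schedule loop then compares candidates, with hypothetical host strings:

# Hedged sketch of the two comparison modes in Example #1's loop, driven by
# whether resource_backend carries a '#pool' suffix (hypothetical strings).
def strip_pool(backend_id):
    return backend_id.split('#')[0]       # sketch of extract_host(backend_id)

candidates = ['node1@lvm#pool_a', 'node1@lvm#pool_b']

# Group case: pool was stripped, so any pool on the backend survives.
resource_backend = 'node1@lvm'
assert [c for c in candidates
        if strip_pool(c) == resource_backend] == candidates

# Snapshot/source-volume case: pool retained, so only the exact pool survives.
resource_backend = 'node1@lvm#pool_a'
assert [c for c in candidates if c == resource_backend] == ['node1@lvm#pool_a']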
Example #26
    def create_volume(self, volume):
        """Driver entry point for creating a new volume (Data ONTAP LUN)."""

        LOG.debug('create_volume on %s', volume['host'])

        # get Data ONTAP volume name as pool name
        pool_name = volume_utils.extract_host(volume['host'], level='pool')

        if pool_name is None:
            msg = _("Pool is not available in the volume host field.")
            raise exception.InvalidHost(reason=msg)

        extra_specs = na_utils.get_volume_extra_specs(volume)

        lun_name = volume['name']

        size = int(volume['size']) * units.Gi

        metadata = {
            'OsType': self.lun_ostype,
            'SpaceReserved': self.lun_space_reservation,
            'Path': '/vol/%s/%s' % (pool_name, lun_name)
        }

        qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
        qos_policy_group_name = (na_utils.get_qos_policy_group_name_from_info(
            qos_policy_group_info))
        qos_policy_group_is_adaptive = volume_utils.is_boolean_str(
            extra_specs.get('netapp:qos_policy_group_is_adaptive'))

        try:
            self._create_lun(pool_name, lun_name, size, metadata,
                             qos_policy_group_name,
                             qos_policy_group_is_adaptive)
        except Exception:
            LOG.exception("Exception creating LUN %(name)s in pool %(pool)s.",
                          {
                              'name': lun_name,
                              'pool': pool_name
                          })
            self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
            msg = _("Volume %s could not be created.")
            raise exception.VolumeBackendAPIException(data=msg %
                                                      (volume['name']))
        LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s', {
            'name': lun_name,
            'qos': qos_policy_group_info
        })

        metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name)
        metadata['Volume'] = pool_name
        metadata['Qtree'] = None

        handle = self._create_lun_handle(metadata)
        self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))

        model_update = self._get_volume_model_update(volume)

        return model_update
Example #27
 def create_volume_from_snapshot(self, volume, snapshot):
     snapshot_name = self._convert_name(snapshot.name)
     volume_name = self._convert_name(volume.name)
     source_volume = self._convert_name(snapshot.volume_name)
     pool = volume_utils.extract_host(volume['host'], 'pool')
     self._cmd.create_volume(volume_name, str(volume['size']), pool, '10')
     self._local_clone_copy(source_volume, volume_name,
                            'create_volume_from_snapshot', snapshot_name)
Example #28
    def _get_cctxt(self, host=None, version=None, **kwargs):
        if host:
            server = volume_utils.extract_host(host)

            # TODO(dulek): If we're pinned before 3.6, we should send stuff the
            # old way - addressing server=host@backend, topic=cinder-volume.
            # Otherwise we're addressing server=host,
            # topic=cinder-volume.host@backend. This conditional can go away
            # when we stop supporting 3.x.
            if self.client.can_send_version('3.6'):
                kwargs['topic'] = '%(topic)s.%(host)s' % {
                    'topic': self.TOPIC,
                    'host': server
                }
                server = volume_utils.extract_host(server, 'host')
            kwargs['server'] = server

        return super(VolumeAPI, self)._get_cctxt(version=version, **kwargs)
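Once the 3.6 RPC version can be sent, the branch above addresses messages by topic 'cinder-volume.host@backend' and server 'host', per its own TODO note. A quick hedged sketch of the resulting strings, with a hypothetical host value:

# Hedged sketch of the post-3.6 addressing above (hypothetical host value).
host = 'node1@lvm#pool_a'
server = host.split('#')[0]            # extract_host(host) -> 'node1@lvm'
topic = 'cinder-volume.%s' % server    # self.TOPIC + '.' + backend-level name
server = server.split('@')[0]          # extract_host(server, 'host') -> 'node1'
assert (topic, server) == ('cinder-volume.node1@lvm', 'node1')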
Example #29
    def test_capacity_weight_cap_infinite(self):
        self.flags(capacity_weight_multiplier=-1.0)
        self.host_manager.service_states['host5'] = {
            'total_capacity_gb': 'infinite',
            'free_capacity_gb': 3000,
            'allocated_capacity_gb': 1548,
            'provisioned_capacity_gb': 1548,
            'max_over_subscription_ratio': 1.0,
            'thin_provisioning_support': True,
            'thick_provisioning_support': False,
            'reserved_percentage': 5,
            'timestamp': datetime.utcnow()
        }
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=3000, free=-float('inf')
        #        Norm=-1.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host5 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host5',
                         volume_utils.extract_host(worst_host.obj.host))
Example #30
    def _add_lun_to_target(self, target_name, volume):
        """Add volume to target."""
        pool = volume_utils.extract_host(volume.host, level='pool')
        volume_name = self._trans_name_down(volume.name)

        method = 'block/lun'
        request_type = 'post'
        params = {'name': target_name, 'pool': pool, 'lvm': volume_name}
        self._rest.send_rest_api(method=method,
                                 params=params,
                                 request_type=request_type)