def test_capacity_weight_no_unknown_or_infinite(self):
        self.flags(capacity_weight_multiplier=-1.0)
        del self.host_manager.service_states['host5']
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=-0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4', utils.extract_host(best_host.obj.host))
        # and host2 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host2', utils.extract_host(worst_host.obj.host))
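
For reference, the "Norm=" values in the comments above can be reproduced with a minimal standalone sketch (not the actual Cinder weigher): it assumes the weight handler min-max normalizes the raw free-capacity values and then scales them by capacity_weight_multiplier.

# Minimal sketch, assuming min-max normalization followed by the multiplier;
# the free values are the raw ones from the comments, before the sign flip.
def normalized_weights(free_by_host, multiplier):
    lo, hi = min(free_by_host.values()), max(free_by_host.values())
    span = (hi - lo) or 1.0
    return {host: multiplier * (free - lo) / span
            for host, free in free_by_host.items()}

frees = {'host1': 922, 'host2': 1120, 'host3': 256, 'host4': -101}
print(normalized_weights(frees, -1.0))
# host1 -> -0.8378..., host2 -> -1.0, host3 -> -0.2923..., host4 -> -0.0 (== 0.0),
# matching the Norm values and the host4/host2 assertions above.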
Example #2
    def create_cloned_volume(self, volume, src_vref):
        """Clone a volume."""
        if src_vref.size > volume.size:
            msg = (_("create_cloned_volume: source volume %(src_vol)s "
                     "size is %(src_size)dGB and doesn't fit in target "
                     "volume %(tgt_vol)s of size %(tgt_size)dGB.") %
                   {'src_vol': src_vref.name,
                    'src_size': src_vref.size,
                    'tgt_vol': volume.name,
                    'tgt_size': volume.size})
            LOG.error(msg)
            raise exception.InvalidInput(message=msg)
        dest_pool = volume_utils.extract_host(volume.host, level='pool')
        dest_vol_name = self._trans_name_down(volume.name)
        src_pool = volume_utils.extract_host(src_vref.host, level='pool')
        src_vol_name = self._trans_name_down(src_vref.name)

        method = 'block/lvm/clone'
        request_type = 'post'
        params = {'srcVolumeName': src_vol_name,
                  'srcPoolName': src_pool,
                  'destVolumeName': dest_vol_name,
                  'destPoolName': dest_pool}
        self._rest.send_rest_api(method=method,
                                 params=params,
                                 request_type=request_type)

        if volume.size > src_vref.size:
            self.extend_volume(volume, volume.size)
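
Since nearly every example here revolves around extract_host, a quick illustrative session may help. The 'host@backend#pool' format and the '_pool0' default shown below are assumptions based on common usage; verify against your Cinder version (the helper may live in cinder.volume.utils or cinder.volume.volume_utils).

# Illustrative only; return values assume a 'host@backend#pool' host string.
host = 'node1@lvmdriver-1#pool_a'              # hypothetical host string
volume_utils.extract_host(host)                # 'node1@lvmdriver-1' (backend level, the default)
volume_utils.extract_host(host, level='host')  # 'node1'
volume_utils.extract_host(host, level='pool')  # 'pool_a'
volume_utils.extract_host('node1@lvmdriver-1', level='pool',
                          default_pool_name=True)   # '_pool0' when no pool is encoded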
Example #3
 def _schedule(self, context, request_spec, filter_properties=None):
     weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                      filter_properties)
     # Once we have the weighed_backends, filter out the backends that don't
     # match the resource's backend (it could be assigned from a group,
     # snapshot or volume).
     resource_backend = request_spec.get('resource_backend')
     if weighed_backends and resource_backend:
         resource_backend_has_pool = bool(utils.extract_host(
             resource_backend, 'pool'))
         # Get host name including host@backend#pool info from
         # weighed_backends.
         for backend in weighed_backends[::-1]:
             backend_id = (
                 backend.obj.backend_id if resource_backend_has_pool
                 else utils.extract_host(backend.obj.backend_id)
             )
             if backend_id != resource_backend:
                 weighed_backends.remove(backend)
     if not weighed_backends:
         LOG.warning('No weighed backend found for volume '
                     'with properties: %s',
                     filter_properties['request_spec'].get('volume_type'))
         return None
     return self._choose_top_backend(weighed_backends, request_spec)
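
A hypothetical illustration of the filtering loop above, using stand-in objects instead of real weighed backends and a simple string split in place of utils.extract_host:

from types import SimpleNamespace as NS

weighed = [NS(obj=NS(backend_id='node1@ceph#pool_a')),
           NS(obj=NS(backend_id='node2@lvm#pool_b'))]
resource_backend = 'node1@ceph'                    # e.g. inherited from a group
has_pool = '#' in resource_backend                 # stand-in for extract_host(..., 'pool')
for backend in weighed[::-1]:                      # iterate over a reversed copy
    backend_id = (backend.obj.backend_id if has_pool
                  else backend.obj.backend_id.partition('#')[0])
    if backend_id != resource_backend:
        weighed.remove(backend)
# -> only the node1@ceph#pool_a entry is kept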
Example #4
    def _cast_create_volume(self, context, request_spec, filter_properties):
        source_volume_ref = None
        source_volid = (request_spec['source_volid'] or
                        request_spec['source_replicaid'])
        volume = request_spec['volume']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']
        cgroup_id = request_spec['consistencygroup_id']
        cgsnapshot_id = request_spec['cgsnapshot_id']
        group_id = request_spec['group_id']
        if cgroup_id:
            # If cgroup_id exists, we should cast the volume to the scheduler
            # to choose a proper pool whose backend is the same as the CG's backend.
            cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
            request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
        elif group_id:
            # If group_id exists, we should cast the volume to the scheduler
            # to choose a proper pool whose backend is the same as the group's backend.
            group = objects.Group.get_by_id(context, group_id)
            # FIXME(wanghao): group_backend got added before request_spec was
            # converted to versioned objects. We should make sure that this
            # will be handled by object version translations once we add
            # RequestSpec object.
            request_spec['group_backend'] = vol_utils.extract_host(group.host)
        elif snapshot_id and CONF.snapshot_same_host:
            # NOTE(Rongze Zhu): A simple solution for bug 1008866.
            #
            # If snapshot_id is set and CONF.snapshot_same_host is True, make
            # the call create volume directly to the volume host where the
            # snapshot resides instead of passing it through the scheduler, so
            # snapshot can be copied to the new volume.
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            source_volume_ref = snapshot.volume
        elif source_volid:
            source_volume_ref = objects.Volume.get_by_id(context, source_volid)

        if not source_volume_ref:
            # Cast to the scheduler and let it handle whatever is needed
            # to select the target host for this volume.
            self.scheduler_rpcapi.create_volume(
                context,
                volume,
                snapshot_id=snapshot_id,
                image_id=image_id,
                request_spec=request_spec,
                filter_properties=filter_properties)
        else:
            # Bypass the scheduler and send the request directly to the volume
            # manager.
            volume.host = source_volume_ref.host
            volume.cluster_name = source_volume_ref.cluster_name
            volume.scheduled_at = timeutils.utcnow()
            volume.save()
            if not cgsnapshot_id:
                self.volume_rpcapi.create_volume(
                    context,
                    volume,
                    request_spec,
                    filter_properties,
                    allow_reschedule=False)
Example #5
 def _find_valid_backends(self, backend_list1, backend_list2):
     new_backends = []
     for backend1 in backend_list1:
         for backend2 in backend_list2:
             # Should schedule creation of group on backend level,
             # not pool level.
             if (utils.extract_host(backend1.obj.backend_id) ==
                     utils.extract_host(backend2.obj.backend_id)):
                 new_backends.append(backend1)
     if not new_backends:
         return []
     return new_backends
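
A similar stand-in sketch for the intersection above: the two weighed lists are matched at the 'host@backend' level, so entries that differ only by pool still count as the same backend (SimpleNamespace objects and the string split are stand-ins for the real types and for utils.extract_host):

from types import SimpleNamespace as NS

def _backend(backend_id):
    return backend_id.partition('#')[0]            # stand-in for utils.extract_host()

list1 = [NS(obj=NS(backend_id='node1@ceph#pool_a')),
         NS(obj=NS(backend_id='node2@lvm#pool_b'))]
list2 = [NS(obj=NS(backend_id='node1@ceph#pool_c'))]
common = [b1 for b1 in list1
          if any(_backend(b1.obj.backend_id) == _backend(b2.obj.backend_id)
                 for b2 in list2)]
# -> [node1@ceph#pool_a]: the pools differ but the backend matches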
Example #6
 def _find_valid_hosts(self, host_list1, host_list2):
     new_hosts = []
     for host1 in host_list1:
         for host2 in host_list2:
             # Should schedule creation of group on backend level,
             # not pool level.
             if (utils.extract_host(host1.obj.host) ==
                     utils.extract_host(host2.obj.host)):
                 new_hosts.append(host1)
     if not new_hosts:
         return []
     return new_hosts
Example #7
    def find_retype_backend(self, context, request_spec,
                            filter_properties=None, migration_policy='never'):
        """Find a backend that can accept the volume with its new type."""
        filter_properties = filter_properties or {}
        backend = (request_spec['volume_properties'].get('cluster_name')
                   or request_spec['volume_properties']['host'])

        # The volume already exists on this backend, and so we shouldn't check
        # if it can accept the volume again in the CapacityFilter.
        filter_properties['vol_exists_on'] = backend

        weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                         filter_properties)
        if not weighed_backends:
            raise exception.NoValidBackend(
                reason=_('No valid backends for volume %(id)s with type '
                         '%(type)s') % {'id': request_spec['volume_id'],
                                        'type': request_spec['volume_type']})

        for weighed_backend in weighed_backends:
            backend_state = weighed_backend.obj
            if backend_state.backend_id == backend:
                return backend_state

        if utils.extract_host(backend, 'pool') is None:
            # Legacy volumes created before pools were introduced have no
            # pool info in the host field, but host_state.host always includes
            # pool-level info. If the exact match above didn't work out, we
            # look for host_states that belong to the same host as the volume
            # being retyped. In other words, for legacy volumes retyping may
            # move data between pools on the same host, which we consider
            # different from migration between hosts and therefore allow it
            # even when the migration policy is 'never'.
            for weighed_backend in weighed_backends:
                backend_state = weighed_backend.obj
                new_backend = utils.extract_host(backend_state.backend_id,
                                                 'backend')
                if new_backend == backend:
                    return backend_state

        if migration_policy == 'never':
            raise exception.NoValidBackend(
                reason=_('Current backend not valid for volume %(id)s with '
                         'type %(type)s, migration not allowed') %
                {'id': request_spec['volume_id'],
                 'type': request_spec['volume_type']})

        top_backend = self._choose_top_backend(weighed_backends, request_spec)
        return top_backend.obj
Example #8
File: api.py Project: muraliran/cinder
    def _get_available_backup_service_host(self, host, az, volume_host=None):
        """Return an appropriate backup service host."""

        # FIXME(dulek): We need to keep compatibility with Liberty, where c-bak
        # was coupled with c-vol. If we're running in a mixed Liberty-Mitaka
        # environment we will be scheduling backup jobs the old way.
        #
        # This snippet should go away in Newton. Note that volume_host
        # parameter will also be unnecessary then.
        if not self._is_scalable_only():
            if volume_host:
                volume_host = volume_utils.extract_host(volume_host,
                                                        level='host')
            if volume_host and self._is_backup_service_enabled(az,
                                                               volume_host):
                return volume_host
            elif host and self._is_backup_service_enabled(az, host):
                return host
            else:
                raise exception.ServiceNotFound(service_id='cinder-backup')

        backup_host = None
        if (not host or not CONF.backup_use_same_host):
            backup_host = self._get_any_available_backup_service(az)
        elif self._is_backup_service_enabled(az, host):
            backup_host = host
        if not backup_host:
            raise exception.ServiceNotFound(service_id='cinder-backup')
        return backup_host
Example #9
 def update_migrated_volume(self, ctxt, volume, new_volume):
     host = utils.extract_host(new_volume['host'])
     cctxt = self.client.prepare(server=host, version='1.19')
     cctxt.call(ctxt,
                'update_migrated_volume',
                volume=volume,
                new_volume=new_volume)
Example #10
File: rpcapi.py Project: nitti/cinder
 def create_consistencygroup_from_src(self, ctxt, group, host,
                                      cgsnapshot=None):
     new_host = utils.extract_host(host)
     cctxt = self.client.prepare(server=new_host, version='1.22')
     cctxt.cast(ctxt, 'create_consistencygroup_from_src',
                group_id=group['id'],
                cgsnapshot_id=cgsnapshot['id'])
Example #11
    def _check_pool_and_fs(self, volume, fs_label):
        """Validation of the pool and filesystem.

        Checks if the file system for the volume-type chosen matches the
        one passed in the volume reference. Also, checks if the pool
        for the volume type matches the pool for the host passed.

        :param volume: Reference to the volume.
        :param fs_label: Label of the file system.
        """
        pool_from_vol_type = self.get_pool(volume)

        pool_from_host = utils.extract_host(volume['host'], level='pool')

        if self.config['services'][pool_from_vol_type]['hdp'] != fs_label:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(pool_type)s) does not match "
                     "the file system (%(fs_label)s) passed in the volume "
                     "reference.") %
                   {'pool_type':
                       self.config['services'][pool_from_vol_type]['hdp'],
                    'fs_label': fs_label})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)

        if pool_from_host != pool_from_vol_type:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(pool_type)s) does not match "
                     "the pool of the host (%(pool_host)s).") %
                   {'pool_type': pool_from_vol_type,
                    'pool_host': pool_from_host})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
Example #12
    def test_capacity_weight_multiplier1(self, volume_type, winner):
        self.flags(capacity_weight_multiplier=-1.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-float('inf')
        #        Norm=-1.0

        # so, host4 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(0.0, weighed_host.weight)
        self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
Example #13
 def migrate_volume_completion(self, ctxt, volume, new_volume, error):
     new_host = utils.extract_host(volume['host'])
     cctxt = self.client.prepare(server=new_host, version='1.10')
     return cctxt.call(ctxt, 'migrate_volume_completion',
                       volume_id=volume['id'],
                       new_volume_id=new_volume['id'],
                       error=error)
Example #14
    def test_capacity_weight_multiplier2(self, volume_type, winner):
        self.flags(capacity_weight_multiplier=2.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))*2=1844
        #        Norm=1.67567567568
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
        #        Norm=2.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)*2=512
        #        Norm=0.584766584767
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-2
        #        Norm=0.1638001638

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(1.0 * 2, weighed_host.weight)
        self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
Example #15
 def fake_driver_create_cg(context, group):
     """Make sure that the pool is part of the host."""
     self.assertIn('host', group)
     host = group.host
     pool = volutils.extract_host(host, level='pool')
     self.assertEqual('fakepool', pool)
     return {'status': 'available'}
Example #16
File: tegile.py Project: NetApp/cinder
    def create_volume(self, volume):
        pool = volume_utils.extract_host(volume['host'], level='pool',
                                         default_pool_name=self._default_pool)
        tegile_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE,
                         'datasetPath': '%s/%s/%s' %
                                        (pool,
                                         TEGILE_LOCAL_CONTAINER_NAME,
                                         self._default_project),
                         'local': 'true',
                         'name': volume['name'],
                         'poolName': '%s' % pool,
                         'projectName': '%s' % self._default_project,
                         'protocol': self._protocol,
                         'thinProvision': 'true',
                         'volSize': volume['size'] * units.Gi}
        params = [tegile_volume, True]

        self._api_executor.send_api_request(method='createVolume',
                                            params=params)

        LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."),
                 {'volname': volume['name'], 'volid': volume['id']})

        return self.get_additional_info(volume, pool, self._default_project)
Example #17
    def test_default_of_spreading_first(self, volume_type, winner):
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=1024-math.floor(1024*0.1)=922
        #        Norm=0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=2048*1.5-1748-math.floor(2048*0.1)=1120
        #        Norm=1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=256-512*0=256
        #        Norm=0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=2048*1.0-2047-math.floor(2048*0.05)=-101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-1
        #        Norm=0.0819000819001

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(1.0, weighed_host.weight)
        self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
Example #18
File: rpcapi.py Project: j-griffith/cinder
    def _get_cctxt(self, host=None, version=None, **kwargs):
        if host:
            server = utils.extract_host(host)

            # TODO(dulek): If we're pinned before 3.6, we should send stuff the
            # old way - addressing server=host@backend, topic=cinder-volume.
            # Otherwise we're addressing server=host,
            # topic=cinder-volume.host@backend. This conditional can go away
            # when we stop supporting 3.x.
            if self.client.can_send_version('3.6'):
                kwargs['topic'] = '%(topic)s.%(host)s' % {'topic': self.TOPIC,
                                                          'host': server}
                server = utils.extract_host(server, 'host')
            kwargs['server'] = server

        return super(VolumeAPI, self)._get_cctxt(version=version, **kwargs)
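
A standalone sketch (hypothetical host string, simple string splits in place of extract_host) of how the two addressing schemes in the TODO above differ:

def _target(host, can_send_36, topic='cinder-volume'):
    server = host.partition('#')[0]                    # extract_host(host): 'host@backend'
    if can_send_36:
        return {'topic': '%s.%s' % (topic, server),
                'server': server.partition('@')[0]}    # extract_host(server, 'host')
    return {'server': server}                          # topic stays 'cinder-volume'

_target('node1@lvmdriver-1#pool_a', can_send_36=False)
# {'server': 'node1@lvmdriver-1'}
_target('node1@lvmdriver-1#pool_a', can_send_36=True)
# {'topic': 'cinder-volume.node1@lvmdriver-1', 'server': 'node1'}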
Example #19
    def test_capacity_weight_multiplier2(self):
        self.flags(capacity_weight_multiplier=2.0)
        hostinfo_list = self._get_all_hosts()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))*2=1844
        #        Norm=1.67567567568
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
        #        Norm=2.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)*2=512
        #        Norm=0.584766584767
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-2
        #        Norm=0.1638001638

        # so, host2 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(weighed_host.weight, 1.0 * 2)
        self.assertEqual(
            utils.extract_host(weighed_host.obj.host), 'host2')
Example #20
    def create_cgsnapshot(self, cgsnapshot, snapshots):
        """Creates a Cinder cgsnapshot object.

        The Cinder cgsnapshot object is created by making use of an
        ephemeral ONTAP CG in order to provide write-order consistency for a
        set of flexvol snapshots. First, a list of the flexvols backing the
        given Cinder CG must be gathered. An ONTAP cg-snapshot of these
        flexvols will create a snapshot copy of all the Cinder volumes in the
        CG. For each Cinder volume in the CG, it is then necessary to
        clone its backing LUN from the ONTAP cg-snapshot. The naming convention
        used for the clones is what indicates the clone's role as a Cinder
        snapshot and its inclusion in a Cinder CG. The ONTAP CG-snapshot of
        the flexvols is no longer required after having cloned the LUNs
        backing the Cinder volumes in the Cinder CG.

        :return: An implicit update for cgsnapshot and snapshots models that
        is interpreted by the manager to set their models to available.
        """
        flexvols = set()
        for snapshot in snapshots:
            flexvols.add(volume_utils.extract_host(snapshot['volume']['host'],
                                                   level='pool'))

        self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id'])

        for snapshot in snapshots:
            self._clone_lun(snapshot['volume']['name'], snapshot['name'],
                            source_snapshot=cgsnapshot['id'])

        for flexvol in flexvols:
            self._handle_busy_snapshot(flexvol, cgsnapshot['id'])
            self.zapi_client.delete_snapshot(flexvol, cgsnapshot['id'])

        return None, None
Example #21
File: hnas_iscsi.py Project: NetApp/cinder
    def _check_pool_and_fs(self, volume, fs_label):
        """Validates pool and file system of a volume being managed.

        Checks if the file system for the volume-type chosen matches the
        one passed in the volume reference. Also, checks if the pool
        for the volume type matches the pool for the host passed.

        :param volume: Reference to the volume.
        :param fs_label: Label of the file system.
        :raises: ManageExistingVolumeTypeMismatch
        """
        pool_from_vol_type = hnas_utils.get_pool(self.config, volume)

        pool_from_host = utils.extract_host(volume.host, level='pool')
        pool = self.config['services'][pool_from_vol_type]['hdp']
        if pool != fs_label:
            msg = (_("Failed to manage existing volume because the "
                     "pool %(pool)s of the volume type chosen does not "
                     "match the file system %(fs_label)s passed in the "
                     "volume reference.")
                   % {'pool': pool, 'fs_label': fs_label})
            LOG.error(msg)
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)

        if pool_from_host != pool_from_vol_type:
            msg = (_("Failed to manage existing volume because the pool "
                     "%(pool)s of the volume type chosen does not match the "
                     "pool %(pool_host)s of the host.") %
                   {'pool': pool_from_vol_type, 'pool_host': pool_from_host})
            LOG.error(msg)
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
Example #22
    def test_capacity_weight_multiplier1(self):
        self.flags(capacity_weight_multiplier=-1.0)
        hostinfo_list = self._get_all_hosts()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-float('inf')
        #        Norm=-1.0

        # so, host4 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(weighed_host.weight, 0.0)
        self.assertEqual(
            utils.extract_host(weighed_host.obj.host), 'host4')
Example #23
    def test_default_of_spreading_first(self):
        hostinfo_list = self._get_all_hosts()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=1024-math.floor(1024*0.1)=922
        #        Norm=0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=2048*1.5-1748-math.floor(2048*0.1)=1120
        #        Norm=1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=256-512*0=256
        #        Norm=0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=2048*1.0-2047-math.floor(2048*0.05)=-101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-1
        #        Norm=0.0819000819001

        # so, host2 should win:
        weighed_host = self._get_weighed_host(hostinfo_list)
        self.assertEqual(weighed_host.weight, 1.0)
        self.assertEqual(
            utils.extract_host(weighed_host.obj.host), 'host2')
Example #24
    def _get_service(self, volume):
        """Get service parameters.

        Get the available service parameters for a given volume using
        its type.

        :param volume: dictionary volume reference
        """

        LOG.debug("_get_service: volume: %s", volume)
        label = utils.extract_host(volume['host'], level='pool')

        if label in self.config['services'].keys():
            svc = self.config['services'][label]
            LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
                     {'lbl': label, 'svc': svc['fslabel']})
            service = (svc['hdp'], svc['path'], svc['fslabel'])
        else:
            LOG.info(_LI("Available services: %s"),
                     self.config['services'].keys())
            LOG.error(_LE("No configuration found for service: %s"),
                      label)
            raise exception.ParameterNotFound(param=label)

        return service
Example #25
File: nfs_base.py Project: C2python/cinder
    def create_volume(self, volume):
        """Creates a volume.

        :param volume: volume reference
        """
        LOG.debug('create_volume on %s', volume['host'])
        self._ensure_shares_mounted()

        # get share as pool name
        pool_name = volume_utils.extract_host(volume['host'], level='pool')

        if pool_name is None:
            msg = _("Pool is not available in the volume host field.")
            raise exception.InvalidHost(reason=msg)

        extra_specs = na_utils.get_volume_extra_specs(volume)

        try:
            volume['provider_location'] = pool_name
            LOG.debug('Using pool %s.', pool_name)
            self._do_create_volume(volume)
            self._do_qos_for_volume(volume, extra_specs)
            return {'provider_location': volume['provider_location']}
        except Exception:
            LOG.exception(_LE("Exception creating vol %(name)s on "
                          "pool %(pool)s."),
                          {'name': volume['name'],
                           'pool': volume['provider_location']})
            # We need to set this for the model update in order for the
            # manager to behave correctly.
            volume['provider_location'] = None

        msg = _("Volume %(vol)s could not be created in pool %(pool)s.")
        raise exception.VolumeBackendAPIException(data=msg % {
            'vol': volume['name'], 'pool': pool_name})
Example #26
    def _get_service(self, volume):
        """Get service parameters.

        Get the available service parameters for a given volume using
        its type.

        :param volume: dictionary volume reference
        :returns: Tuple containing the service parameters (label,
        export path and export file system) or error if no configuration is
        found.
        :raises: ParameterNotFound
        """
        LOG.debug("_get_service: volume: %(vol)s", {'vol': volume})
        label = utils.extract_host(volume.host, level='pool')

        if label in self.config['services'].keys():
            svc = self.config['services'][label]
            LOG.info(_LI("_get_service: %(lbl)s->%(svc)s"),
                     {'lbl': label, 'svc': svc['export']['fs']})
            service = (svc['hdp'], svc['export']['path'], svc['export']['fs'])
        else:
            LOG.info(_LI("Available services: %(svc)s"),
                     {'svc': self.config['services'].keys()})
            LOG.error(_LE("No configuration found for service: %(lbl)s"),
                      {'lbl': label})
            raise exception.ParameterNotFound(param=label)

        return service
Example #27
    def _check_pool_and_share(self, volume, nfs_share):
        """Validates the pool and the NFS share.

        Checks if the NFS share for the volume-type chosen matches the
        one passed in the volume reference. Also, checks if the pool
        for the volume type matches the pool for the host passed.

        :param volume:    cinder volume reference
        :param nfs_share: NFS share passed to manage
        """
        pool_from_vol_type = self.get_pool(volume)

        pool_from_host = utils.extract_host(volume['host'], level='pool')

        if self.config['services'][pool_from_vol_type]['hdp'] != nfs_share:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(pool_type)s) does not match "
                     "the NFS share (%(share)s) passed in the volume "
                     "reference.") %
                   {'pool_type':
                       self.config['services'][pool_from_vol_type]['hdp'],
                    'share': nfs_share})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)

        if pool_from_host != pool_from_vol_type:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(pool_type)s) does not match "
                     "the pool of the host (%(pool_host)s).") %
                   {'pool_type': pool_from_vol_type,
                    'pool_host': pool_from_host})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
Example #28
    def create_cgsnapshot(self, ctxt, group, cgsnapshot):

        host = utils.extract_host(group['host'])
        cctxt = self.client.prepare(server=host, version='1.18')
        cctxt.cast(ctxt, 'create_cgsnapshot',
                   group_id=group['id'],
                   cgsnapshot_id=cgsnapshot['id'])
Example #29
File: nbd.py Project: Nexenta/cinder
 def _get_nbd_number(self, volume):
     host = volutils.extract_host(volume['host'], 'host')
     nbds = self._get_nbd_devices(host)
     for dev in nbds:
         if dev['objectPath'] == self.bucket_path + '/' + volume['name']:
             return dev['number']
     return -1
Example #30
 def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
     new_host = utils.extract_host(volume['host'])
     cctxt = self.client.prepare(server=new_host, version='1.8')
     host_p = {'host': dest_host.host,
               'capabilities': dest_host.capabilities}
     cctxt.cast(ctxt, 'migrate_volume', volume_id=volume['id'],
                host=host_p, force_host_copy=force_host_copy)
Example #31
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover to replication target."""
        volume_updates = []
        back_end_ip = None
        svc_host = vol_utils.extract_host(self.host, 'backend')
        service = objects.Service.get_by_args(context, svc_host,
                                              'cinder-volume')

        if secondary_id and secondary_id != self.replica.backend_id:
            LOG.error("Kaminario driver received failover_host "
                      "request, But backend is non replicated device")
            raise exception.UnableToFailOver(reason=_("Failover requested "
                                                      "on non replicated "
                                                      "backend."))

        if (service.active_backend_id and
                service.active_backend_id != self.configuration.san_ip):
            self.snap_updates = []
            rep_volumes = []
            # update status for non-replicated primary volumes
            for v in volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                if v.replication_status != K2_REP_FAILED_OVER and vol.total:
                    status = 'available'
                    if v.volume_attachment:
                        map_rs = self.client.search("mappings",
                                                    volume=vol.hits[0])
                        status = 'in-use'
                        if map_rs.total:
                            map_rs.hits[0].delete()
                    volume_updates.append({'volume_id': v['id'],
                                           'updates':
                                           {'status': status}})
                else:
                    rep_volumes.append(v)

            # In-sync from secondary array to primary array
            for v in rep_volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                rvol_name = self.get_rep_name(vol_name)
                rvol = self.target.search("volumes", name=rvol_name)
                session_name = self.get_session_name(v['id'])
                rsession_name = self.get_rep_name(session_name)
                ssn = self.target.search("replication/sessions",
                                         name=rsession_name)
                if ssn.total:
                    tgt_ssn = ssn.hits[0]
                ssn = self.client.search("replication/sessions",
                                         name=session_name)
                if ssn.total:
                    src_ssn = ssn.hits[0]

                if (tgt_ssn.state == 'failed_over' and
                   tgt_ssn.current_role == 'target' and vol.total and src_ssn):
                    map_rs = self.client.search("mappings", volume=vol.hits[0])
                    if map_rs.total:
                        map_rs.hits[0].delete()
                    tgt_ssn.state = 'in_sync'
                    tgt_ssn.save()
                    self._check_for_status(src_ssn, 'in_sync')
                if (rvol.total and src_ssn.state == 'in_sync' and
                   src_ssn.current_role == 'target'):
                    gen_no = self._create_volume_replica_user_snap(self.target,
                                                                   tgt_ssn)
                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
                                              'gno': gen_no,
                                              'stime': time.time()})
                LOG.debug("The target session: %s state is "
                          "changed to in sync", rsession_name)

            self._is_user_snap_sync_finished()

            # Delete secondary volume mappings and create snapshot
            for v in rep_volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                rvol_name = self.get_rep_name(vol_name)
                rvol = self.target.search("volumes", name=rvol_name)
                session_name = self.get_session_name(v['id'])
                rsession_name = self.get_rep_name(session_name)
                ssn = self.target.search("replication/sessions",
                                         name=rsession_name)
                if ssn.total:
                    tgt_ssn = ssn.hits[0]
                ssn = self.client.search("replication/sessions",
                                         name=session_name)
                if ssn.total:
                    src_ssn = ssn.hits[0]
                if (rvol.total and src_ssn.state == 'in_sync' and
                   src_ssn.current_role == 'target'):
                    map_rs = self.target.search("mappings",
                                                volume=rvol.hits[0])
                    if map_rs.total:
                        map_rs.hits[0].delete()
                    gen_no = self._create_volume_replica_user_snap(self.target,
                                                                   tgt_ssn)
                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
                                              'gno': gen_no,
                                              'stime': time.time()})
            self._is_user_snap_sync_finished()
            # changing source sessions to failed-over
            for v in rep_volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                rvol_name = self.get_rep_name(vol_name)
                rvol = self.target.search("volumes", name=rvol_name)
                session_name = self.get_session_name(v['id'])
                rsession_name = self.get_rep_name(session_name)
                ssn = self.target.search("replication/sessions",
                                         name=rsession_name)
                if ssn.total:
                    tgt_ssn = ssn.hits[0]
                ssn = self.client.search("replication/sessions",
                                         name=session_name)
                if ssn.total:
                    src_ssn = ssn.hits[0]
                if (rvol.total and src_ssn.state == 'in_sync' and
                   src_ssn.current_role == 'target'):
                    src_ssn.state = 'failed_over'
                    src_ssn.save()
                    self._check_for_status(tgt_ssn, 'suspended')
                    LOG.debug("The target session: %s state is "
                              "changed to failed over", session_name)

                    src_ssn.state = 'in_sync'
                    src_ssn.save()
                    LOG.debug("The target session: %s state is "
                              "changed to in sync", session_name)
                    rep_status = fields.ReplicationStatus.DISABLED
                    volume_updates.append({'volume_id': v['id'],
                                           'updates':
                                          {'replication_status': rep_status}})

            back_end_ip = self.configuration.san_ip
        else:
            """Failover to replication target."""
            for v in volumes:
                vol_name = self.get_volume_name(v['id'])
                rv = self.get_rep_name(vol_name)
                if self.target.search("volumes", name=rv).total:
                    self._failover_volume(v)
                    volume_updates.append(
                        {'volume_id': v['id'],
                         'updates':
                         {'replication_status': K2_REP_FAILED_OVER}})
                else:
                    volume_updates.append({'volume_id': v['id'],
                                           'updates': {'status': 'error', }})
            back_end_ip = self.replica.backend_id
        return back_end_ip, volume_updates, []
Example #32
 def delete_cgsnapshot(self, ctxt, cgsnapshot, host):
     new_host = utils.extract_host(host)
     cctxt = self.client.prepare(server=new_host, version='1.18')
     cctxt.cast(ctxt, 'delete_cgsnapshot', cgsnapshot_id=cgsnapshot['id'])
Example #33
 def delete_consistencygroup(self, ctxt, group):
     host = utils.extract_host(group['host'])
     cctxt = self.client.prepare(server=host, version='1.18')
     cctxt.cast(ctxt, 'delete_consistencygroup', group_id=group['id'])
Example #34
 def create_consistencygroup(self, ctxt, group, host):
     new_host = utils.extract_host(host)
     cctxt = self.client.prepare(server=new_host, version='1.18')
     cctxt.cast(ctxt, 'create_consistencygroup', group_id=group['id'])
Example #35
 def reenable_replication(self, ctxt, volume):
     new_host = utils.extract_host(volume['host'])
     cctxt = self.client.prepare(server=new_host, version='1.17')
     cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id'])
Example #36
    def restore(self, context, backup_id, volume_id=None, name=None):
        """Make the RPC call to restore a volume backup."""
        check_policy(context, 'restore')
        backup = self.get(context, backup_id)
        if backup['status'] != 'available':
            msg = _('Backup status must be available')
            raise exception.InvalidBackup(reason=msg)

        size = backup['size']
        if size is None:
            msg = _('Backup to be restored has invalid size')
            raise exception.InvalidBackup(reason=msg)

        # Create a volume if none specified. If a volume is specified check
        # it is large enough for the backup
        if volume_id is None:
            if name is None:
                name = 'restore_backup_%s' % backup_id

            description = 'auto-created_from_restore_from_backup'

            LOG.info(_LI("Creating volume of %(size)s GB for restore of "
                         "backup %(backup_id)s."),
                     {'size': size, 'backup_id': backup_id},
                     context=context)
            volume = self.volume_api.create(context, size, name, description)
            volume_id = volume['id']

            while True:
                volume = self.volume_api.get(context, volume_id)
                if volume['status'] != 'creating':
                    break
                greenthread.sleep(1)
        else:
            volume = self.volume_api.get(context, volume_id)

        if volume['status'] != "available":
            msg = _('Volume to be restored to must be available')
            raise exception.InvalidVolume(reason=msg)

        LOG.debug('Checking backup size %(bs)s against volume size %(vs)s',
                  {'bs': size, 'vs': volume['size']})
        if size > volume['size']:
            msg = (_('volume size %(volume_size)d is too small to restore '
                     'backup of size %(size)d.') %
                   {'volume_size': volume['size'], 'size': size})
            raise exception.InvalidVolume(reason=msg)

        LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
                     "backup %(backup_id)s"),
                 {'volume_id': volume_id, 'backup_id': backup_id},
                 context=context)

        # Set the status here rather than at the start, which would require
        # unrolling it for each error condition; the window is very small.
        backup.status = 'restoring'
        backup.save()
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        self.db.volume_update(context, volume_id, {'status':
                                                   'restoring-backup'})

        self.backup_rpcapi.restore_backup(context, volume_host, backup,
                                          volume_id)

        d = {'backup_id': backup_id,
             'volume_id': volume_id, }

        return d
Example #37
File: api.py Project: whitepages/cinder
    def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
        add_volumes_new = ""
        for volume in volumes:
            if volume['id'] in add_volumes_list:
                # Volume already in CG. Remove from add_volumes.
                add_volumes_list.remove(volume['id'])

        for add_vol in add_volumes_list:
            try:
                add_vol_ref = self.db.volume_get(context, add_vol)
            except exception.VolumeNotFound:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume cannot be "
                         "found.") % {
                             'volume_id': add_vol,
                             'group_id': group.id
                         })
                raise exception.InvalidVolume(reason=msg)
            orig_group = add_vol_ref.get('consistencygroup_id', None)
            if orig_group:
                # If volume to be added is already in the group to be updated,
                # it should have been removed from the add_volumes_list in the
                # beginning of this function. If we are here, it means it is
                # in a different group.
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because it is already in "
                         "consistency group %(orig_group)s.") % {
                             'volume_id': add_vol_ref['id'],
                             'group_id': group.id,
                             'orig_group': orig_group
                         })
                raise exception.InvalidVolume(reason=msg)
            if add_vol_ref:
                add_vol_type_id = add_vol_ref.get('volume_type_id', None)
                if not add_vol_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because it has no volume "
                             "type.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id
                             })
                    raise exception.InvalidVolume(reason=msg)
                if add_vol_type_id not in group.volume_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume type "
                             "%(volume_type)s is not supported by the "
                             "group.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'volume_type': add_vol_type_id
                             })
                    raise exception.InvalidVolume(reason=msg)
                if (add_vol_ref['status']
                        not in VALID_REMOVE_VOL_FROM_CG_STATUS):
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume is in an "
                             "invalid state: %(status)s. Valid states are: "
                             "%(valid)s.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'status': add_vol_ref['status'],
                                 'valid': VALID_REMOVE_VOL_FROM_CG_STATUS
                             })
                    raise exception.InvalidVolume(reason=msg)

                # group.host and add_vol_ref['host'] are in this format:
                # 'host@backend#pool'. Extract host (host@backend) before
                # doing comparison.
                vol_host = vol_utils.extract_host(add_vol_ref['host'])
                group_host = vol_utils.extract_host(group.host)
                if group_host != vol_host:
                    raise exception.InvalidVolume(
                        reason=_("Volume is not local to this node."))

                # Volume exists. It will be added to CG.
                if add_volumes_new:
                    add_volumes_new += ","
                add_volumes_new += add_vol_ref['id']

            else:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume does not exist.") %
                       {
                           'volume_id': add_vol_ref['id'],
                           'group_id': group.id
                       })
                raise exception.InvalidVolume(reason=msg)

        return add_volumes_new
Example #38
File: api.py Project: zhanghaijie01/cinder
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None,
               metadata=None):
        """Make the RPC call to create a volume backup."""
        volume = self.volume_api.get(context, volume_id)
        context.authorize(policy.CREATE_POLICY, target_obj=volume)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
            if snapshot['status'] not in ["available"]:
                msg = (_('Snapshot to be backed up must be available, '
                         'but the current status is "%s".') %
                       snapshot['status'])
                raise exception.InvalidSnapshot(reason=msg)
        elif volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume.host, 'host')
        availability_zone = availability_zone or volume.availability_zone
        host = self._get_available_backup_service_host(volume_host,
                                                       availability_zone)

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context,
                                                   e,
                                                   resource='backups',
                                                   size=volume.size)
        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.BACKING_UP})
        else:
            self.db.volume_update(context, volume_id, {
                'status': 'backing-up',
                'previous_status': previous_status
            })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'availability_zone': availability_zone,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
                'metadata': metadata or {}
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
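
The parent-selection rule in the NOTE above can be exercised on its own. A
minimal sketch, assuming plain dicts with timezone-aware datetimes stand in
for the real Backup objects (pick_parent and the sample data are illustrative
only):

from datetime import datetime, timezone

def pick_parent(backups, snapshot_created_at=None):
    # Backups whose data was captured at or after the snapshot are mapped to
    # the minimum datetime so they can never win the max() comparison.
    sentinel = datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone.utc)
    return max(
        backups,
        key=lambda b: b['data_timestamp']
        if (snapshot_created_at is None
            or b['data_timestamp'] < snapshot_created_at)
        else sentinel)

backups = [
    {'id': 'b1', 'data_timestamp': datetime(2023, 1, 1, 8, 0, tzinfo=timezone.utc)},
    {'id': 'b2', 'data_timestamp': datetime(2023, 1, 1, 8, 20, tzinfo=timezone.utc)},
]
snap_time = datetime(2023, 1, 1, 8, 10, tzinfo=timezone.utc)
assert pick_parent(backups)['id'] == 'b2'             # no snapshot: latest wins
assert pick_parent(backups, snap_time)['id'] == 'b1'  # snapshot at 8:10: 8:00 backup wins
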
Example #39
    def _test_group_api(self, method, rpc_method, **kwargs):
        ctxt = context.RequestContext('fake_user', 'fake_project')

        if 'rpcapi_class' in kwargs:
            rpcapi_class = kwargs['rpcapi_class']
            del kwargs['rpcapi_class']
        else:
            rpcapi_class = volume_rpcapi.VolumeAPI
        rpcapi = rpcapi_class()
        expected_retval = 'foo' if method == 'call' else None

        target = {"version": kwargs.pop('version', rpcapi.RPC_API_VERSION)}

        if 'request_spec' in kwargs:
            spec = jsonutils.to_primitive(kwargs['request_spec'])
            kwargs['request_spec'] = spec

        expected_msg = copy.deepcopy(kwargs)
        if 'host' in expected_msg:
            del expected_msg['host']
        if 'group_snapshot' in expected_msg:
            group_snapshot = expected_msg['group_snapshot']
            if group_snapshot:
                # Touch the lazy-loadable 'group' attribute on both copies so
                # it is resolved before the primitives are compared below.
                group_snapshot.group
                kwargs['group_snapshot'].group

        if 'host' in kwargs:
            host = kwargs['host']
        elif 'group' in kwargs:
            host = kwargs['group'].service_topic_queue
        elif 'group_snapshot' in kwargs:
            host = kwargs['group_snapshot'].service_topic_queue

        target['server'] = utils.extract_host(host, 'host')
        target['topic'] = '%s.%s' % (constants.VOLUME_TOPIC,
                                     utils.extract_host(host))

        self.fake_args = None
        self.fake_kwargs = None

        def _fake_prepare_method(*args, **kwds):
            for kwd in kwds:
                self.assertEqual(kwds[kwd], target[kwd])
            return rpcapi.client

        def _fake_rpc_method(*args, **kwargs):
            self.fake_args = args
            self.fake_kwargs = kwargs
            if expected_retval:
                return expected_retval

        self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method)
        self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method)

        retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(expected_retval, retval)
        expected_args = [ctxt, method]

        for arg, expected_arg in zip(self.fake_args, expected_args):
            self.assertEqual(expected_arg, arg)

        for kwarg, value in self.fake_kwargs.items():
            if isinstance(value, objects.Group):
                expected_group = expected_msg[kwarg].obj_to_primitive()
                group = value.obj_to_primitive()
                self.assertEqual(expected_group, group)
            elif isinstance(value, objects.GroupSnapshot):
                expected_grp_snap = expected_msg[kwarg].obj_to_primitive()
                grp_snap = value.obj_to_primitive()
                self.assertEqual(expected_grp_snap, grp_snap)
            else:
                self.assertEqual(expected_msg[kwarg], value)
Example #40
    def _test_volume_api(self, method, rpc_method, **kwargs):
        ctxt = context.RequestContext('fake_user', 'fake_project')

        if 'rpcapi_class' in kwargs:
            rpcapi_class = kwargs['rpcapi_class']
            del kwargs['rpcapi_class']
        else:
            rpcapi_class = volume_rpcapi.VolumeAPI
        rpcapi = rpcapi_class()
        expected_retval = 'foo' if method == 'call' else None

        target = {
            "version": kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
        }

        if 'request_spec' in kwargs:
            spec = jsonutils.to_primitive(kwargs['request_spec'])
            kwargs['request_spec'] = spec

        expected_msg = copy.deepcopy(kwargs)
        if 'volume' in expected_msg:
            volume = expected_msg['volume']
            del expected_msg['volume']
            expected_msg['volume_id'] = volume['id']
        if 'snapshot' in expected_msg:
            snapshot = expected_msg['snapshot']
            del expected_msg['snapshot']
            expected_msg['snapshot_id'] = snapshot['id']
            expected_msg['snapshot'] = snapshot
        if 'host' in expected_msg:
            del expected_msg['host']
        if 'dest_host' in expected_msg:
            dest_host = expected_msg['dest_host']
            dest_host_dict = {'host': dest_host.host,
                              'capabilities': dest_host.capabilities}
            del expected_msg['dest_host']
            expected_msg['host'] = dest_host_dict
        if 'new_volume' in expected_msg:
            volume = expected_msg['new_volume']
            del expected_msg['new_volume']
            expected_msg['new_volume_id'] = volume['id']

        if 'cgsnapshot' in expected_msg:
            cgsnapshot = expected_msg['cgsnapshot']
            if cgsnapshot:
                del expected_msg['cgsnapshot']
                expected_msg['cgsnapshot_id'] = cgsnapshot['id']
            else:
                expected_msg['cgsnapshot_id'] = None

        if 'host' in kwargs:
            host = kwargs['host']
        elif 'group' in kwargs:
            host = kwargs['group']['host']
        elif 'volume' not in kwargs and 'snapshot' in kwargs:
            host = 'fake_host'
        else:
            host = kwargs['volume']['host']

        target['server'] = utils.extract_host(host)
        target['topic'] = '%s.%s' % (CONF.volume_topic, host)

        self.fake_args = None
        self.fake_kwargs = None

        def _fake_prepare_method(*args, **kwds):
            for kwd in kwds:
                self.assertEqual(kwds[kwd], target[kwd])
            return rpcapi.client

        def _fake_rpc_method(*args, **kwargs):
            self.fake_args = args
            self.fake_kwargs = kwargs
            if expected_retval:
                return expected_retval

        self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method)
        self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method)

        retval = getattr(rpcapi, method)(ctxt, **kwargs)

        self.assertEqual(expected_retval, retval)
        expected_args = [ctxt, method]

        for arg, expected_arg in zip(self.fake_args, expected_args):
            self.assertEqual(expected_arg, arg)

        for kwarg, value in self.fake_kwargs.items():
            if isinstance(value, objects.Snapshot):
                expected_snapshot = expected_msg[kwarg].obj_to_primitive()
                snapshot = value.obj_to_primitive()
                self.assertEqual(expected_snapshot, snapshot)
            elif isinstance(value, objects.ConsistencyGroup):
                expected_cg = expected_msg[kwarg].obj_to_primitive()
                cg = value.obj_to_primitive()
                self.assertEqual(expected_cg, cg)
            else:
                self.assertEqual(expected_msg[kwarg], value)
Example #41
    def test_get_volume_rpc_host(self):
        host = 'Host@backend'
        # default level is 'backend'
        # check if host with backend is returned
        self.assertEqual(volume_utils.extract_host(host),
                         volume_utils.get_volume_rpc_host(host))
Example #42
    def _get_service(self, volume):
        """Get available service parameters.

        Get the available service parameters for a given volume using its type.
        :param volume: dictionary volume reference
        """

        label = utils.extract_host(volume['host'], level='pool')
        LOG.info(_LI("Using service label: %s"), label)

        if label in self.config['services'].keys():
            svc = self.config['services'][label]
            # HNAS - one time lookup
            # see if the client supports CHAP authentication and if
            # iscsi_secret has already been set, retrieve the secret if
            # available, otherwise generate and store
            if self.config['chap_enabled'] == 'True':
                # it may not exist, create and set secret
                if 'iscsi_secret' not in svc:
                    LOG.info(_LI("Retrieving secret for service: %s"), label)

                    out = self.bend.get_targetsecret(self.config['hnas_cmd'],
                                                     self.config['mgmt_ip0'],
                                                     self.config['username'],
                                                     self.config['password'],
                                                     'cinder-' + label,
                                                     svc['hdp'])
                    svc['iscsi_secret'] = out
                    if svc['iscsi_secret'] == "":
                        svc['iscsi_secret'] = utils.generate_password()[0:15]
                        self.bend.set_targetsecret(self.config['hnas_cmd'],
                                                   self.config['mgmt_ip0'],
                                                   self.config['username'],
                                                   self.config['password'],
                                                   'cinder-' + label,
                                                   svc['hdp'],
                                                   svc['iscsi_secret'])

                        LOG.info(_LI("Set tgt CHAP secret for service: %s"),
                                 label)
            else:
                # We set a blank password when the client does not
                # support CHAP. Later on, if the client tries to create a new
                # target that does not exist in the backend, we check for this
                # value and use a temporary dummy password.
                if 'iscsi_secret' not in svc:
                    # Warn only the first time
                    LOG.info(_LI("CHAP authentication disabled"))

                svc['iscsi_secret'] = ""

            if 'iscsi_target' not in svc:
                LOG.info(_LI("Retrieving target for service: %s"), label)

                out = self.bend.get_targetiqn(self.config['hnas_cmd'],
                                              self.config['mgmt_ip0'],
                                              self.config['username'],
                                              self.config['password'],
                                              'cinder-' + label,
                                              svc['hdp'],
                                              svc['iscsi_secret'])
                svc['iscsi_target'] = out

            self.config['services'][label] = svc

            service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
                       svc['port'], svc['hdp'], svc['iscsi_target'],
                       svc['iscsi_secret'])
        else:
            LOG.info(_LI("Available services: %s"),
                     self.config['services'].keys())
            LOG.error(_LE("No configuration found for service: %s"), label)
            raise exception.ParameterNotFound(param=label)

        return service
Example #43
    def test_extract_host(self):
        host = 'Host'
        # default level is 'backend'
        self.assertEqual(host, volume_utils.extract_host(host))
        self.assertEqual(host, volume_utils.extract_host(host, 'host'))
        self.assertEqual(host, volume_utils.extract_host(host, 'backend'))
        # default_pool_name doesn't work for level other than 'pool'
        self.assertEqual(host, volume_utils.extract_host(host, 'host', True))
        self.assertEqual(host, volume_utils.extract_host(host, 'host', False))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'backend', True))
        self.assertEqual(host,
                         volume_utils.extract_host(host, 'backend', False))
        self.assertEqual(None, volume_utils.extract_host(host, 'pool'))
        self.assertEqual('_pool0',
                         volume_utils.extract_host(host, 'pool', True))

        host = 'Host@Backend'
        self.assertEqual('Host@Backend', volume_utils.extract_host(host))
        self.assertEqual('Host', volume_utils.extract_host(host, 'host'))
        self.assertEqual(host, volume_utils.extract_host(host, 'backend'))
        self.assertEqual(None, volume_utils.extract_host(host, 'pool'))
        self.assertEqual('_pool0',
                         volume_utils.extract_host(host, 'pool', True))

        host = 'Host@Backend#Pool'
        pool = 'Pool'
        self.assertEqual('Host@Backend', volume_utils.extract_host(host))
        self.assertEqual('Host', volume_utils.extract_host(host, 'host'))
        self.assertEqual('Host@Backend',
                         volume_utils.extract_host(host, 'backend'))
        self.assertEqual(pool, volume_utils.extract_host(host, 'pool'))
        self.assertEqual(pool, volume_utils.extract_host(host, 'pool', True))

        host = 'Host#Pool'
        self.assertEqual('Host', volume_utils.extract_host(host))
        self.assertEqual('Host', volume_utils.extract_host(host, 'host'))
        self.assertEqual('Host', volume_utils.extract_host(host, 'backend'))
        self.assertEqual(pool, volume_utils.extract_host(host, 'pool'))
        self.assertEqual(pool, volume_utils.extract_host(host, 'pool', True))
Example #44
    def test_extract_host(self):
        host = 'Host'
        # default level is 'backend'
        self.assertEqual(volume_utils.extract_host(host), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'backend'), 'Host')
        # default_pool_name doesn't work for level other than 'pool'
        self.assertEqual(volume_utils.extract_host(host, 'host', True), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'host', False),
                         'Host')
        self.assertEqual(volume_utils.extract_host(host, 'backend', True),
                         'Host')
        self.assertEqual(volume_utils.extract_host(host, 'backend', False),
                         'Host')
        self.assertEqual(volume_utils.extract_host(host, 'pool'), None)
        self.assertEqual(volume_utils.extract_host(host, 'pool', True),
                         '_pool0')

        host = 'Host@Backend'
        self.assertEqual(volume_utils.extract_host(host), 'Host@Backend')
        self.assertEqual(volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'backend'),
                         'Host@Backend')
        self.assertEqual(volume_utils.extract_host(host, 'pool'), None)
        self.assertEqual(volume_utils.extract_host(host, 'pool', True),
                         '_pool0')

        host = 'Host@Backend#Pool'
        self.assertEqual(volume_utils.extract_host(host), 'Host@Backend')
        self.assertEqual(volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'backend'),
                         'Host@Backend')
        self.assertEqual(volume_utils.extract_host(host, 'pool'), 'Pool')
        self.assertEqual(volume_utils.extract_host(host, 'pool', True), 'Pool')

        host = 'Host#Pool'
        self.assertEqual(volume_utils.extract_host(host), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'backend'), 'Host')
        self.assertEqual(volume_utils.extract_host(host, 'pool'), 'Pool')
        self.assertEqual(volume_utils.extract_host(host, 'pool', True), 'Pool')
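
The two tests above pin down the host-string grammar 'host[@backend[#pool]]'
and the level / default_pool_name semantics. A minimal sketch that is
consistent with those assertions (an assumed re-implementation, not the
actual cinder.volume.utils helper):

def extract_host(host, level='backend', default_pool_name=False):
    # 'backend' keeps 'host@backend', 'host' keeps only the part before '@',
    # 'pool' returns the '#pool' suffix (or '_pool0' / None when absent).
    backend, _, pool = host.partition('#')
    if level == 'backend':
        return backend
    if level == 'host':
        return backend.partition('@')[0]
    if level == 'pool':
        return pool if pool else ('_pool0' if default_pool_name else None)

assert extract_host('Host@Backend#Pool') == 'Host@Backend'
assert extract_host('Host@Backend#Pool', 'host') == 'Host'
assert extract_host('Host#Pool', 'pool') == 'Pool'
assert extract_host('Host', 'pool', True) == '_pool0'
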
Example #45
    def _cast_create_volume(self, context, request_spec, filter_properties):
        source_volid = request_spec['source_volid']
        source_replicaid = request_spec['source_replicaid']
        volume_id = request_spec['volume_id']
        volume = request_spec['volume']
        snapshot_id = request_spec['snapshot_id']
        image_id = request_spec['image_id']
        cgroup_id = request_spec['consistencygroup_id']
        host = None
        cgsnapshot_id = request_spec['cgsnapshot_id']

        if cgroup_id:
            # If cgroup_id existed, we should cast volume to the scheduler
            # to choose a proper pool whose backend is same as CG's backend.
            cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
            # FIXME(wanghao): CG_backend got added before request_spec was
            # converted to versioned objects. We should make sure that this
            # will be handled by object version translations once we add
            # RequestSpec object.
            request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
        elif snapshot_id and CONF.snapshot_same_host:
            # NOTE(Rongze Zhu): A simple solution for bug 1008866.
            #
            # If snapshot_id is set and CONF.snapshot_same_host is True, make
            # the call create volume directly to the volume host where the
            # snapshot resides instead of passing it through the scheduler, so
            # snapshot can be copied to the new volume.
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            source_volume_ref = objects.Volume.get_by_id(
                context, snapshot.volume_id)
            host = source_volume_ref.host
        elif source_volid:
            source_volume_ref = objects.Volume.get_by_id(context, source_volid)
            host = source_volume_ref.host
        elif source_replicaid:
            source_volume_ref = objects.Volume.get_by_id(
                context, source_replicaid)
            host = source_volume_ref.host

        if not host:
            # Cast to the scheduler and let it handle whatever is needed
            # to select the target host for this volume.
            self.scheduler_rpcapi.create_volume(
                context,
                CONF.volume_topic,
                volume_id,
                snapshot_id=snapshot_id,
                image_id=image_id,
                request_spec=request_spec,
                filter_properties=filter_properties,
                volume=volume)
        else:
            # Bypass the scheduler and send the request directly to the volume
            # manager.
            volume.host = host
            volume.scheduled_at = timeutils.utcnow()
            volume.save()
            if not cgsnapshot_id:
                self.volume_rpcapi.create_volume(context,
                                                 volume,
                                                 volume.host,
                                                 request_spec,
                                                 filter_properties,
                                                 allow_reschedule=False)
Example #46
    def _get_service_target(self, volume):
        """Get the available service parameters

           Get the available service parameters for a given volume using
           its type.
           :param volume: dictionary volume reference
        """

        hdp = self._get_service(volume)
        info = _loc_info(volume['provider_location'])
        (arid, lun_name) = info['id_lu']

        evsid = self.bend.get_evs(self.config['hnas_cmd'],
                                  self.config['mgmt_ip0'],
                                  self.config['username'],
                                  self.config['password'], hdp)
        svc_label = utils.extract_host(volume['host'], level='pool')
        svc = self.config['services'][svc_label]

        LOG.info(_LI("_get_service_target hdp: %s."), hdp)
        LOG.info(_LI("config[services]: %s."), self.config['services'])

        mapped, lunid, tgt = self.bend.check_lu(self.config['hnas_cmd'],
                                                self.config['mgmt_ip0'],
                                                self.config['username'],
                                                self.config['password'],
                                                lun_name, hdp)

        LOG.info(_LI("Target is %(map)s! Targetlist = %(tgtl)s."), {
            'map': "mapped" if mapped else "not mapped",
            'tgtl': tgt
        })

        # The volume is already mapped to a LUN, so no need to create any
        # targets
        if mapped:
            service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
                       svc['port'], hdp, tgt['alias'], tgt['secret'])
            return service

        # Each EVS can have up to 32 targets. Each target can have up to 32
        # LUNs attached and is named following the 'evs<id>-tgt<0-N>' format.
        # We iterate from 'evs<id>-tgt0' until we find a target that does not
        # exist yet in the backend, or one that exists but still has free
        # slots for new LUNs.
        found_tgt = False
        for i in range(0, MAX_HNAS_ISCSI_TARGETS):
            tgt_alias = 'evs' + evsid + '-tgt' + six.text_type(i)
            # TODO(erlon): we need to go to the BE 32 times here
            tgt_exist, tgt = self.bend.check_target(self.config['hnas_cmd'],
                                                    self.config['mgmt_ip0'],
                                                    self.config['username'],
                                                    self.config['password'],
                                                    hdp, tgt_alias)
            if tgt_exist and len(tgt['luns']) < 32 or not tgt_exist:
                # Target exists and has free space, or target does not exist
                # yet. Proceed and use the target, or create a target using
                # this name.
                found_tgt = True
                break

        # If we get here and found_tgt is still False, we have run out of
        # targets; raise and bail out.
        if not found_tgt:
            LOG.error(_LE("No more targets avaliable."))
            raise exception.NoMoreTargets(param=tgt_alias)

        LOG.info(_LI("Using target label: %s."), tgt_alias)

        # Check if we have a secret stored for this target so we don't have to
        # go to BE on every query
        if 'targets' not in self.config.keys():
            self.config['targets'] = {}

        if tgt_alias not in self.config['targets'].keys():
            self.config['targets'][tgt_alias] = {}

        tgt_info = self.config['targets'][tgt_alias]

        # HNAS - one time lookup
        # see if the client supports CHAP authentication and if
        # iscsi_secret has already been set, retrieve the secret if
        # available, otherwise generate and store
        if self.config['chap_enabled'] == 'True':
            # It may not exist, create and set secret.
            if 'iscsi_secret' not in tgt_info.keys():
                LOG.info(_LI("Retrieving secret for service: %s."), tgt_alias)

                out = self.bend.get_targetsecret(self.config['hnas_cmd'],
                                                 self.config['mgmt_ip0'],
                                                 self.config['username'],
                                                 self.config['password'],
                                                 tgt_alias, hdp)
                tgt_info['iscsi_secret'] = out
                if tgt_info['iscsi_secret'] == "":
                    random_secret = utils.generate_password()[0:15]
                    tgt_info['iscsi_secret'] = random_secret
                    self.bend.set_targetsecret(self.config['hnas_cmd'],
                                               self.config['mgmt_ip0'],
                                               self.config['username'],
                                               self.config['password'],
                                               tgt_alias, hdp,
                                               tgt_info['iscsi_secret'])

                    LOG.info(_LI("Set tgt CHAP secret for service: %s."),
                             tgt_alias)
        else:
            # We set a blank password when the client does not
            # support CHAP. Later on, if the client tries to create a new
            # target that does not exist in the backend, we check for this
            # value and use a temporary dummy password.
            if 'iscsi_secret' not in tgt_info.keys():
                # Warn only the first time
                LOG.info(_LI("CHAP authentication disabled."))

            tgt_info['iscsi_secret'] = ""

        if 'tgt_iqn' not in tgt_info:
            LOG.info(_LI("Retrieving target for service: %s."), tgt_alias)

            out = self.bend.get_targetiqn(self.config['hnas_cmd'],
                                          self.config['mgmt_ip0'],
                                          self.config['username'],
                                          self.config['password'], tgt_alias,
                                          hdp, tgt_info['iscsi_secret'])
            tgt_info['tgt_iqn'] = out

        self.config['targets'][tgt_alias] = tgt_info

        service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'], svc['port'],
                   hdp, tgt_alias, tgt_info['iscsi_secret'])

        return service
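
The loop above scans candidate targets named after the 'evs<id>-tgt<0-N>'
scheme described in its comment. A standalone illustration of just that
naming logic (no HNAS calls; the 32-target limit is taken from the comment
and stands in for the driver's MAX_HNAS_ISCSI_TARGETS constant):

MAX_HNAS_ISCSI_TARGETS = 32
evsid = '1'
candidates = ['evs%s-tgt%d' % (evsid, i)
              for i in range(MAX_HNAS_ISCSI_TARGETS)]
assert candidates[0] == 'evs1-tgt0'
assert candidates[-1] == 'evs1-tgt31'
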
Example #47
File: api.py  Project: abusse/cinder
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               availability_zone=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        if volume['status'] != "available":
            msg = _('Volume to be backed up must be available')
            raise exception.InvalidVolume(reason=msg)
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warn(
                        msg % {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warn(msg % {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        self.db.volume_update(context, volume_id, {'status': 'backing-up'})
        options = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'display_name': name,
            'display_description': description,
            'volume_id': volume_id,
            'status': 'creating',
            'container': container,
            'size': volume['size'],
            'host': volume_host,
        }
        try:
            backup = self.db.backup_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.backup_destroy(context, backup['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup['host'], backup['id'],
                                         volume_id)

        return backup
Example #48
    def promote_replica(self, ctxt, volume):
        new_host = utils.extract_host(volume['host'])
        cctxt = self.client.prepare(server=new_host, version='1.17')
        cctxt.cast(ctxt, 'promote_replica', volume_id=volume['id'])
Example #49
def get_pool_name(volume):
    return vol_utils.extract_host(volume.host, 'pool')
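
A hedged usage sketch for the helper above, using a stand-in object instead
of a real objects.Volume (the host string and FakeVolume are illustrative
only; the pool name is whatever follows '#' in volume.host):

class FakeVolume(object):
    host = 'stack@lvm#pool0'

print(get_pool_name(FakeVolume()))  # expected output: pool0
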
Example #50
    def create(self, context, name, description, volume_id,
               container, incremental=False, availability_zone=None,
               force=False):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".')
                   % volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use '
                    'the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(msg, {'s_pid': context.project_id,
                                      's_size': volume['size'],
                                      'd_consumed': _consumed(over),
                                      'd_quota': quotas[over]})
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {'s_pid': context.project_id,
                                      'd_consumed': _consumed(over)})
                    raise exception.BackupLimitExceeded(
                        allowed=quotas[over])

        # Find the latest backup of the volume and use it as the parent
        # backup to do an incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                           volume_id)
            if backups.objects:
                latest_backup = max(backups.objects,
                                    key=lambda x: x['created_at'])
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != "available":
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        self.db.volume_update(context, volume_id,
                              {'status': 'backing-up',
                               'previous_status': previous_status})
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': 'creating',
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
Example #51
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service."""
        volume_id = backup.volume_id
        volume = self.db.volume_get(context, volume_id)
        previous_status = volume.get('previous_status', None)
        LOG.info(_LI('Create backup started, backup: %(backup_id)s '
                     'volume: %(volume_id)s.'),
                 {'backup_id': backup.id, 'volume_id': volume_id})

        self._notify_about_backup_usage(context, backup, "create.start")
        volume_host = volume_utils.extract_host(volume['host'], 'backend')
        backend = self._get_volume_backend(host=volume_host)

        backup.host = self.host
        backup.service = self.driver_name
        backup.save()

        expected_status = 'backing-up'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = _('Create backup aborted, expected volume status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                'expected_status': expected_status,
                'actual_status': actual_status,
            }
            self._update_backup_error(backup, context, err)
            raise exception.InvalidVolume(reason=err)

        expected_status = 'creating'
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                'expected_status': expected_status,
                'actual_status': actual_status,
            }
            self._update_backup_error(backup, context, err)
            backup.save()
            raise exception.InvalidBackup(reason=err)

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught,
            # the volume status will be set back to available and
            # the backup status to 'error'
            utils.require_driver_initialized(self.driver)

            backup_service = self.service.get_backup_driver(context)
            self._get_driver(backend).backup_volume(context, backup,
                                                    backup_service)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': previous_status,
                                       'previous_status': 'error_backing-up'})
                self._update_backup_error(backup, context, six.text_type(err))

        # Restore the original status.
        self.db.volume_update(context, volume_id,
                              {'status': previous_status,
                               'previous_status': 'backing-up'})
        backup.status = 'available'
        backup.size = volume['size']
        backup.availability_zone = self.az
        backup.save()
        # Handle the num_dependent_backups of parent backup when child backup
        # has created successfully.
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(context,
                                                     backup.parent_id)
            parent_backup.num_dependent_backups += 1
            parent_backup.save()
        LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
Example #52
    def _create_from_image(self, context, volume,
                           image_location, image_id, image_meta,
                           image_service, **kwargs):
        LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
                  " at location %(image_location)s.",
                  {'volume_id': volume.id,
                   'image_location': image_location, 'image_id': image_id})

        # NOTE(e0ne): check for free space in image_conversion_dir before
        # image downloading.
        if (CONF.image_conversion_dir and not
                os.path.exists(CONF.image_conversion_dir)):
            os.makedirs(CONF.image_conversion_dir)
        image_utils.check_available_space(CONF.image_conversion_dir,
                                          image_meta['size'], image_id)

        virtual_size = image_meta.get('virtual_size')
        if virtual_size:
            virtual_size = image_utils.check_virtual_size(virtual_size,
                                                          volume.size,
                                                          image_id)

        # Create the volume from an image.
        #
        # First see if the driver can clone the image directly.
        #
        # NOTE (singn): two params need to be returned
        # dict containing provider_location for cloned volume
        # and clone status.
        # NOTE (lixiaoy1): Currently all images are raw data, we can't
        # use clone_image to copy data if new volume is encrypted.
        volume_is_encrypted = volume.encryption_key_id is not None
        cloned = False
        model_update = None
        if not volume_is_encrypted:
            model_update, cloned = self.driver.clone_image(context,
                                                           volume,
                                                           image_location,
                                                           image_meta,
                                                           image_service)

        # Try and clone the image if we have it set as a glance location.
        if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
            model_update, cloned = self._clone_image_volume(context,
                                                            volume,
                                                            image_location,
                                                            image_meta)
        # Try and use the image cache.
        should_create_cache_entry = False
        if self.image_volume_cache and not cloned:
            internal_context = cinder_context.get_internal_tenant_context()
            if not internal_context:
                LOG.info(_LI('Unable to get Cinder internal context, will '
                             'not use image-volume cache.'))
            else:
                model_update, cloned = self._create_from_image_cache(
                    context,
                    internal_context,
                    volume,
                    image_id,
                    image_meta
                )
                # Don't cache encrypted volume.
                if not cloned and not volume_is_encrypted:
                    should_create_cache_entry = True

        # Fall back to default behavior of creating volume,
        # download the image data and copy it into the volume.
        original_size = volume.size
        backend_name = volume_utils.extract_host(volume.service_topic_queue)
        try:
            if not cloned:
                with image_utils.TemporaryImages.fetch(
                        image_service, context, image_id,
                        backend_name) as tmp_image:
                    # Try to create the volume as the minimal size, then we can
                    # extend once the image has been downloaded.
                    data = image_utils.qemu_img_info(tmp_image)

                    virtual_size = image_utils.check_virtual_size(
                        data.virtual_size, volume.size, image_id)

                    if should_create_cache_entry:
                        if virtual_size and virtual_size != original_size:
                            volume.size = virtual_size
                            volume.save()

                    model_update = self._create_from_image_download(
                        context,
                        volume,
                        image_location,
                        image_id,
                        image_service
                    )

            if should_create_cache_entry:
                # Update the newly created volume db entry before we clone it
                # for the image-volume creation.
                if model_update:
                    volume.update(model_update)
                    volume.save()
                self.manager._create_image_cache_volume_entry(internal_context,
                                                              volume,
                                                              image_id,
                                                              image_meta)
        finally:
            # If we created the volume as the minimal size, extend it back to
            # what was originally requested. If an exception has occurred we
            # still need to put this back before letting it be raised further
            # up the stack.
            if volume.size != original_size:
                self.driver.extend_volume(volume, original_size)
                volume.size = original_size
                volume.save()

        self._handle_bootable_volume_glance_meta(context, volume,
                                                 image_id=image_id,
                                                 image_meta=image_meta)
        return model_update
Example #53
File: test.py  Project: homolkad/deb-cinder
    def _test_rpc_api(self, method, rpc_method, server=None, fanout=False,
                      version=None, expected_method=None,
                      expected_kwargs_diff=None, retval=None,
                      expected_retval=None, **kwargs):
        """Runs a test against RPC API method.

        :param method: Name of RPC API method.
        :param rpc_method: Expected RPC message type (cast or call).
        :param server: Expected hostname.
        :param fanout: True if expected call/cast should be fanout.
        :param version: Expected autocalculated RPC API version.
        :param expected_method: Expected RPC method name.
        :param expected_kwargs_diff: Map of expected changes between keyword
                                     arguments passed into the method and sent
                                     over RPC.
        :param retval: Value returned by RPC call/cast.
        :param expected_retval: Expected RPC API response (if different than
                                retval).
        :param kwargs: Parameters passed into the RPC API method.
        """

        rpcapi = self.rpcapi()
        expected_kwargs_diff = expected_kwargs_diff or {}
        version = version or self.base_version
        topic = None
        if server is not None:
            backend = utils.extract_host(server)
            server = utils.extract_host(server, 'host')
            topic = 'cinder-volume.%s' % backend

        if expected_method is None:
            expected_method = method

        if expected_retval is None:
            expected_retval = retval

        target = {
            "server": server,
            "fanout": fanout,
            "version": version,
            "topic": topic,
        }

        # Initially we expect that we'll pass same arguments to RPC API method
        # and RPC call/cast...
        expected_msg = copy.deepcopy(kwargs)
        # ... but here we're taking exceptions into account.
        expected_msg.update(expected_kwargs_diff)

        def _fake_prepare_method(*args, **kwds):
            # This is checking if target will be properly created.
            for kwd in kwds:
                self.assertEqual(target[kwd], kwds[kwd])
            return rpcapi.client

        def _fake_rpc_method(*args, **kwargs):
            # This checks if positional arguments passed to RPC method match.
            self.assertEqual((self.context, expected_method), args)

            # This checks if keyword arguments passed to RPC method match.
            for kwarg, value in kwargs.items():
                # Getting possible changes into account.
                if isinstance(value, objects_base.CinderObject):
                    # We need to compare objects differently.
                    self._assertEqualObjects(expected_msg[kwarg], value)
                else:
                    self.assertEqual(expected_msg[kwarg], value)

            # Returning fake value we're supposed to return.
            if retval:
                return retval

        # Enable mocks that will check everything and run RPC method.
        with mock.patch.object(rpcapi.client, "prepare",
                               side_effect=_fake_prepare_method):
            with mock.patch.object(rpcapi.client, rpc_method,
                                   side_effect=_fake_rpc_method):
                real_retval = getattr(rpcapi, method)(self.context, **kwargs)
                self.assertEqual(expected_retval, real_retval)
Example #54
    def restore_backup(self, context, backup, volume_id):
        """Restore volume backups from configured backup service."""
        LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
                     'volume: %(volume_id)s.'),
                 {'backup_id': backup.id, 'volume_id': volume_id})

        volume = self.db.volume_get(context, volume_id)
        volume_host = volume_utils.extract_host(volume['host'], 'backend')
        backend = self._get_volume_backend(host=volume_host)
        self._notify_about_backup_usage(context, backup, "restore.start")

        backup.host = self.host
        backup.save()

        expected_status = 'restoring-backup'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted, expected volume status '
                     '%(expected_status)s but got %(actual_status)s.') %
                   {'expected_status': expected_status,
                    'actual_status': actual_status})
            backup.status = 'available'
            backup.save()
            raise exception.InvalidVolume(reason=err)

        expected_status = 'restoring'
        actual_status = backup['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted: expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') %
                   {'expected_status': expected_status,
                    'actual_status': actual_status})
            self._update_backup_error(backup, context, err)
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        if volume['size'] > backup['size']:
            LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
                         'larger than backup: %(backup_id)s, '
                         'size: %(backup_size)d, continuing with restore.'),
                     {'vol_id': volume['id'],
                      'vol_size': volume['size'],
                      'backup_id': backup['id'],
                      'backup_size': backup['size']})

        backup_service = self._map_service_to_driver(backup['service'])
        configured_service = self.driver_name
        if backup_service != configured_service:
            err = _('Restore backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s].') % {
                'configured_service': configured_service,
                'backup_service': backup_service,
            }
            backup.status = 'available'
            backup.save()
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught,
            # the volume status will be set back to available and
            # the backup status to 'error'
            utils.require_driver_initialized(self.driver)

            backup_service = self.service.get_backup_driver(context)
            self._get_driver(backend).restore_backup(context, backup,
                                                     volume,
                                                     backup_service)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_restoring'})
                backup.status = 'available'
                backup.save()

        self.db.volume_update(context, volume_id, {'status': 'available'})
        backup.status = 'available'
        backup.save()
        LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
                     ' to volume %(volume_id)s.'),
                 {'backup_id': backup.id, 'volume_id': volume_id})
        self._notify_about_backup_usage(context, backup, "restore.end")
Example #55
    def delete_snapshot(self, ctxt, snapshot, host):
        new_host = utils.extract_host(host)
        cctxt = self.client.prepare(server=new_host)
        cctxt.cast(ctxt, 'delete_snapshot', snapshot_id=snapshot['id'])
Example #56
    def create_backup(self, context, backup_id):
        """Create volume backups using configured backup service."""
        backup = self.db.backup_get(context, backup_id)
        volume_id = backup['volume_id']

        # code begin by luobin
        # Because volume could be available or in-use
        initial_vol_status = self.db.volume_get(context, volume_id)['status']
        self.db.volume_update(context, volume_id, {'status': 'backing-up'})
        # code end by luobin

        volume = self.db.volume_get(context, volume_id)

        LOG.info(
            _('Create backup started, backup: %(backup_id)s '
              'volume: %(volume_id)s.') % {
                  'backup_id': backup_id,
                  'volume_id': volume_id
              })
        volume_host = volume_utils.extract_host(volume['host'], 'backend')
        backend = self._get_volume_backend(host=volume_host)

        self.db.backup_update(context, backup_id, {
            'host': self.host,
            'service': self.driver_name
        })

        expected_status = 'backing-up'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = _('Create backup aborted, expected volume status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self.db.backup_update(context, backup_id, {
                'status': 'error',
                'fail_reason': err
            })
            raise exception.InvalidVolume(reason=err)

        expected_status = 'creating'
        actual_status = backup['status']
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self.db.volume_update(context, volume_id,
                                  {'status': initial_vol_status})
            self.db.backup_update(context, backup_id, {
                'status': 'error',
                'fail_reason': err
            })
            raise exception.InvalidBackup(reason=err)

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught,
            # the volume status will be set back to available and
            # the backup status to 'error'
            utils.require_driver_initialized(self.driver)

            backup_service = self.service.get_backup_driver(context)
            self._get_driver(backend).backup_volume(context, backup,
                                                    backup_service)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': initial_vol_status})
                self.db.backup_update(context, backup_id, {
                    'status': 'error',
                    'fail_reason': unicode(err)
                })

        self.db.volume_update(context, volume_id,
                              {'status': initial_vol_status})
        self.db.backup_update(
            context, backup_id, {
                'status': 'available',
                'size': volume['size'],
                'availability_zone': self.az
            })
        LOG.info(_('Create backup finished. backup: %s.'), backup_id)
Example #57
    def _test_volume_api(self, method, rpc_method, **kwargs):
        ctxt = context.RequestContext('fake_user', 'fake_project')

        if 'rpcapi_class' in kwargs:
            rpcapi_class = kwargs.pop('rpcapi_class')
        else:
            rpcapi_class = volume_rpcapi.VolumeAPI
        rpcapi = rpcapi_class()
        expected_retval = {} if rpc_method == 'call' else None

        target = {"version": kwargs.pop('version', rpcapi.RPC_API_VERSION)}

        if 'request_spec' in kwargs:
            spec = jsonutils.to_primitive(kwargs['request_spec'])
            kwargs['request_spec'] = spec

        expected_msg = self._get_expected_msg(kwargs)

        if 'host' in kwargs:
            host = kwargs['host']
        elif 'backend_id' in kwargs:
            host = kwargs['backend_id']
        elif 'group' in kwargs:
            host = kwargs['group'].service_topic_queue
        elif 'volume' in kwargs:
            vol = kwargs['volume']
            host = vol.service_topic_queue
        elif 'snapshot' in kwargs:
            host = 'fake_host'
        elif 'cgsnapshot' in kwargs:
            host = kwargs['cgsnapshot'].consistencygroup.service_topic_queue

        target['server'] = utils.extract_host(host, 'host')
        target['topic'] = '%s.%s' % (constants.VOLUME_TOPIC,
                                     utils.extract_host(host))

        self.fake_args = None
        self.fake_kwargs = None

        def _fake_prepare_method(*args, **kwds):
            for kwd in kwds:
                self.assertEqual(kwds[kwd], target[kwd])
            return rpcapi.client

        def _fake_rpc_method(*args, **kwargs):
            self.fake_args = args
            kwargs.pop('want_objects', None)
            self.fake_kwargs = kwargs
            if expected_retval is not None:
                return expected_retval

        self.mock_object(rpcapi.client, "prepare", _fake_prepare_method)
        self.mock_object(rpcapi.client, rpc_method, _fake_rpc_method)

        retval = getattr(rpcapi, method)(ctxt, **kwargs)

        self.assertEqual(expected_retval, retval)
        expected_args = [ctxt, method]

        for arg, expected_arg in zip(self.fake_args, expected_args):
            self.assertEqual(expected_arg, arg)

        for kwarg, value in self.fake_kwargs.items():
            if isinstance(value, ovo_base.CinderObject):
                expected = expected_msg[kwarg].obj_to_primitive()
                primitive = value.obj_to_primitive()
                self.assertEqual(expected, primitive)

            else:
                self.assertEqual(expected_msg[kwarg], value)
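Individual tests typically drive the helper above with just an RPC API method name, the rpc method ('call' or 'cast'), and the expected kwargs. A hedged sketch of such a caller follows; the fixture name self.fake_volume_obj is illustrative, not taken from the real test suite.

    # Illustrative only; assumes a fixture like self.fake_volume_obj exists.
    def test_detach_volume(self):
        self._test_volume_api('detach_volume',
                              rpc_method='call',
                              volume=self.fake_volume_obj)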
Example #58
    def detach_volume(self, ctxt, volume):
        new_host = utils.extract_host(volume['host'])
        cctxt = self.client.prepare(server=new_host)
        return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'])
Example #59
    def manage_existing(self, ctxt, volume, ref):
        new_host = utils.extract_host(volume['host'])
        cctxt = self.client.prepare(server=new_host, version='1.15')
        cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)
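Both RPC client methods above follow the same pattern: resolve the owning backend with extract_host(), prepare the client against that server, then call() when a result is needed or cast() for fire-and-forget. Below is a hedged sketch of that pattern with the messaging layer stubbed out, purely for illustration; FakeRPCClient, VolumeAPISketch and resize_volume are made-up names.

class FakeRPCClient(object):
    """Stand-in for the oslo.messaging RPC client used by the real API."""

    def prepare(self, server=None, version=None):
        print('prepared for server=%s, version=%s' % (server, version))
        return self

    def cast(self, ctxt, method, **kwargs):
        # cast() is fire-and-forget; call() would block for a return value.
        print('cast %s %s' % (method, kwargs))


class VolumeAPISketch(object):
    def __init__(self):
        self.client = FakeRPCClient()

    def resize_volume(self, ctxt, volume, new_size):
        # Same routing as detach_volume/manage_existing above: send the
        # message to the backend that owns the volume.
        new_host = volume['host'].split('#')[0]  # stand-in for extract_host()
        cctxt = self.client.prepare(server=new_host, version='1.15')
        cctxt.cast(ctxt, 'resize_volume',
                   volume_id=volume['id'], new_size=new_size)


VolumeAPISketch().resize_volume(
    None, {'id': 'vol-1', 'host': 'node1@lvm#pool0'}, 2)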
Example #60
    def update_pools(self, capability, service):
        """Update storage pools information from backend reported info."""
        if not capability:
            return

        pools = capability.get('pools', None)
        active_pools = set()
        if pools and isinstance(pools, list):
            # Update every pool's stats from the list of pools reported
            # in the volume capability.
            for pool_cap in pools:
                pool_name = pool_cap['pool_name']
                self._append_backend_info(pool_cap)
                cur_pool = self.pools.get(pool_name, None)
                if not cur_pool:
                    # Add new pool
                    cur_pool = self.pool_state_cls(self.host,
                                                   self.cluster_name, pool_cap,
                                                   pool_name)
                    self.pools[pool_name] = cur_pool
                cur_pool.update_from_volume_capability(pool_cap, service)

                active_pools.add(pool_name)
        elif pools is None:
            # To handle a legacy driver that doesn't report pool
            # information in its capability, prepare a pool from the
            # backend-level info, or update the one already created
            # in self.pools.
            pool_name = self.volume_backend_name
            if pool_name is None:
                # To get DEFAULT_POOL_NAME
                pool_name = vol_utils.extract_host(self.host, 'pool', True)

            if len(self.pools) == 0:
                # No pool exists yet; build one from the backend-level info.
                single_pool = self.pool_state_cls(self.host, self.cluster_name,
                                                  capability, pool_name)
                self._append_backend_info(capability)
                self.pools[pool_name] = single_pool
            else:
                # This is an update from a legacy driver.
                try:
                    single_pool = self.pools[pool_name]
                except KeyError:
                    single_pool = self.pool_state_cls(self.host,
                                                      self.cluster_name,
                                                      capability, pool_name)
                    self._append_backend_info(capability)
                    self.pools[pool_name] = single_pool

            single_pool.update_from_volume_capability(capability, service)
            active_pools.add(pool_name)

        # remove non-active pools from self.pools
        nonactive_pools = set(self.pools.keys()) - active_pools
        for pool in nonactive_pools:
            LOG.debug(
                "Removing non-active pool %(pool)s @ %(host)s "
                "from scheduler cache.", {
                    'pool': pool,
                    'host': self.host
                })
            del self.pools[pool]
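As a rough illustration of the two branches above, a capability dict that carries a 'pools' list produces one scheduler pool per entry, while a legacy report without 'pools' collapses into a single backend-level pool named after volume_backend_name or the default pool name. The pool names and capacities below are invented.

# Pool-aware report -- handled by the "pools is a list" branch; one
# pool state per pool_name ends up in self.pools.
capability_with_pools = {
    'volume_backend_name': 'ceph',
    'pools': [
        {'pool_name': 'rbd-fast', 'free_capacity_gb': 100},
        {'pool_name': 'rbd-slow', 'free_capacity_gb': 500},
    ],
}

# Legacy report -- handled by the "pools is None" branch; the whole backend
# becomes one pool, keyed by volume_backend_name or, failing that, the
# default name that extract_host(host, 'pool', True) returns.
legacy_capability = {
    'volume_backend_name': None,
    'free_capacity_gb': 600,
}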