def setUp(self):
        super(NovaApiTestCase, self).setUp()

        self.api = nova.API()
        self.novaclient = FakeNovaClient()
        self.ctx = context.get_admin_context()
        self.mox.StubOutWithMock(nova, 'novaclient')
    def _nova_has_extended_server_attributes(self, context):
        """Check Extended Server Attributes presence

        Find out whether the Extended Server Attributes extension is activated
        in Nova or not. Cache the result to query Nova only once.
        """

        if not hasattr(self, '_nova_ext_srv_attr'):
            self._nova_ext_srv_attr = nova.API().has_extension(
                context, 'ExtendedServerAttributes', timeout=REQUESTS_TIMEOUT)

        return self._nova_ext_srv_attr
    def host_passes(self, host_state, filter_properties):
        context = filter_properties['context']
        host = volume_utils.extract_host(host_state.host, 'host')

        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)

        # Without 'local_to_instance' hint
        if not instance_uuid:
            return True

        if not uuidutils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        # TODO(adrienverge): Currently it is not recommended to allow instance
        # migrations for hypervisors where this hint will be used. In case of
        # instance migration, a previously locally-created volume will not be
        # automatically migrated. Also in case of instance migration during the
        # volume's scheduling, the result is unpredictable. A future
        # enhancement would be to subscribe to Nova migration events (e.g. via
        # Ceilometer).

        # First, look up already-known information in the local cache
        if instance_uuid in self._cache:
            return self._cache[instance_uuid] == host

        if not self._nova_has_extended_server_attributes(context):
            LOG.warning(
                _LW('Hint "%s" dropped because '
                    'ExtendedServerAttributes not active in Nova.'),
                HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        server = nova.API().get_server(context,
                                       instance_uuid,
                                       privileged_user=True,
                                       timeout=REQUESTS_TIMEOUT)

        if not hasattr(server, INSTANCE_HOST_PROP):
            LOG.warning(
                _LW('Hint "%s" dropped because Nova did not return '
                    'enough information. Either Nova policy needs to '
                    'be changed or a privileged account for Nova '
                    'should be specified in conf.'), HINT_KEYWORD)
            raise exception.CinderException(
                _('Hint "%s" not supported.') % HINT_KEYWORD)

        self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

        # Match if given instance is hosted on host
        return self._cache[instance_uuid] == host
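
For context, this filter only takes effect when the local_to_instance scheduler hint (HINT_KEYWORD above) is supplied at volume-creation time. A minimal usage sketch, assuming python-cinderclient and an already-authenticated Keystone session; the session object and instance UUID are placeholders:

# Hedged usage sketch: ask the scheduler to place the new volume on the
# same host as a given instance. Equivalent CLI form:
#   cinder create --hint local_to_instance=<instance-uuid> 1
from cinderclient import client

cinder = client.Client('2', session=keystone_session)  # session assumed
cinder.volumes.create(
    size=1,
    scheduler_hints={'local_to_instance': 'INSTANCE-UUID-HERE'})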
Example 4
    def __init__(self, scheme="https", *args, **kwargs):
        super(VMwareVcloudVolumeDriver, self).__init__(*args, **kwargs)
        self._stats = None
        self._nova_api = nova.API()
        self._node_name = CONF.vcloud.vcloud_node_name
        self._session = VCloudAPISession(scheme=scheme)

        self._vgw_host = CONF.vgw.vcloud_vgw_host
        self._vgw_name = CONF.vgw.vcloud_vgw_name
        self._vgw_username = CONF.vgw.vcloud_vgw_username
        self._vgw_password = CONF.vgw.vcloud_vgw_password
        #self._vgw_url = CONF.vgw.vcloud_vgw_url
        self._vgw_store_file_dir = CONF.vgw.store_file_dir
Example 5
    def __init__(self, *args, **kwargs):
        self._stats = None
        self._nova_api = nova.API()

        self._node_name = CONF.vcloud.vcloud_node_name
        self._session = VCloudAPISession(
            host_ip=CONF.vcloud.vcloud_host_ip,
            host_port=CONF.vcloud.vcloud_host_port,
            server_username=CONF.vcloud.vcloud_host_username,
            server_password=CONF.vcloud.vcloud_host_password,
            org=CONF.vcloud.vcloud_org,
            vdc=CONF.vcloud.vcloud_vdc,
            version=CONF.vcloud.vcloud_version,
            service=CONF.vcloud.vcloud_service,
            verify=CONF.vcloud.vcloud_verify,
            service_type=CONF.vcloud.vcloud_service_type,
            retry_count=CONF.vcloud.vcloud_api_retry_count,
            create_session=True,
            scheme='https')

        super(VMwareVcloudVolumeDriver, self).__init__(*args, **kwargs)
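
Both constructors above pull their connection settings from the [vcloud] and [vgw] oslo.config groups. A plausible registration sketch for the [vcloud] options, reconstructed from the attribute accesses; the option types, defaults, and help strings are guesses, not the original driver's definitions:

from oslo_config import cfg

# Illustrative option set inferred from the CONF.vcloud.* accesses above.
vcloud_opts = [
    cfg.StrOpt('vcloud_node_name', help='Name of this vCloud node'),
    cfg.StrOpt('vcloud_host_ip', help='vCloud Director endpoint IP'),
    cfg.IntOpt('vcloud_host_port', default=443),
    cfg.StrOpt('vcloud_host_username'),
    cfg.StrOpt('vcloud_host_password', secret=True),
    cfg.StrOpt('vcloud_org'),
    cfg.StrOpt('vcloud_vdc'),
    cfg.StrOpt('vcloud_version', default='5.5'),
    cfg.StrOpt('vcloud_service'),
    cfg.BoolOpt('vcloud_verify', default=False),
    cfg.StrOpt('vcloud_service_type', default='vcd'),
    cfg.IntOpt('vcloud_api_retry_count', default=2),
]

CONF = cfg.CONF
CONF.register_opts(vcloud_opts, group='vcloud')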
Example 7
    def create_instance_backup(self,
                               context,
                               instance_uuid,
                               name,
                               description,
                               volume_ids,
                               container,
                               incremental=False,
                               availability_zone=None,
                               force=True):
        """Make the RPC call to create backup for volume-based instance."""
        # Use the same policy as backup creation
        check_policy(context, 'create')

        server = nova.API().get_server(context, instance_uuid)
        if server.status not in [
                "ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED", "SHELVED_OFFLOADED"
        ]:
            msg = (_("Instance %(instance_uuid)s in %(status)s status "
                     "which is not allowed to be backed up.") % {
                         'instance_uuid': instance_uuid,
                         'status': server.status
                     })
            raise exception.InvalidInstanceStatus(reason=msg)

        volumes = [
            self.volume_api.get(context, volume_id) for volume_id in volume_ids
        ]

        for volume in volumes:
            # Verify all volumes are in 'in-use' state
            if volume['status'] != "in-use":
                msg = (_('Volume to be backed up must be in-use '
                         'but the current status is "%s".') % volume['status'])
                raise exception.InvalidVolume(reason=msg)

            # Verify backup service is enabled on host
            volume_host = volume_utils.extract_host(volume['host'], 'host')
            if not self._is_backup_service_enabled(volume, volume_host):
                raise exception.ServiceNotFound(service_id='cinder-backup')

        backups = []
        inst_backup_kwargs = []

        # Add a 32-character UUID prefix (a uuid4 with the dashes stripped) to
        # display_description, to distinguish backups created at the same time
        desc_prefix = str(uuid.uuid4()).replace('-', '')

        for volume in volumes:
            # Reserve a quota before setting volume status and backup status
            try:
                reserve_opts = {
                    'backups': 1,
                    'backup_gigabytes': volume['size']
                }
                LOG.info(
                    _LI("create_instance_backup "
                        "reserve_opts: %(reserve_opts)s"),
                    {'reserve_opts': reserve_opts})
                reservations = QUOTAS.reserve(context, **reserve_opts)
            except exception.OverQuota as e:
                overs = e.kwargs['overs']
                usages = e.kwargs['usages']
                quotas = e.kwargs['quotas']

                # Reset the status of the other volumes and remove their
                # related backups
                for backup in backups:
                    self.db.volume_update(context, backup['volume_id'],
                                          {'status': 'in-use'})
                    self.db.backup_update(context, backup['id'],
                                          {'status': 'error'})
                    self.delete(context, backup['id'])

                def _consumed(resource_name):
                    return (usages[resource_name]['reserved'] +
                            usages[resource_name]['in_use'])

                for over in overs:
                    if 'gigabytes' in over:
                        msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                                  "create "
                                  "%(s_size)sG backup "
                                  "(%(d_consumed)dG of "
                                  "%(d_quota)dG already consumed)")
                        LOG.warning(
                            msg, {
                                's_pid': context.project_id,
                                's_size': volume['size'],
                                'd_consumed': _consumed(over),
                                'd_quota': quotas[over]
                            })
                        raise exception.VolumeBackupSizeExceedsAvailableQuota(
                            requested=volume['size'],
                            consumed=_consumed('backup_gigabytes'),
                            quota=quotas['backup_gigabytes'])
                    elif 'backups' in over:
                        msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                                  "create backups (%(d_consumed)d backups "
                                  "already consumed)")
                        LOG.warning(
                            msg, {
                                's_pid': context.project_id,
                                'd_consumed': _consumed(over)
                            })
                        raise exception.BackupLimitExceeded(
                            allowed=quotas[over])

            # Since Ceph doesn't use parent_id to determine an incremental
            # backup, this part is commented out.
            #
            # Find the latest backup of the volume and use it as the parent
            # backup to do an incremental backup.
            # latest_backup = None
            # if incremental:
            #     backups = \
            #              objects.BackupList.get_all_by_volume(context.elevated(),
            #                                                   volume['id'])
            #     if backups.objects:
            #         latest_backup = max(backups.objects,
            #                             key=lambda x: x['created_at'])
            #     else:
            #         msg = _('No backups available \
            #                  to do an incremental backup.')
            #         raise exception.InvalidBackup(reason=msg)
            latest_backup = None
            # Added for periodic backup
            if getattr(context, 'periodic', False):
                latest_backup = None
                description = PERIODICSTR + description if description \
                    else PERIODICSTR
            else:
                if incremental:
                    all_backups = self.db.backup_get_all_by_volume(
                        context.elevated(), volume['id'])
                    if all_backups:
                        normal_backups = []
                        for bk in all_backups:
                            if not bk.display_description or \
                                    PERIODICSTR not in bk.display_description:
                                normal_backups.append(bk)
                        if normal_backups:
                            latest_backup = max(normal_backups,
                                                key=lambda x: x['created_at'])

            parent_id = None
            if latest_backup:
                if latest_backup['status'] == "available":
                    parent_id = latest_backup['id']
                    LOG.info(
                        _LI("Found parent backup %(bak)s for volume "
                            "%(volume)s. Do an incremental backup."), {
                                'bak': latest_backup['id'],
                                'volume': volume['id']
                            })
                elif latest_backup['status'] == "creating":
                    msg = _('The parent backup is still being created.')
                    LOG.info(_LI("The parent backup %(bak)s is still being "
                                 "created."), {'bak': latest_backup['id']})
                    raise exception.InvalidBackup(reason=msg)
                else:
                    LOG.info(
                        _LI("No backups available to do an incremental "
                            "backup, do a full backup for "
                            "volume %(volume)s."), {'volume': volume['id']})
            else:
                LOG.info(
                    _LI("No backups available to do an incremental "
                        "backup, do a full backup for volume %(volume)s."),
                    {'volume': volume['id']})

            # Recompute this volume's host; reusing the value left over from
            # the validation loop above would apply the last volume's host to
            # every backup.
            volume_host = volume_utils.extract_host(volume['host'], 'host')
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': (desc_prefix + description
                                        if description else desc_prefix),
                'volume_id': volume['id'],
                'status': 'creating',
                'container': container,
                'parent_id': parent_id,
                # Set backup size to "0", meaning "not available yet"; the
                # backup driver will report the exact size once the backup
                # completes. We lined up with OP that while a backup is in
                # "creating" status, OP shows "--" in the "size" field
                # instead of "0".
                # 'size': volume['size'],
                'size': 0,
                'host': volume_host,
            }

            # (maqi) Use the volume display_description field to save the
            # volume's previous status, since volumes in Kilo don't have a
            # previous_status field in the database
            previous_status = volume['status']
            self.db.volume_update(context, volume['id'], {
                'status': 'backing-up',
                'display_description': previous_status
            })

            try:
                backup = self.db.backup_create(context, options)
                QUOTAS.commit(context, reservations)
            except Exception:
                with excutils.save_and_reraise_exception():
                    try:
                        self.db.backup_destroy(context, backup['id'])
                    finally:
                        QUOTAS.rollback(context, reservations)
            backups.append(backup)
            kwargs = {
                'host': backup['host'],
                'backup_id': backup['id'],
                'volume_id': volume['id'],
            }
            inst_backup_kwargs.append(kwargs)

        self.backup_rpcapi.create_instance_backup(context, instance_uuid,
                                                  inst_backup_kwargs)
        LOG.debug(
            "Returning from create_instance_backup "
            "with result: %(backups)s", {'backups': backups})
        return backups
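
Since every backup created by one call shares the same 32-character desc_prefix, records belonging to a single instance-backup request can be regrouped later. A small helper sketch that inverts the prefix scheme; it is not part of the original API:

def group_backups_by_request(backups):
    """Group backup dicts by the 32-char UUID prefix that
    create_instance_backup() prepends to display_description."""
    groups = {}
    for backup in backups:
        desc = backup.get('display_description') or ''
        # The first 32 chars are the request prefix; the rest is the
        # user-supplied description (possibly empty).
        groups.setdefault(desc[:32], []).append((backup['id'], desc[32:]))
    return groups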
Example 8
    def restore(self, context, backup_id, volume_id=None):
        """Make the RPC call to restore a volume backup."""
        check_policy(context, 'restore')
        backup = self.get(context, backup_id)
        if backup['status'] != 'available':
            msg = _('Backup status must be available')
            raise exception.InvalidBackup(reason=msg)

        size = backup['size']
        if size is None:
            msg = _('Backup to be restored has invalid size')
            raise exception.InvalidBackup(reason=msg)

        # Create a volume if none is specified. If a volume is specified,
        # check that it is large enough for the backup
        if volume_id is None:
            name = 'restore_backup_%s' % backup_id
            description = 'auto-created_from_restore_from_backup'

            LOG.info(_LI("Creating volume of %(size)s GB for restore of "
                         "backup %(backup_id)s"), {
                             'size': size,
                             'backup_id': backup_id
                         },
                     context=context)
            volume = self.volume_api.create(context, size, name, description)
            volume_id = volume['id']

            while True:
                volume = self.volume_api.get(context, volume_id)
                if volume['status'] != 'creating':
                    break
                greenthread.sleep(1)
        else:
            volume = self.volume_api.get(context, volume_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be restored to must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"]:
            for attachment in volume['volume_attachment']:
                instance_uuid = attachment['instance_uuid']
                instance = nova.API().get_server(context, instance_uuid)
                if instance.status not in ['SHUTOFF']:
                    msg = (_('Volume to be restored to can be in-use, but '
                             'the attached VM must be powered off; current '
                             'VM status is "%s".') % instance.status)
                    raise exception.InvalidVolume(reason=msg)

        # record volume status in the display_description
        self.db.volume_update(context, volume_id,
                              {'display_description': volume['status']})

        LOG.debug('Checking backup size %(bs)s against volume size %(vs)s', {
            'bs': size,
            'vs': volume['size']
        })
        # backup size is in MB; volume size is in GB
        if size > volume['size'] * 1024:
            msg = (_('volume size %(volume_size)d GB is too small to restore '
                     'backup of size %(size)d MB.') % {
                         'volume_size': volume['size'],
                         'size': size
                     })
            raise exception.InvalidVolume(reason=msg)

        LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
                     "backup %(backup_id)s"), {
                         'volume_id': volume_id,
                         'backup_id': backup_id
                     },
                 context=context)

        # Set the status here rather than at the start, which would require
        # unrolling it on every error path; the window should be very small
        self.db.backup_update(context, backup_id, {'status': 'restoring'})
        self.db.volume_update(context, volume_id,
                              {'status': 'restoring-backup'})

        volume_host = volume_utils.extract_host(volume['host'], 'host')
        self.backup_rpcapi.restore_backup(context, volume_host, backup['id'],
                                          volume_id)

        d = {
            'backup_id': backup_id,
            'volume_id': volume_id,
        }

        return d
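
A minimal caller sketch for restore(): omitting volume_id makes the API create a fresh volume sized from the backup and poll until it leaves 'creating'. The api instance and ctxt below are placeholders for the backup API object these methods belong to and a request context:

# Hedged usage sketch: restore a backup into an auto-created volume.
result = api.restore(ctxt, backup_id='BACKUP-UUID-HERE')
print(result['volume_id'])  # id of the auto-created restore target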