Example #1
    def test_obj_field_parent(self, mock_lzy_ld):
        backup = objects.Backup(context=self.context, parent_id=None)
        self.assertIsNone(backup.parent)

        # Bug #1862635: should trigger a lazy load
        backup = objects.Backup(context=self.context, parent_id=fake.UUID5)
        _ = backup.parent
        mock_lzy_ld.assert_called_once()
Example #2
    def test_obj_field_parent(self, mock_lzy_ld):
        backup = objects.Backup(context=self.context, parent_id=None)
        self.assertIsNone(backup.parent)

        # Bug #1862635: should trigger a lazy load
        backup = objects.Backup(context=self.context, parent_id=fake.UUID5)
        # need noqa here because of pyflakes issue #202
        _ = backup.parent  # noqa
        mock_lzy_ld.assert_called_once()
Example #3
    def _create_backup_db_entry(self,
                                volume_id=1,
                                display_name='test_backup',
                                display_description='this is a test backup',
                                container='volumebackups',
                                status='creating',
                                size=1,
                                object_count=0,
                                project_id='fake',
                                service=None):
        """Create a backup entry in the DB.

        Return the backup object.
        """
        kwargs = {}
        kwargs['volume_id'] = volume_id
        kwargs['user_id'] = 'fake'
        kwargs['project_id'] = project_id
        kwargs['host'] = 'testhost'
        kwargs['availability_zone'] = '1'
        kwargs['display_name'] = display_name
        kwargs['display_description'] = display_description
        kwargs['container'] = container
        kwargs['status'] = status
        kwargs['fail_reason'] = ''
        kwargs['service'] = service or CONF.backup_driver
        kwargs['snapshot'] = False
        kwargs['parent_id'] = None
        kwargs['size'] = size
        kwargs['object_count'] = object_count
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup
Example #4
    def _create_backup_db_entry(self,
                                volume_id=_DEFAULT_VOLUME_ID,
                                container=google_dr.CONF.backup_gcs_bucket,
                                parent_id=None,
                                status=None,
                                service_metadata=None):

        try:
            db.volume_get(self.ctxt, volume_id)
        except exception.NotFound:
            self._create_volume_db_entry(volume_id=volume_id)

        kwargs = {
            'size': 1,
            'container': container,
            'volume_id': volume_id,
            'parent_id': parent_id,
            'user_id': fake.USER_ID,
            'project_id': fake.PROJECT_ID,
            'status': status,
            'service_metadata': service_metadata,
        }
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup
Example #5
 def test_save(self, backup_update):
     backup = objects.Backup._from_db_object(self.context, objects.Backup(),
                                             fake_backup)
     backup.display_name = 'foobar'
     backup.save()
     backup_update.assert_called_once_with(self.context, backup.id,
                                           {'display_name': 'foobar'})
Example #6
    def import_record(self, context, backup_service, backup_url):
        """Make the RPC call to import a volume backup.

        :param context: running context
        :param backup_service: backup service name
        :param backup_url: backup description to be used by the backup driver
        :raises: InvalidBackup
        :raises: ServiceNotFound
        """
        check_policy(context, 'backup-import')

        # NOTE(ronenkat): since we don't have a backup-scheduler
        # we need to find a host that supports the backup service
        # that was used to create the backup.
        # We send it to the first backup service host, and the backup manager
        # on that host will forward it to other hosts on the list if it
        # cannot support the correct service itself.
        hosts = self._list_backup_services()
        if len(hosts) == 0:
            raise exception.ServiceNotFound(service_id=backup_service)

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'volume_id': '0000-0000-0000-0000',
            'status': 'creating',
        }
        backup = objects.Backup(context=context, **kwargs)
        backup.create()
        first_host = hosts.pop()
        self.backup_rpcapi.import_record(context, first_host, backup,
                                         backup_service, backup_url, hosts)

        return backup
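A minimal caller sketch for the method above (not from the source; `backup_api`, `ctxt`, and the driver/URL values are illustrative assumptions):

# Hypothetical usage: re-import a record produced by an earlier export.
# backup_service and backup_url are whatever that export returned; the
# call raises ServiceNotFound if no backup service hosts are running.
backup = backup_api.import_record(
    ctxt,
    backup_service='cinder.backup.drivers.swift',   # illustrative value
    backup_url=exported_record['backup_url'])
# The returned object starts in the 'creating' status set above.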
Example #7
    def test_import_record(self):
        backup = objects.Backup(context=self.context, id=1)
        export_string = backup.encode_record()
        imported_backup = objects.Backup.decode_record(export_string)

        # Make sure we don't lose data when converting from string
        self.assertDictEqual(dict(backup), imported_backup)
Example #8
    def test_import_record_additional_info_cant_overwrite(self):
        backup = objects.Backup(context=self.context, id=1)
        export_string = backup.encode_record(id='fake_id')
        imported_backup = objects.Backup.decode_record(export_string)

        # Make sure the extra_info can't overwrite basic data
        self.assertDictEqual(dict(backup), imported_backup)
Example #9
def create_backup(ctxt,
                  volume_id=fake.VOLUME_ID,
                  display_name='test_backup',
                  display_description='This is a test backup',
                  status=fields.BackupStatus.CREATING,
                  parent_id=None,
                  temp_volume_id=None,
                  temp_snapshot_id=None,
                  snapshot_id=None,
                  data_timestamp=None,
                  **kwargs):
    """Create a backup object."""
    values = {
        'user_id': ctxt.user_id or fake.USER_ID,
        'project_id': ctxt.project_id or fake.PROJECT_ID,
        'volume_id': volume_id,
        'status': status,
        'display_name': display_name,
        'display_description': display_description,
        'container': 'fake',
        'availability_zone': 'fake',
        'service': 'fake',
        'size': 5 * 1024 * 1024,
        'object_count': 22,
        'host': socket.gethostname(),
        'parent_id': parent_id,
        'temp_volume_id': temp_volume_id,
        'temp_snapshot_id': temp_snapshot_id,
        'snapshot_id': snapshot_id,
        'data_timestamp': data_timestamp, }

    values.update(kwargs)
    backup = objects.Backup(ctxt, **values)
    backup.create()
    return backup
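A short usage sketch for the helper above (a guess at typical test usage, assuming a test context `ctxt`; the overrides are illustrative):

# Build a parent backup and an incremental child that points at it,
# overriding only the fields the test cares about.
parent = create_backup(ctxt, status=fields.BackupStatus.AVAILABLE)
child = create_backup(ctxt,
                      parent_id=parent.id,
                      display_name='incremental_backup')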
Example #10
    def test_import_record(self):
        utils.replace_obj_loader(self, objects.Backup)
        backup = objects.Backup(context=self.context, id=1, parent_id=None,
                                num_dependent_backups=0)
        export_string = backup.encode_record()
        imported_backup = objects.Backup.decode_record(export_string)

        # Make sure we don't lose data when converting from string
        self.assertDictEqual(self._expected_backup(backup), imported_backup)
Example #11
    def test_import_record_additional_info_cant_overwrite(self):
        utils.replace_obj_loader(self, objects.Backup)
        backup = objects.Backup(context=self.context, id=1, parent_id=None,
                                num_dependent_backups=0)
        export_string = backup.encode_record(id='fake_id')
        imported_backup = objects.Backup.decode_record(export_string)

        # Make sure the extra_info can't overwrite basic data
        self.assertDictEqual(self._expected_backup(backup), imported_backup)
Example #12
 def test_generate_object_name_prefix(self, utcnow_mock):
     timestamp = '20170518102205'
     utcnow_mock.return_value.strftime.return_value = timestamp
     backup = objects.Backup(self.ctxt,
                             volume_id=fake.VOLUME_ID,
                             id=fake.BACKUP_ID)
     res = self.driver._generate_object_name_prefix(backup)
     expected = 'volume_%s_%s_backup_%s' % (backup.volume_id, timestamp,
                                            backup.id)
     self.assertEqual(expected, res)
Example #13
    def test_save_with_metadata(self, backup_update, metadata_update):
        backup = objects.Backup._from_db_object(self.context, objects.Backup(),
                                                fake_backup)

        backup.metadata = {'key1': 'value1'}
        self.assertEqual({'metadata': {
            'key1': 'value1'
        }}, backup.obj_get_changes())
        backup.save()
        metadata_update.assert_called_once_with(self.context, backup.id,
                                                {'key1': 'value1'}, True)
Example #14
 def test_destroy(self, backup_destroy, utcnow_mock):
     backup_destroy.return_value = {
         'status': fields.BackupStatus.DELETED,
         'deleted': True,
         'deleted_at': utcnow_mock.return_value}
     backup = objects.Backup(context=self.context, id=fake.BACKUP_ID)
     backup.destroy()
     self.assertTrue(backup_destroy.called)
     admin_context = backup_destroy.call_args[0][0]
     self.assertTrue(admin_context.is_admin)
     self.assertTrue(backup.deleted)
     self.assertEqual(fields.BackupStatus.DELETED, backup.status)
     self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC),
                      backup.deleted_at)
Example #15
    def test_import_record_additional_info(self):
        backup = objects.Backup(context=self.context, id=1)
        extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}}
        extra_info_copy = extra_info.copy()
        export_string = backup.encode_record(extra_info=extra_info)
        imported_backup = objects.Backup.decode_record(export_string)

        # Dictionary passed should not be modified
        self.assertDictEqual(extra_info_copy, extra_info)

        # Make sure we don't lose data when converting from string and that
        # extra info is still there
        expected = dict(backup)
        expected['extra_info'] = extra_info
        self.assertDictEqual(expected, imported_backup)
Example #16
    def _create_export_record_db_entry(self,
                                       volume_id='0000',
                                       status='creating',
                                       project_id='fake'):
        """Create a backup entry in the DB.

        Return the backup object.
        """
        kwargs = {}
        kwargs['volume_id'] = volume_id
        kwargs['user_id'] = 'fake'
        kwargs['project_id'] = project_id
        kwargs['status'] = status
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup
Example #17
    def test_import_record_w_parent(self, backup_get):
        full_backup = objects.Backup.get_by_id(self.context, fake.USER_ID)
        self._compare(self, fake_backup, full_backup)

        utils.replace_obj_loader(self, objects.Backup)
        incr_backup = objects.Backup(context=self.context,
                                     id=fake.BACKUP2_ID,
                                     parent=full_backup,
                                     parent_id=full_backup['id'],
                                     num_dependent_backups=0)
        export_string = incr_backup.encode_record()
        imported_backup = objects.Backup.decode_record(export_string)

        # Make sure we don't lose data when converting from string
        self.assertDictEqual(self._expected_backup(incr_backup),
                             imported_backup)
Example #18
    def _create_backup_db_entry(self,
                                volume_id=str(uuid.uuid4()),
                                restore_volume_id=None,
                                display_name='test_backup',
                                display_description='this is a test backup',
                                container='volumebackups',
                                status=fields.BackupStatus.CREATING,
                                size=1,
                                object_count=0,
                                project_id=str(uuid.uuid4()),
                                service=None,
                                temp_volume_id=None,
                                temp_snapshot_id=None,
                                snapshot_id=None,
                                metadata=None,
                                parent_id=None,
                                encryption_key_id=None):
        """Create a backup entry in the DB.

        Return the backup object.
        """
        kwargs = {}
        kwargs['volume_id'] = volume_id
        kwargs['restore_volume_id'] = restore_volume_id
        kwargs['user_id'] = str(uuid.uuid4())
        kwargs['project_id'] = project_id
        kwargs['host'] = 'testhost'
        kwargs['availability_zone'] = '1'
        kwargs['display_name'] = display_name
        kwargs['display_description'] = display_description
        kwargs['container'] = container
        kwargs['status'] = status
        kwargs['fail_reason'] = ''
        kwargs['service'] = service or CONF.backup_driver
        kwargs['snapshot_id'] = snapshot_id
        kwargs['parent_id'] = parent_id
        kwargs['size'] = size
        kwargs['object_count'] = object_count
        kwargs['temp_volume_id'] = temp_volume_id
        kwargs['temp_snapshot_id'] = temp_snapshot_id
        kwargs['metadata'] = metadata or {}
        kwargs['encryption_key_id'] = encryption_key_id
        kwargs['service_metadata'] = 'test_metadata'
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup
Example #19
    def test_import_record_additional_info(self):
        utils.replace_obj_loader(self, objects.Backup)
        backup = objects.Backup(context=self.context, id=fake.BACKUP_ID,
                                parent_id=None,
                                num_dependent_backups=0)
        extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}}
        extra_info_copy = extra_info.copy()
        export_string = backup.encode_record(extra_info=extra_info)
        imported_backup = objects.Backup.decode_record(export_string)

        # Dictionary passed should not be modified
        self.assertDictEqual(extra_info_copy, extra_info)

        # Make sure we don't lose data when converting from string and that
        # extra info is still there
        expected = self._expected_backup(backup)
        expected['extra_info'] = extra_info
        self.assertDictEqual(expected, imported_backup)
Example #20
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None,
               metadata=None):
        """Make the RPC call to create a volume backup."""
        volume = self.volume_api.get(context, volume_id)
        context.authorize(policy.CREATE_POLICY, target_obj=volume)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
            if snapshot['status'] not in ["available"]:
                msg = (_('Snapshot to be backed up must be available, '
                         'but the current status is "%s".') %
                       snapshot['status'])
                raise exception.InvalidSnapshot(reason=msg)
        elif volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume.host, 'host')
        availability_zone = availability_zone or volume.availability_zone
        host = self._get_available_backup_service_host(volume_host,
                                                       availability_zone)

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context,
                                                   e,
                                                   resource='backups',
                                                   size=volume.size)
        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                QUOTAS.rollback(context, reservations)
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        parent = None

        if latest_backup:
            parent = latest_backup
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                QUOTAS.rollback(context, reservations)
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.BACKING_UP})
        else:
            self.db.volume_update(context, volume_id, {
                'status': 'backing-up',
                'previous_status': previous_status
            })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'availability_zone': availability_zone,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
                'parent': parent,
                'metadata': metadata or {}
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
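The parent-selection lambda above is dense; this is a standalone sketch of the same rule (illustrative only: `candidates` stands in for the non-empty `backups.objects` list and each item exposes a timezone-aware 'data_timestamp'):

from datetime import datetime

from pytz import timezone


def pick_parent(candidates, snapshot=None):
    # Sentinel older than any real backup: candidates whose
    # data_timestamp is not earlier than the snapshot's created_at are
    # mapped to it, so they can never win when backing up a snapshot.
    epoch = datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC'))
    return max(
        candidates,
        key=lambda b: b['data_timestamp']
        if (not snapshot or b['data_timestamp'] < snapshot['created_at'])
        else epoch)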
Example #21
def fake_backup_obj(context, **updates):
    return objects.Backup._from_db_object(context,
                                          objects.Backup(),
                                          fake_db_backup(**updates),
                                          expected_attrs=['metadata'])
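A brief usage sketch for the factory above (assumed typical test usage; the keyword overrides are illustrative and are forwarded to fake_db_backup()):

# Build an in-memory Backup for a unit test; 'metadata' is loaded
# because of expected_attrs above.
backup = fake_backup_obj(ctxt,
                         display_name='unit_test_backup',
                         status='available')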
Example #22
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".') % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume['availability_zone'],
                                               volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id, {
            'status': 'backing-up',
            'previous_status': previous_status
        })
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
Example #23
 def test_create(self, backup_create):
     backup = objects.Backup(context=self.context)
     backup.create()
     self.assertEqual(fake_backup['id'], backup.id)
     self.assertEqual(fake_backup['volume_id'], backup.volume_id)
Example #24
 def test_obj_field_metadata(self):
     backup = objects.Backup(context=self.context,
                             metadata={'test_key': 'test_value'})
     self.assertEqual({'test_key': 'test_value'}, backup.metadata)
Example #25
 def test_destroy(self, backup_destroy):
     backup = objects.Backup(context=self.context, id=fake.backup_id)
     backup.destroy()
     self.assertTrue(backup_destroy.called)
     admin_context = backup_destroy.call_args[0][0]
     self.assertTrue(admin_context.is_admin)
Example #26
 def test_obj_field_restore_volume_id(self):
     backup = objects.Backup(context=self.context, restore_volume_id='2')
     self.assertEqual('2', backup.restore_volume_id)
Example #27
 def test_obj_field_snapshot_id(self):
     backup = objects.Backup(context=self.context, snapshot_id='2')
     self.assertEqual('2', backup.snapshot_id)
Example #28
 def test_obj_field_temp_volume_snapshot_id(self):
     backup = objects.Backup(context=self.context,
                             temp_volume_id='2',
                             temp_snapshot_id='3')
     self.assertEqual('2', backup.temp_volume_id)
     self.assertEqual('3', backup.temp_snapshot_id)
Example #29
def fake_backup_obj(context, **updates):
    return objects.Backup._from_db_object(context, objects.Backup(),
                                          fake_db_backup(**updates))
Example #30
    def create(self, context, name, description, volume_id,
               container, incremental=False, availability_zone=None,
               force=False):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".')
                   % volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use '
                    'the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(msg, {'s_pid': context.project_id,
                                      's_size': volume['size'],
                                      'd_consumed': _consumed(over),
                                      'd_quota': quotas[over]})
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {'s_pid': context.project_id,
                                      'd_consumed': _consumed(over)})
                    raise exception.BackupLimitExceeded(
                        allowed=quotas[over])

        # Find the latest backup of the volume and use it as the parent
        # backup to do an incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                           volume_id)
            if backups.objects:
                latest_backup = max(backups.objects,
                                    key=lambda x: x['created_at'])
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != "available":
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        self.db.volume_update(context, volume_id,
                              {'status': 'backing-up',
                               'previous_status': previous_status})
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': 'creating',
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup