def setUp(self):
    """Prepare coercion fixtures for the BackupStatus field tests.

    Every legal backup status string must coerce to itself; anything
    else ('acme') must be rejected by the field.
    """
    super(TestBackupStatus, self).setUp()
    self.field = fields.BackupStatusField()
    # Each valid status string coerces to the identical string.
    valid_statuses = ('error', 'error_deleting', 'creating', 'available',
                      'deleting', 'deleted', 'restoring')
    self.coerce_good_values = [(status, status) for status in valid_statuses]
    self.coerce_bad_values = ['acme']
    # Only the first pair is exercised for the primitive round-trips.
    self.to_primitive_values = self.coerce_good_values[0:1]
    self.from_primitive_values = self.coerce_good_values[0:1]
def setUp(self):
    """Prepare coercion fixtures for the BackupStatus field tests.

    Every legal backup status string must coerce to the matching
    BackupStatus constant; anything else ('acme') must be rejected.
    """
    super(TestBackupStatus, self).setUp()
    self.field = fields.BackupStatusField()
    # Pair each raw status string with its canonical BackupStatus
    # constant; the constant attribute name is the upper-cased string.
    status_names = ('error', 'error_deleting', 'creating', 'available',
                    'deleting', 'deleted', 'restoring')
    self.coerce_good_values = [
        (name, getattr(fields.BackupStatus, name.upper()))
        for name in status_names
    ]
    self.coerce_bad_values = ['acme']
    # Only the first pair is exercised for the primitive round-trips.
    self.to_primitive_values = self.coerce_good_values[0:1]
    self.from_primitive_values = self.coerce_good_values[0:1]
class Backup(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat,
             base.CinderComparableObject):
    """Versioned object wrapping a volume backup database row.

    Persists through the ``db.backup_*`` API and tracks in-place changes
    to the lazily loaded ``metadata`` dict so ``save()`` can write them
    back separately from the scalar columns.
    """
    # Version 1.0: Initial version
    # Version 1.1: Add new field num_dependent_backups and extra fields
    #              is_incremental and has_dependent_backups.
    # Version 1.2: Add new field snapshot_id and data_timestamp.
    # Version 1.3: Changed 'status' field to use BackupStatusField
    # Version 1.4: Add restore_volume_id
    # Version 1.5: Add metadata
    # Version 1.6: Add encryption_key_id
    # Version 1.7: Add parent
    VERSION = '1.7'

    # Fields that are lazy-loaded on first access (see obj_load_attr)
    # and skipped when copying columns in _from_db_object.
    OPTIONAL_FIELDS = ('metadata', 'parent')

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'volume_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'container': fields.StringField(nullable=True),
        'parent_id': fields.StringField(nullable=True),
        'parent': fields.ObjectField('Backup', nullable=True),
        'status': c_fields.BackupStatusField(nullable=True),
        'fail_reason': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        # NOTE(dulek): Metadata field is used to store any strings by backup
        # drivers, that's why it can't be DictOfStringsField.
        'service_metadata': fields.StringField(nullable=True),
        'service': fields.StringField(nullable=True),
        'object_count': fields.IntegerField(nullable=True),
        'temp_volume_id': fields.StringField(nullable=True),
        'temp_snapshot_id': fields.StringField(nullable=True),
        'num_dependent_backups': fields.IntegerField(nullable=True),
        'snapshot_id': fields.StringField(nullable=True),
        'data_timestamp': fields.DateTimeField(nullable=True),
        'restore_volume_id': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'encryption_key_id': fields.StringField(nullable=True),
    }

    obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']

    def __init__(self, *args, **kwargs):
        super(Backup, self).__init__(*args, **kwargs)
        # Snapshot of metadata taken at the last reset, used by
        # obj_what_changed() to detect in-place dict mutation.
        self._orig_metadata = {}

        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        """Re-baseline the metadata snapshot for change detection.

        Called with the field list from obj_reset_changes; only acts when
        'metadata' is included (or no field list is given).
        """
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if self.obj_attr_is_set('metadata') else {})

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        # Metadata is always eagerly loaded when building from a DB row.
        return 'metadata',

    @property
    def name(self):
        # Human-readable backup name derived from the configured template.
        return CONF.backup_name_template % self.id

    @property
    def is_incremental(self):
        # A backup with a parent is an incremental backup.
        return bool(self.parent_id)

    @property
    def has_dependent_backups(self):
        return bool(self.num_dependent_backups)

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        # Fields added in later versions, keyed by the version that
        # introduced them; they are stripped when downgrading below it.
        added_fields = (((1, 7), ('parent', )), )
        super(Backup, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        for version, remove_fields in added_fields:
            if target_version < version:
                for obj_field in remove_fields:
                    primitive.pop(obj_field, None)

    @classmethod
    def _from_db_object(cls, context, backup, db_backup, expected_attrs=None):
        """Populate a Backup object from a DB row.

        Optional fields are only filled when named in expected_attrs;
        missing integer columns default to 0.
        """
        if expected_attrs is None:
            expected_attrs = []
        for name, field in backup.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_backup.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            backup[name] = value

        if 'metadata' in expected_attrs:
            metadata = db_backup.get('backup_metadata')
            if metadata is None:
                # The row was loaded without its metadata relation.
                raise exception.MetadataAbsent()
            backup.metadata = {item['key']: item['value']
                               for item in metadata}

        backup._context = context
        backup.obj_reset_changes()
        return backup

    def obj_reset_changes(self, fields=None):
        super(Backup, self).obj_reset_changes(fields)
        # Also re-baseline the metadata snapshot so in-place edits made
        # after this point are detected as changes.
        self._reset_metadata_tracking(fields=fields)

    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute ('metadata' or 'parent')."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'parent':
            # Fetch the parent backup only for incremental backups.
            if self.parent_id:
                self.parent = self.get_by_id(self._context, self.parent_id)
            else:
                self.parent = None
        self.obj_reset_changes(fields=[attrname])

    def obj_what_changed(self):
        changes = super(Backup, self).obj_what_changed()
        # Compare against the snapshot to catch in-place dict mutation,
        # which the base change tracking would miss.
        if hasattr(self, 'metadata') and self.metadata != self._orig_metadata:
            changes.add('metadata')
        return changes

    def create(self):
        """Insert this backup into the database (must not have an id yet)."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.cinder_obj_get_changes()
        db_backup = db.backup_create(self._context, updates)
        self._from_db_object(self._context, self, db_backup)

    def save(self):
        """Persist changed fields; metadata is written via its own API."""
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'metadata' in updates:
                metadata = updates.pop('metadata', None)
                self.metadata = db.backup_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            # 'parent' is a derived object, never written to the row.
            updates.pop('parent', None)
            db.backup_update(self._context, self.id, updates)
        self.obj_reset_changes()

    def destroy(self):
        """Soft-delete the backup row and refresh the deleted columns."""
        with self.obj_as_admin():
            updated_values = db.backup_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    @staticmethod
    def decode_record(backup_url):
        """Deserialize backup metadata from string into a dictionary.

        :raises InvalidInput:
        """
        try:
            return jsonutils.loads(base64.decode_as_text(backup_url))
        except TypeError:
            msg = _("Can't decode backup record.")
        except ValueError:
            msg = _("Can't parse backup record.")
        raise exception.InvalidInput(reason=msg)

    def encode_record(self, **kwargs):
        """Serialize backup object, with optional extra info, into a string."""
        # We don't want to export extra fields and we want to force lazy
        # loading, so we can't use dict(self) or self.obj_to_primitive
        record = {name: field.to_primitive(self, name, getattr(self, name))
                  for name, field in self.fields.items() if name != 'parent'}
        # We must update kwargs instead of record to ensure we don't overwrite
        # "real" data from the backup
        kwargs.update(record)
        retval = jsonutils.dump_as_bytes(kwargs)
        return base64.encode_as_text(retval)
class Backup(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat):
    """Versioned object wrapping a volume backup database row.

    Persists through the ``db.backup_*`` API; this revision (1.4) has no
    lazy-loaded fields and writes all changed columns directly in save().
    """
    # Version 1.0: Initial version
    # Version 1.1: Add new field num_dependent_backups and extra fields
    #              is_incremental and has_dependent_backups.
    # Version 1.2: Add new field snapshot_id and data_timestamp.
    # Version 1.3: Changed 'status' field to use BackupStatusField
    # Version 1.4: Add restore_volume_id
    VERSION = '1.4'

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'volume_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'container': fields.StringField(nullable=True),
        'parent_id': fields.StringField(nullable=True),
        'status': c_fields.BackupStatusField(nullable=True),
        'fail_reason': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        # NOTE(dulek): Metadata field is used to store any strings by backup
        # drivers, that's why it can't be DictOfStringsField.
        'service_metadata': fields.StringField(nullable=True),
        'service': fields.StringField(nullable=True),
        'object_count': fields.IntegerField(nullable=True),
        'temp_volume_id': fields.StringField(nullable=True),
        'temp_snapshot_id': fields.StringField(nullable=True),
        'num_dependent_backups': fields.IntegerField(nullable=True),
        'snapshot_id': fields.StringField(nullable=True),
        'data_timestamp': fields.DateTimeField(nullable=True),
        'restore_volume_id': fields.StringField(nullable=True),
    }

    obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']

    @property
    def name(self):
        # Human-readable backup name derived from the configured template.
        return CONF.backup_name_template % self.id

    @property
    def is_incremental(self):
        # A backup with a parent is an incremental backup.
        return bool(self.parent_id)

    @property
    def has_dependent_backups(self):
        return bool(self.num_dependent_backups)

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        # No fields to strip at this object version; the conversion below
        # is kept for parity with other objects' compat handlers.
        super(Backup, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, backup, db_backup):
        """Populate a Backup object from a DB row.

        Missing integer columns default to 0.
        """
        for name, field in backup.fields.items():
            value = db_backup.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            backup[name] = value

        backup._context = context
        backup.obj_reset_changes()
        return backup

    def create(self):
        """Insert this backup into the database (must not have an id yet)."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.cinder_obj_get_changes()
        db_backup = db.backup_create(self._context, updates)
        self._from_db_object(self._context, self, db_backup)

    def save(self):
        """Persist all changed fields to the database row."""
        updates = self.cinder_obj_get_changes()
        if updates:
            db.backup_update(self._context, self.id, updates)
        self.obj_reset_changes()

    def destroy(self):
        """Soft-delete the backup row and refresh the deleted columns."""
        with self.obj_as_admin():
            updated_values = db.backup_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    @staticmethod
    def decode_record(backup_url):
        """Deserialize backup metadata from string into a dictionary.

        :raises: InvalidInput
        """
        try:
            return jsonutils.loads(base64.decode_as_text(backup_url))
        except TypeError:
            msg = _("Can't decode backup record.")
        except ValueError:
            msg = _("Can't parse backup record.")
        raise exception.InvalidInput(reason=msg)

    def encode_record(self, **kwargs):
        """Serialize backup object, with optional extra info, into a string."""
        # We don't want to export extra fields and we want to force lazy
        # loading, so we can't use dict(self) or self.obj_to_primitive
        record = {name: field.to_primitive(self, name, getattr(self, name))
                  for name, field in self.fields.items()}
        # We must update kwargs instead of record to ensure we don't overwrite
        # "real" data from the backup
        kwargs.update(record)
        retval = jsonutils.dump_as_bytes(kwargs)
        return base64.encode_as_text(retval)