class ServerConfig(properties.Owner):
    """
    Validated configuration for exporting a volume.

    Values come from a plain config dict; assigning them through the
    class-level property descriptors performs the validation.
    """

    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    readonly = properties.Boolean(default=False)
    discard = properties.Boolean(default=False)
    detect_zeroes = properties.Boolean(default=False)
    backing_chain = properties.Boolean(default=True)
    bitmap = properties.UUID()

    def __init__(self, config):
        self.sd_id = config.get("sd_id")
        self.img_id = config.get("img_id")
        self.vol_id = config.get("vol_id")
        self.readonly = config.get("readonly")
        self.discard = config.get("discard")
        self.detect_zeroes = config.get("detect_zeroes")
        # Setting to None overrides the default value.
        # See https://bugzilla.redhat.com/1892403
        self.backing_chain = config.get("backing_chain", True)
        self.bitmap = config.get("bitmap")

        # Bitmap export is only valid for read-only access to the full
        # chain; reject invalid combinations up front.
        if self.bitmap:
            if not self.backing_chain:
                # When exporting a bitmap we always export the entire chain.
                raise se.UnsupportedOperation(
                    "Cannot export bitmap with backing_chain=False")
            if not self.readonly:
                # Exporting bitmaps makes sense only for incremental backup.
                raise se.UnsupportedOperation(
                    "Cannot export bitmap for writing")
class ServerConfig(properties.Owner):
    """Validated configuration for exporting a volume."""

    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    readonly = properties.Boolean(default=False)
    discard = properties.Boolean(default=False)

    def __init__(self, config):
        # Assigning through the property descriptors validates each value.
        for name in ("sd_id", "img_id", "vol_id", "readonly", "discard"):
            setattr(self, name, config.get(name))
class DiskConfig(properties.Owner):
    """Validated per-disk backup configuration."""

    vol_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    dom_id = properties.UUID(required=True)
    checkpoint = properties.Boolean(required=True)
    backup_mode = properties.Enum(values=("full", "incremental"))

    def __init__(self, disk_config):
        self.vol_id = disk_config.get("volumeID")
        self.img_id = disk_config.get("imageID")
        self.dom_id = disk_config.get("domainID")
        # Mark if the disk is included in the checkpoint.
        self.checkpoint = disk_config.get("checkpoint")
        self.backup_mode = disk_config.get("backup_mode")

        # Initialized when the engine creates the scratch disk on a
        # shared storage; otherwise stays None.
        self.scratch_disk = None
        if "scratch_disk" in disk_config:
            scratch = disk_config["scratch_disk"]
            self.scratch_disk = ScratchDiskConfig(
                path=scratch.get("path"),
                type=scratch.get("type"),
                sd_id=scratch.get("domainID"),
                img_id=scratch.get("imageID"),
                vol_id=scratch.get("volumeID"))
class DiskConfig(properties.Owner):
    """Validated per-disk backup configuration."""

    vol_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    dom_id = properties.UUID(required=True)
    checkpoint = properties.Boolean(required=True)

    def __init__(self, disk_config):
        # Translate the wire keys to attribute names; assignment through
        # the descriptors validates each value. "checkpoint" marks whether
        # the disk is included in the checkpoint.
        for attr, key in (("vol_id", "volumeID"),
                          ("img_id", "imageID"),
                          ("dom_id", "domainID"),
                          ("checkpoint", "checkpoint")):
            setattr(self, attr, disk_config.get(key))
class DiskConfig(properties.Owner):
    """Validated per-disk backup configuration."""

    vol_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    dom_id = properties.UUID(required=True)
    checkpoint = properties.Boolean(required=True)
    backup_mode = properties.Enum(values=("full", "incremental"))

    def __init__(self, disk_config):
        # Translate the wire keys to attribute names; assignment through
        # the descriptors validates each value. "checkpoint" marks whether
        # the disk is included in the checkpoint.
        for attr, key in (("vol_id", "volumeID"),
                          ("img_id", "imageID"),
                          ("dom_id", "domainID"),
                          ("checkpoint", "checkpoint"),
                          ("backup_mode", "backup_mode")):
            setattr(self, attr, disk_config.get(key))
class BackupConfig(properties.Owner):
    """
    Validated configuration for a VM backup operation.

    Builds a DiskConfig for every entry in the "disks" list and rejects
    configurations that request an incremental backup for a disk without
    providing a source checkpoint (from_checkpoint_id).

    Raises:
        exception.BackupError: if a disk uses MODE_INCREMENTAL while
            from_checkpoint_id is None.
    """

    backup_id = properties.UUID(required=True)
    # Fix: "required" is a boolean flag. The original passed required=''
    # (a falsy string), which behaved like required=False only by
    # accident; state the intent explicitly.
    from_checkpoint_id = properties.UUID(required=False)
    # NOTE(review): a '' default for a UUID property looks inconsistent
    # with from_checkpoint_id, but __init__ always assigns the attribute,
    # so the default is preserved as-is to avoid a behavior change.
    to_checkpoint_id = properties.UUID(default='')
    require_consistency = properties.Boolean()
    creation_time = properties.Integer(minval=0)

    def __init__(self, backup_config):
        self.backup_id = backup_config.get("backup_id")
        self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
        self.to_checkpoint_id = backup_config.get("to_checkpoint_id")
        self.require_consistency = backup_config.get("require_consistency")
        self.creation_time = backup_config.get("creation_time")
        self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]

        for disk in self.disks:
            # Incremental backup needs a checkpoint to start from; without
            # one only a full backup is possible.
            if (self.from_checkpoint_id is None
                    and disk.backup_mode == MODE_INCREMENTAL):
                raise exception.BackupError(
                    reason="Cannot start an incremental backup for disk, "
                           "full backup is requested",
                    backup=self.backup_id,
                    disk=disk)
class Cls(properties.Owner):
    """Owner with a required boolean property."""

    value = properties.Boolean(required=True)
class Cls(properties.Owner):
    """Owner with a boolean property defaulting to False."""

    value = properties.Boolean(default=False)
class Cls(properties.Owner):
    """Owner with an optional boolean property."""

    value = properties.Boolean()
class CopyDataExternalEndpoint(properties.Owner):
    """
    CopyDataExternalEndpoint represents endpoints for volumes not managed
    by vdsm, such as Managed Block Storage volumes.

    The endpoint is addressed by a URL and guarded by an external lease
    whose metadata tracks the job id, status and generation.
    """

    url = properties.String(required=True)
    generation = properties.Integer(
        required=False, minval=0, maxval=sc.MAX_GENERATION)
    format = properties.String(required=True)
    sparse = properties.Boolean(required=False)
    create = properties.Boolean(required=False)
    is_zero = properties.Boolean(required=True)

    def __init__(self, params, host_id, job_id):
        self.lease = validators.Lease(params.get('lease'))
        self.url = params.get('url')
        self.generation = params.get('generation')
        self.format = params.get('format')
        self.sparse = params.get('sparse', False)
        self.create = params.get('create', True)
        self.is_zero = params.get('is_zero', False)
        self.host_id = host_id
        self.job_id = job_id

    @property
    def locks(self):
        # Only the external lease protects this endpoint; there is no
        # vdsm-managed image to lock.
        return [
            sd.ExternalLease(
                self.host_id, self.lease.sd_id, self.lease.lease_id),
        ]

    @property
    def path(self):
        return self.url

    def is_invalid_vm_conf_disk(self):
        return False

    @property
    def qemu_format(self):
        return self.format

    @property
    def backing_path(self):
        # External volumes have no backing chain.
        return None

    @property
    def qcow2_compat(self):
        return "1.1"

    @property
    def backing_qemu_format(self):
        return None

    @property
    def recommends_unordered_writes(self):
        return self.format == "raw" and not self.sparse

    @property
    def requires_create(self):
        return self.create

    @property
    def zero_initialized(self):
        return self.is_zero

    @contextmanager
    def volume_operation(self):
        """
        Validate the external lease metadata before the operation and
        record the final job status (and bump the generation on success)
        after it.
        """
        dom = sdCache.produce_manifest(self.lease.sd_id)
        metadata = dom.get_lvb(self.lease.lease_id)
        log.info("Current lease %s metadata: %r",
                 self.lease.sd_id, metadata)

        self._validate_metadata(metadata)
        try:
            yield
        except Exception:
            self._update_metadata(dom, metadata, sc.JOB_STATUS_FAILED)
            raise

        self._update_metadata(dom, metadata, sc.JOB_STATUS_SUCCEEDED)

    @contextmanager
    def prepare(self):
        # External volumes need no prepare/teardown.
        yield

    def _validate_metadata(self, metadata):
        # Verify that the lease really belongs to this pending job at the
        # expected generation before touching the external volume.
        if metadata.get("type") != "JOB":
            # Fix: the original message read "Metadata type is not
            # support" — corrected the grammar of the error message.
            raise se.UnsupportedOperation(
                "Metadata type is not supported",
                expected="JOB",
                actual=metadata.get("type"))

        if metadata.get("job_id") != self.job_id:
            raise se.UnsupportedOperation(
                "job_id on lease doesn't match passed job_id",
                expected=self.job_id,
                actual=metadata.get("job_id"))

        if metadata.get("job_status") != sc.JOB_STATUS_PENDING:
            raise se.JobStatusMismatch(
                sc.JOB_STATUS_PENDING, metadata.get("job_status"))

        if metadata.get("generation") != self.generation:
            raise se.GenerationMismatch(
                self.generation, metadata.get("generation"))

    def _update_metadata(self, dom, metadata, job_status):
        # Write back lease metadata with the new status; the generation is
        # advanced only when the job succeeded.
        updated_metadata = metadata.copy()
        updated_metadata["modified"] = int(time.time())
        updated_metadata["host_hardware_id"] = host.uuid()
        updated_metadata["job_status"] = job_status

        if job_status == sc.JOB_STATUS_SUCCEEDED:
            updated_metadata["generation"] = su.next_generation(
                metadata["generation"])

        log.info("Updated lease %s metadata: %r",
                 self.lease.sd_id, updated_metadata)
        dom.set_lvb(self.lease.lease_id, updated_metadata)
class CopyDataDivEndpoint(properties.Owner):
    """
    Endpoint for a vdsm-managed volume (domain/image/volume triple) used
    as the source or destination of a copy-data operation.
    """

    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    generation = properties.Integer(
        required=False, minval=0, maxval=sc.MAX_GENERATION)
    prepared = properties.Boolean(default=False)

    def __init__(self, params, host_id, writable, is_destination=False,
                 lock_image=True):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self.generation = params.get('generation')
        self.prepared = params.get('prepared')
        self.is_destination = is_destination
        self.lock_image = lock_image
        self._host_id = host_id
        self._writable = writable
        # Lazily produced volume object; see the volume property.
        self._vol = None

    @property
    def locks(self):
        # A shared lock is always required
        ret = [rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED)]
        # An exclusive lock will be taken if source and destination images
        # are not the same, otherwise there will be a deadlock.
        if self.lock_image:
            img_ns = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
            mode = rm.EXCLUSIVE if self._writable else rm.SHARED
            ret.append(rm.Lock(img_ns, self.img_id, mode))
        # Writable endpoints also take the volume lease when the domain
        # supports volume leases.
        if self._writable:
            dom = sdCache.produce_manifest(self.sd_id)
            if dom.hasVolumeLeases():
                ret.append(
                    volume.VolumeLease(self._host_id, self.sd_id,
                                       self.img_id, self.vol_id))
        return ret

    @property
    def path(self):
        return self.volume.getVolumePath()

    def is_invalid_vm_conf_disk(self):
        return workarounds.invalid_vm_conf_disk(self.volume)

    @property
    def qemu_format(self):
        # qemu format string ("raw"/"qcow2") for this volume's format.
        return sc.fmt2str(self.volume.getFormat())

    @property
    def backing_path(self):
        parent_vol = self.volume.getParentVolume()
        if not parent_vol:
            return None
        return volume.getBackingVolumePath(self.img_id, parent_vol.volUUID)

    @property
    def qcow2_compat(self):
        dom = sdCache.produce_manifest(self.sd_id)
        return dom.qcow2_compat()

    @property
    def backing_qemu_format(self):
        parent_vol = self.volume.getParentVolume()
        if not parent_vol:
            return None
        return sc.fmt2str(parent_vol.getFormat())

    @property
    def recommends_unordered_writes(self):
        dom = sdCache.produce_manifest(self.sd_id)
        return dom.recommends_unordered_writes(self.volume.getFormat())

    @property
    def requires_create(self):
        return self.volume.requires_create()

    @property
    def zero_initialized(self):
        return self.volume.zero_initialized()

    @property
    def volume(self):
        # Produce the volume on first access and cache it.
        if self._vol is None:
            dom = sdCache.produce_manifest(self.sd_id)
            self._vol = dom.produceVolume(self.img_id, self.vol_id)
        return self._vol

    def volume_operation(self):
        # Guard the operation with the volume generation.
        return self.volume.operation(self.generation)

    @contextmanager
    def prepare(self):
        # If the caller already prepared the volume, do not prepare or
        # tear it down here.
        if self.prepared:
            yield
        else:
            self.volume.prepare(rw=self._writable, justme=False,
                                allow_illegal=self.is_destination)
            try:
                yield
            finally:
                self.volume.teardown(self.sd_id, self.vol_id, justme=False)