class VolumeInfo(properties.Owner):
    """
    VolumeInfo should be used for performing operations on any volume in a
    chain except shared volume.

    A volume is prepared in read-write mode. While performing operations,
    the volume is not set as illegal.
    """
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    generation = properties.Integer(required=False, minval=0,
                                    maxval=sc.MAX_GENERATION)

    def __init__(self, params, host_id):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self.generation = params.get('generation')
        self._host_id = host_id
        # Produced lazily by the volume property and cached.
        self._vol = None

    @property
    def locks(self):
        # Domain is locked shared, the image exclusive; a volume lease is
        # added only on domains that support volume leases.
        image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        locks = [
            rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.ResourceManagerLock(image_namespace, self.img_id,
                                   rm.EXCLUSIVE),
        ]
        domain = sdCache.produce_manifest(self.sd_id)
        if domain.hasVolumeLeases():
            locks.append(volume.VolumeLease(
                self._host_id, self.sd_id, self.img_id, self.vol_id))
        return locks

    @property
    def path(self):
        return self.volume.getVolumePath()

    @property
    def volume(self):
        if self._vol is None:
            domain = sdCache.produce_manifest(self.sd_id)
            self._vol = domain.produceVolume(self.img_id, self.vol_id)
        return self._vol

    def volume_operation(self):
        # The volume is not marked illegal while the operation runs.
        return self.volume.operation(requested_gen=self.generation,
                                     set_illegal=False)

    @contextmanager
    def prepare(self):
        # Prepare the whole chain read-write; always tear down on exit.
        self.volume.prepare(rw=True, justme=False)
        try:
            yield
        finally:
            self.volume.teardown(self.sd_id, self.vol_id, justme=False)
class JobMetadata(properties.Owner):
    """
    JobMetadata - stored on external leases.
    """
    # BUG FIX: the values argument must be a tuple; the original
    # values=("JOB") is just the parenthesized string "JOB", not a
    # one-element tuple, so the enum was not validating against the
    # intended single value.
    type = properties.Enum(required=True, values=("JOB",))
    generation = properties.Integer(required=True, minval=0,
                                    maxval=sc.MAX_GENERATION)
    job_id = properties.UUID(required=True)
    job_status = properties.Enum(required=True,
                                 values=("PENDING", "FAILED", "SUCCEEDED",
                                         "FENCED"))

    def __init__(self, params):
        self.type = params.get("type")
        self.generation = params.get("generation")
        self.job_id = params.get("job_id")
        self.job_status = params.get("job_status")
class VolumeAttributes(properties.Owner):
    """
    Validated set of updatable volume attributes.

    At least one attribute must be provided; type and legality are
    validated against the supported values.
    """
    generation = properties.Integer(required=False, minval=0,
                                    maxval=sc.MAX_GENERATION)
    description = properties.String(required=False)

    def __init__(self, params):
        self.generation = params.get("generation")
        self.description = params.get("description")
        # TODO use properties.Enum when it supports optional enum
        self.type = params.get("type")
        # TODO use properties.Enum when it supports optional enum
        self.legality = params.get("legality")
        self._validate()

    def _validate(self):
        if self._is_empty():
            raise ValueError("No attributes to update")
        self._validate_type()
        self._validate_legality()

    def _is_empty(self):
        # True when the caller passed nothing to update.
        attrs = (self.description, self.generation, self.legality, self.type)
        return all(value is None for value in attrs)

    def _validate_type(self):
        # Only the shared volume type is accepted as a new value.
        if self.type is None:
            return
        if self.type != sc.type2name(sc.SHARED_VOL):
            raise ValueError("Volume type not supported %s" % self.type)

    def _validate_legality(self):
        if self.legality is None:
            return
        if self.legality not in (sc.LEGAL_VOL, sc.ILLEGAL_VOL):
            raise ValueError("Legality not supported %s" % self.legality)

    def __repr__(self):
        fields = ", ".join(
            "%s=%r" % (name, value)
            for name, value in vars(self).items()
            if value is not None
        )
        return "<VolumeAttributes %s at 0x%x>" % (fields, id(self))
class BackupConfig(properties.Owner):
    """
    Validated configuration for a backup operation.

    Raises exception.BackupError when a disk requests an incremental
    backup while no starting checkpoint was provided.
    """
    backup_id = properties.UUID(required=True)
    # BUG FIX: this was declared with required='' — an accidental falsy
    # value that only worked because '' is falsy.  Made the intent
    # explicit with required=False.
    # NOTE(review): the sibling to_checkpoint_id uses default=''; switching
    # this one to default='' as well would break the `is None` check in
    # __init__ below — confirm the intended default before aligning them.
    from_checkpoint_id = properties.UUID(required=False)
    to_checkpoint_id = properties.UUID(default='')
    require_consistency = properties.Boolean()
    creation_time = properties.Integer(minval=0)

    def __init__(self, backup_config):
        self.backup_id = backup_config.get("backup_id")
        self.from_checkpoint_id = backup_config.get("from_checkpoint_id")
        self.to_checkpoint_id = backup_config.get("to_checkpoint_id")
        self.require_consistency = backup_config.get("require_consistency")
        self.creation_time = backup_config.get("creation_time")
        self.disks = [DiskConfig(d) for d in backup_config.get("disks", ())]

        # An incremental disk backup requires a checkpoint to start from.
        for disk in self.disks:
            if (self.from_checkpoint_id is None
                    and disk.backup_mode == MODE_INCREMENTAL):
                raise exception.BackupError(
                    reason="Cannot start an incremental backup for disk, "
                           "full backup is requested",
                    backup=self.backup_id,
                    disk=disk)
class SubchainInfo(properties.Owner):
    """
    Describes a base/top pair of volumes inside one image chain, as used
    by merge-style operations.
    """
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    top_id = properties.UUID(required=True)
    base_id = properties.UUID(required=True)
    base_generation = properties.Integer(required=False, minval=0,
                                         maxval=sc.MAX_GENERATION)

    def __init__(self, params, host_id):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.top_id = params.get('top_id')
        self.base_id = params.get('base_id')
        self.base_generation = params.get('base_generation')
        self.host_id = host_id
        # Produced lazily by the properties below and cached.
        self._base_vol = None
        self._top_vol = None
        self._chain = None

    @property
    def base_vol(self):
        if self._base_vol is None:
            domain = sdCache.produce_manifest(self.sd_id)
            self._base_vol = domain.produceVolume(self.img_id, self.base_id)
        return self._base_vol

    @property
    def top_vol(self):
        if self._top_vol is None:
            domain = sdCache.produce_manifest(self.sd_id)
            self._top_vol = domain.produceVolume(self.img_id, self.top_id)
        return self._top_vol

    @property
    def chain(self):
        if self._chain is None:
            domain = sdCache.produce_manifest(self.sd_id)
            repo_path = domain.getRepoPath()
            image_repo = image.Image(repo_path)
            chain = image_repo.getChain(self.sd_id, self.img_id)
            # When the VM is cloned from a template, the root volume of the
            # volumes chain is a shared volume. Shared volumes are not
            # returned in the volumes list when calling Image.getChain,
            # hence we have to add that volume manually.
            template = chain[0].getParentVolume()
            if template is not None:
                if not template.isShared():
                    raise se.UnexpectedVolumeState(
                        template.volUUID, "Shared", "Not Shared")
                chain.insert(0, template)
            self._chain = [vol.volUUID for vol in chain]
        return self._chain

    @property
    def locks(self):
        image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        locks = [
            rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.ResourceManagerLock(image_namespace, self.img_id,
                                   rm.EXCLUSIVE),
        ]
        domain = sdCache.produce_manifest(self.sd_id)
        if domain.hasVolumeLeases():
            # We take only the base lease since no other volumes are
            # modified.
            locks.append(
                volume.VolumeLease(self.host_id, self.sd_id, self.img_id,
                                   self.base_id))
        return locks

    def validate(self):
        # Base is checked before top to preserve the error order.
        for vol_id in (self.base_id, self.top_id):
            if vol_id not in self.chain:
                raise se.VolumeIsNotInChain(self.sd_id, self.img_id, vol_id)

        # The base volume must be the parent of the top volume.
        if self.top_vol.getParent() != self.base_id:
            raise se.WrongParentVolume(self.base_id, self.top_id)

        for vol in (self.base_vol, self.top_vol):
            if vol.isShared():
                raise se.SharedVolumeNonWritable(vol)

    def volume_operation(self):
        return self.base_vol.operation(requested_gen=self.base_generation,
                                       set_illegal=False)

    @contextmanager
    def prepare(self):
        # Prepare every volume from the root of the chain up to and
        # including the top volume; only the base is writable.
        top_index = self.chain.index(self.top_id)
        domain = sdCache.produce_manifest(self.sd_id)
        for vol_id in self.chain[:top_index + 1]:
            vol = domain.produceVolume(self.img_id, vol_id)
            # TODO: improve this later to use subchain.top_vol and
            # subchain.base_vol.
            vol.prepare(rw=(vol_id == self.base_id), justme=True)
        try:
            yield
        finally:
            self.top_vol.teardown(self.sd_id, self.top_id)

    def __repr__(self):
        return ("<SubchainInfo sd_id=%s, img_id=%s, top_id=%s, base_id=%s "
                "base_generation=%s at 0x%x>") % (
            self.sd_id,
            self.img_id,
            self.top_id,
            self.base_id,
            self.base_generation,  # May be None
            id(self),
        )
class Cls(properties.Owner):
    """Owner exposing a single required integer property."""

    value = properties.Integer(required=True)

    def __init__(self, value=None):
        # Assignment goes through the descriptor, which enforces required.
        self.value = value
class Cls(properties.Owner):
    """Owner exposing an optional integer property bounded above by 100."""

    value = properties.Integer(maxval=100)
class Cls(properties.Owner):
    """Owner exposing an optional non-negative integer property."""

    value = properties.Integer(minval=0)
class CopyDataExternalEndpoint(properties.Owner):
    """
    CopyDataExternalEndpoint represents endpoints for volumes not managed
    by vdsm, such as Managed Block Storage volumes.

    The endpoint is protected by an external lease; job state is kept in
    the metadata stored on that lease.
    """
    url = properties.String(required=True)
    generation = properties.Integer(required=False, minval=0,
                                    maxval=sc.MAX_GENERATION)
    format = properties.String(required=True)
    sparse = properties.Boolean(required=False)
    create = properties.Boolean(required=False)
    is_zero = properties.Boolean(required=True)

    def __init__(self, params, host_id, job_id):
        self.lease = validators.Lease(params.get('lease'))
        self.url = params.get('url')
        self.generation = params.get('generation')
        self.format = params.get('format')
        self.sparse = params.get('sparse', False)
        self.create = params.get('create', True)
        self.is_zero = params.get('is_zero', False)
        self.host_id = host_id
        self.job_id = job_id

    @property
    def locks(self):
        # Only the external lease protects this endpoint; no image or
        # domain resource locks are taken.
        return [
            sd.ExternalLease(
                self.host_id, self.lease.sd_id, self.lease.lease_id),
        ]

    @property
    def path(self):
        return self.url

    def is_invalid_vm_conf_disk(self):
        return False

    @property
    def qemu_format(self):
        return self.format

    @property
    def backing_path(self):
        # External volumes never have a backing file.
        return None

    @property
    def qcow2_compat(self):
        return "1.1"

    @property
    def backing_qemu_format(self):
        return None

    @property
    def recommends_unordered_writes(self):
        return self.format == "raw" and not self.sparse

    @property
    def requires_create(self):
        return self.create

    @property
    def zero_initialized(self):
        return self.is_zero

    @contextmanager
    def volume_operation(self):
        """
        Validate the job metadata stored on the external lease before the
        wrapped block runs, and record FAILED/SUCCEEDED on exit.
        """
        dom = sdCache.produce_manifest(self.lease.sd_id)
        metadata = dom.get_lvb(self.lease.lease_id)
        # NOTE(review): logs the domain id for "lease %s" — lease_id may
        # have been intended; confirm before changing.
        log.info("Current lease %s metadata: %r",
                 self.lease.sd_id, metadata)
        self._validate_metadata(metadata)
        try:
            yield
        except Exception:
            self._update_metadata(dom, metadata, sc.JOB_STATUS_FAILED)
            raise
        self._update_metadata(dom, metadata, sc.JOB_STATUS_SUCCEEDED)

    @contextmanager
    def prepare(self):
        # External volumes need no prepare/teardown.
        yield

    def _validate_metadata(self, metadata):
        """
        Verify the lease metadata matches this job: type, job_id, PENDING
        status and generation.  Raises on any mismatch.
        """
        # BUG FIX: error message read "Metadata type is not support".
        if metadata.get("type") != "JOB":
            raise se.UnsupportedOperation(
                "Metadata type is not supported",
                expected="JOB",
                actual=metadata.get("type"))
        if metadata.get("job_id") != self.job_id:
            raise se.UnsupportedOperation(
                "job_id on lease doesn't match passed job_id",
                expected=self.job_id,
                actual=metadata.get("job_id"))
        if metadata.get("job_status") != sc.JOB_STATUS_PENDING:
            raise se.JobStatusMismatch(
                sc.JOB_STATUS_PENDING, metadata.get("job_status"))
        if metadata.get("generation") != self.generation:
            raise se.GenerationMismatch(
                self.generation, metadata.get("generation"))

    def _update_metadata(self, dom, metadata, job_status):
        """
        Write back lease metadata with the new job_status; the generation
        is advanced only on success.
        """
        updated_metadata = metadata.copy()
        updated_metadata["modified"] = int(time.time())
        updated_metadata["host_hardware_id"] = host.uuid()
        updated_metadata["job_status"] = job_status
        if job_status == sc.JOB_STATUS_SUCCEEDED:
            updated_metadata["generation"] = su.next_generation(
                metadata["generation"])
        log.info("Updated lease %s metadata: %r",
                 self.lease.sd_id, updated_metadata)
        dom.set_lvb(self.lease.lease_id, updated_metadata)
class CopyDataDivEndpoint(properties.Owner):
    """
    Endpoint for a vdsm-managed volume taking part in a copy-data job.
    """
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    generation = properties.Integer(required=False, minval=0,
                                    maxval=sc.MAX_GENERATION)
    prepared = properties.Boolean(default=False)

    def __init__(self, params, host_id, writable, is_destination=False,
                 lock_image=True):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self.generation = params.get('generation')
        self.prepared = params.get('prepared')
        self.is_destination = is_destination
        self.lock_image = lock_image
        self._host_id = host_id
        self._writable = writable
        # Produced lazily by the volume property and cached.
        self._vol = None

    @property
    def locks(self):
        # A shared lock is always required.
        locks = [rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED)]
        # An exclusive lock will be taken if source and destination images
        # are not the same, otherwise there will be a deadlock.
        if self.lock_image:
            image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
            image_mode = rm.EXCLUSIVE if self._writable else rm.SHARED
            locks.append(rm.Lock(image_namespace, self.img_id, image_mode))
        if self._writable:
            domain = sdCache.produce_manifest(self.sd_id)
            if domain.hasVolumeLeases():
                locks.append(
                    volume.VolumeLease(self._host_id, self.sd_id,
                                       self.img_id, self.vol_id))
        return locks

    @property
    def path(self):
        return self.volume.getVolumePath()

    def is_invalid_vm_conf_disk(self):
        return workarounds.invalid_vm_conf_disk(self.volume)

    @property
    def qemu_format(self):
        return sc.fmt2str(self.volume.getFormat())

    @property
    def backing_path(self):
        parent = self.volume.getParentVolume()
        if not parent:
            return None
        return volume.getBackingVolumePath(self.img_id, parent.volUUID)

    @property
    def qcow2_compat(self):
        domain = sdCache.produce_manifest(self.sd_id)
        return domain.qcow2_compat()

    @property
    def backing_qemu_format(self):
        parent = self.volume.getParentVolume()
        return sc.fmt2str(parent.getFormat()) if parent else None

    @property
    def recommends_unordered_writes(self):
        domain = sdCache.produce_manifest(self.sd_id)
        return domain.recommends_unordered_writes(self.volume.getFormat())

    @property
    def requires_create(self):
        return self.volume.requires_create()

    @property
    def zero_initialized(self):
        return self.volume.zero_initialized()

    @property
    def volume(self):
        if self._vol is None:
            domain = sdCache.produce_manifest(self.sd_id)
            self._vol = domain.produceVolume(self.img_id, self.vol_id)
        return self._vol

    def volume_operation(self):
        return self.volume.operation(self.generation)

    @contextmanager
    def prepare(self):
        if self.prepared:
            # The caller already prepared the volume; nothing to do here.
            yield
            return
        self.volume.prepare(rw=self._writable, justme=False,
                            allow_illegal=self.is_destination)
        try:
            yield
        finally:
            self.volume.teardown(self.sd_id, self.vol_id, justme=False)
class CopyDataDivEndpoint(properties.Owner):
    """
    Endpoint for a vdsm-managed volume taking part in a copy-data job.
    """
    sd_id = properties.UUID(required=True)
    img_id = properties.UUID(required=True)
    vol_id = properties.UUID(required=True)
    generation = properties.Integer(required=False, minval=0,
                                    maxval=sc.MAX_GENERATION)

    def __init__(self, params, host_id, writable):
        self.sd_id = params.get('sd_id')
        self.img_id = params.get('img_id')
        self.vol_id = params.get('vol_id')
        self.generation = params.get('generation')
        self._host_id = host_id
        self._writable = writable
        # Produced lazily by the volume property and cached.
        self._vol = None

    @property
    def locks(self):
        # Domain is always locked shared; the image lock mode follows the
        # requested access, and a volume lease is added only for writable
        # endpoints on domains that support volume leases.
        image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        image_mode = rm.EXCLUSIVE if self._writable else rm.SHARED
        locks = [
            rm.ResourceManagerLock(sc.STORAGE, self.sd_id, rm.SHARED),
            rm.ResourceManagerLock(image_namespace, self.img_id, image_mode),
        ]
        if self._writable:
            domain = sdCache.produce_manifest(self.sd_id)
            if domain.hasVolumeLeases():
                locks.append(volume.VolumeLease(
                    self._host_id, self.sd_id, self.img_id, self.vol_id))
        return locks

    @property
    def path(self):
        return self.volume.getVolumePath()

    def is_invalid_vm_conf_disk(self):
        return workarounds.invalid_vm_conf_disk(self.volume)

    @property
    def qemu_format(self):
        return sc.fmt2str(self.volume.getFormat())

    @property
    def backing_path(self):
        parent = self.volume.getParentVolume()
        if not parent:
            return None
        return volume.getBackingVolumePath(self.img_id, parent.volUUID)

    @property
    def qcow2_compat(self):
        domain = sdCache.produce_manifest(self.sd_id)
        return domain.qcow2_compat()

    @property
    def backing_qemu_format(self):
        parent = self.volume.getParentVolume()
        return sc.fmt2str(parent.getFormat()) if parent else None

    @property
    def preallocation(self):
        # Preallocate only when the domain can create sparse volumes but
        # this volume is of the preallocated type.
        domain = sdCache.produce_manifest(self.sd_id)
        if (domain.supportsSparseness
                and self.volume.getType() == sc.PREALLOCATED_VOL):
            return qemuimg.PREALLOCATION.FALLOC
        return None

    @property
    def volume(self):
        if self._vol is None:
            domain = sdCache.produce_manifest(self.sd_id)
            self._vol = domain.produceVolume(self.img_id, self.vol_id)
        return self._vol

    def volume_operation(self):
        return self.volume.operation(self.generation)

    @contextmanager
    def prepare(self):
        self.volume.prepare(rw=self._writable, justme=False)
        try:
            yield
        finally:
            self.volume.teardown(self.sd_id, self.vol_id, justme=False)