def validateCreateVolumeParams(self, volFormat, srcVolUUID, diskType=None,
                               preallocate=None, add_bitmaps=False):
    """
    Validate create volume parameters
    """
    if volFormat not in sc.VOL_FORMAT:
        raise se.IncorrectFormat(volFormat)

    # Volumes with a parent must be cow
    if srcVolUUID != sc.BLANK_UUID and volFormat != sc.COW_FORMAT:
        raise se.IncorrectFormat(sc.type2name(volFormat))

    if diskType is not None and diskType not in sc.VOL_DISKTYPE:
        raise se.InvalidParameterException("DiskType", diskType)

    if preallocate is not None and preallocate not in sc.VOL_TYPE:
        raise se.IncorrectType(preallocate)

    if add_bitmaps:
        if srcVolUUID == sc.BLANK_UUID:
            raise se.UnsupportedOperation(
                "Cannot add bitmaps for volume without parent volume",
                srcVolUUID=srcVolUUID,
                add_bitmaps=add_bitmaps)

        if not self.supports_bitmaps_operations():
            raise se.UnsupportedOperation(
                "Cannot perform bitmaps operations on "
                "storage domain version < 4",
                domain_version=self.getVersion(),
                add_bitmaps=add_bitmaps)
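A minimal sketch of how the parent-format rule above plays out, using stand-in constants since vdsm's sc module is not imported here; the BLANK_UUID value and the numeric format codes are assumptions:

# Assumed stand-ins for sc.BLANK_UUID, sc.COW_FORMAT and sc.RAW_FORMAT.
BLANK_UUID = "00000000-0000-0000-0000-000000000000"
COW_FORMAT = 4
RAW_FORMAT = 5

def parent_format_ok(src_vol_uuid, vol_format):
    # A volume with a real parent must be COW so it can reference the
    # parent in its backing chain; a blank parent allows any format.
    return src_vol_uuid == BLANK_UUID or vol_format == COW_FORMAT

assert parent_format_ok(BLANK_UUID, RAW_FORMAT)           # base raw volume: ok
assert not parent_format_ok("hypothetical-parent-uuid",
                            RAW_FORMAT)                   # raw child: rejected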
def _validate(self):
    if self._vol_info.volume.getFormat() != sc.COW_FORMAT:
        raise se.UnsupportedOperation("Volume is not in COW format",
                                      vol_uuid=self._vol_info.vol_id)

    if self._vol_info.volume.isShared():
        raise se.UnsupportedOperation(
            "Cannot remove bitmaps from shared volume",
            vol_uuid=self._vol_info.vol_id)
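A hedged illustration of the same two preconditions with a stand-in volume object; FakeVolume and the numeric COW code are hypothetical:

class FakeVolume:
    # Hypothetical stand-in for the real volume object.
    def __init__(self, fmt, shared):
        self._fmt = fmt
        self._shared = shared

    def getFormat(self):
        return self._fmt

    def isShared(self):
        return self._shared

def can_remove_bitmaps(vol, cow_format=4):
    # Mirrors the checks above: bitmaps live only in COW (qcow2) metadata,
    # and a shared template volume must not be modified.
    return vol.getFormat() == cow_format and not vol.isShared()

assert can_remove_bitmaps(FakeVolume(fmt=4, shared=False))
assert not can_remove_bitmaps(FakeVolume(fmt=5, shared=False))
assert not can_remove_bitmaps(FakeVolume(fmt=4, shared=True))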
def _validate_copy_bitmaps(self, src_format, dst_format):
    if self._copy_bitmaps and qemuimg.FORMAT.RAW in (src_format, dst_format):
        raise se.UnsupportedOperation(
            "Cannot copy bitmaps from/to volumes with raw "
            "format",
            src_vol=self._source.path,
            dst_vol=self._dest.path)
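A small sketch of the copy-bitmaps check using qemu-img format strings; the literal values standing in for qemuimg.FORMAT.RAW and qemuimg.FORMAT.QCOW2 are assumptions:

RAW = "raw"      # assumed value of qemuimg.FORMAT.RAW
QCOW2 = "qcow2"  # assumed value of qemuimg.FORMAT.QCOW2

def copy_bitmaps_allowed(copy_bitmaps, src_format, dst_format):
    # Bitmaps are stored in qcow2 metadata, so a raw endpoint on either
    # side makes the copy impossible.
    return not (copy_bitmaps and RAW in (src_format, dst_format))

assert copy_bitmaps_allowed(True, QCOW2, QCOW2)
assert not copy_bitmaps_allowed(True, RAW, QCOW2)
assert copy_bitmaps_allowed(False, RAW, QCOW2)  # no bitmaps requested: ok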
def _validate_metadata(self, metadata):
    if metadata.get("type") != "JOB":
        raise se.UnsupportedOperation("Metadata type is not supported",
                                      expected="JOB",
                                      actual=metadata.get("type"))

    if metadata.get("job_id") != self.job_id:
        raise se.UnsupportedOperation(
            "job_id on lease doesn't match passed job_id",
            expected=self.job_id,
            actual=metadata.get("job_id"))

    if metadata.get("job_status") != sc.JOB_STATUS_PENDING:
        raise se.JobStatusMismatch(sc.JOB_STATUS_PENDING,
                                   metadata.get("job_status"))

    if metadata.get("generation") != self.generation:
        raise se.GenerationMismatch(self.generation,
                                    metadata.get("generation"))
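For illustration, a lease metadata dict that would pass all four checks above; the values are hypothetical, and "PENDING" is an assumption for sc.JOB_STATUS_PENDING:

# Hypothetical example values; real ones come from the job being run.
metadata = {
    "type": "JOB",                      # lease must belong to a job
    "job_id": "hypothetical-job-uuid",  # must equal self.job_id
    "job_status": "PENDING",            # assumed sc.JOB_STATUS_PENDING
    "generation": 1,                    # must equal self.generation
}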
def _run(self):
    sd_manifest = sdCache.produce_manifest(self.params.sd_id)
    if not sd_manifest.supports_device_reduce():
        raise se.UnsupportedOperation(
            "Storage domain does not support reduce operation",
            sdUUID=sd_manifest.sdUUID(),
            sdType=sd_manifest.getStorageType())

    # TODO: we assume at this point that the domain isn't active and can't
    # be activated - we need to ensure that.
    with rm.acquireResource(STORAGE, self.params.sd_id, rm.EXCLUSIVE):
        with sd_manifest.domain_id(self.host_id), \
                sd_manifest.domain_lock(self.host_id):
            sd_manifest.reduceVG(self.params.guid)
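The nesting order here matters: the exclusive storage resource is taken first, then the host id registration, then the clustered domain lock, and they release in reverse order. A schematic of the same nesting with a hypothetical no-op lock, just to show the ordering:

from contextlib import contextmanager

@contextmanager
def _noop_lock(name):
    # Hypothetical stand-in for the real locks; shows ordering only.
    print("acquire", name)
    try:
        yield
    finally:
        print("release", name)

# Same nesting as _run(): domain resource first (blocks concurrent domain
# operations on this host), then host id, then the cluster domain lock.
with _noop_lock("storage-resource"):
    with _noop_lock("host-id"), _noop_lock("domain-lock"):
        pass  # reduceVG() would run here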
def attach_volume(vol_id, connection_info):
    """
    Attach volume with os-brick.
    """
    db = managedvolumedb.open()
    with closing(db):
        _add_volume(db, vol_id, connection_info)

        log.debug("Starting attach volume %s connection_info=%s",
                  vol_id, connection_info)

        try:
            attachment = run_helper("attach", connection_info)
            try:
                path = _resolve_path(vol_id, connection_info, attachment)
                db.update_volume(
                    vol_id,
                    path=path,
                    attachment=attachment,
                    multipath_id=attachment.get("multipath_id"))
                _invalidate_lvm_filter(attachment)
                volume_type = connection_info["driver_volume_type"]
                if volume_type not in ("rbd", "iscsi"):
                    raise se.UnsupportedOperation(
                        "Unsupported volume type, supported types are: "
                        "rbd, iscsi")
                _add_udev_rule(vol_id, path)
            except:
                _silent_detach(connection_info, attachment)
                raise
        except:
            _silent_remove(db, vol_id)
            raise

    log.debug("Attached volume %s attachment=%s", vol_id, attachment)

    return {
        "result": {
            'attachment': attachment,
            'path': path,
            'vol_id': vol_id
        }
    }
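attach_volume uses nested try blocks so that each failure undoes exactly the work already done: an inner failure detaches the volume, and any failure removes the database record, while the original error keeps propagating. A minimal standalone sketch of this unwind pattern with hypothetical steps, not vdsm code:

def step(name, fail=False):
    # Hypothetical unit of work used only for this sketch.
    if fail:
        raise RuntimeError(name + " failed")
    print(name, "done")

def attach_sketch(fail_at_finish=False):
    step("add db record")
    try:
        step("attach")
        try:
            step("finish", fail=fail_at_finish)
        except:
            step("silent detach")   # undo the attach, keep original error
            raise
    except:
        step("silent remove")       # undo the db record, keep original error
        raise

# attach_sketch() completes; attach_sketch(fail_at_finish=True) detaches,
# removes the record, then re-raises the RuntimeError.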
def validateCreateVolumeParams(cls, volFormat, srcVolUUID, diskType=None,
                               preallocate=None, add_bitmaps=False):
    """
    Validate create volume parameters
    """
    if volFormat not in sc.VOL_FORMAT:
        raise se.IncorrectFormat(volFormat)

    # Volumes with a parent must be cow
    if srcVolUUID != sc.BLANK_UUID and volFormat != sc.COW_FORMAT:
        raise se.IncorrectFormat(sc.type2name(volFormat))

    if diskType is not None and diskType not in sc.VOL_DISKTYPE:
        raise se.InvalidParameterException("DiskType", diskType)

    if preallocate is not None and preallocate not in sc.VOL_TYPE:
        raise se.IncorrectType(preallocate)

    if add_bitmaps and srcVolUUID == sc.BLANK_UUID:
        raise se.UnsupportedOperation(
            "Cannot add bitmaps for volume without parent volume",
            srcVolUUID=srcVolUUID,
            add_bitmaps=add_bitmaps)
def acquire(self, hostId, lease, lvb=False):
    if lvb and not supports_lvb:
        raise se.UnsupportedOperation(
            "This sanlock version does not support LVB")

    self.log.info("Acquiring %s for host id %s, lvb=%s",
                  lease, hostId, lvb)

    # If host id was acquired by this thread, this will return immediately.
    # If host id is being acquired asynchronously by the domain monitor,
    # wait until the domain monitor finds that host id was acquired.
    #
    # IMPORTANT: This must be done *before* entering the lock. Once we
    # enter the lock, the domain monitor cannot check if host id was
    # acquired, since hasHostId() is using the same lock.
    if not self._ready.wait(self.ACQUIRE_HOST_ID_TIMEOUT):
        raise se.AcquireHostIdFailure(
            "Timeout acquiring host id, cannot acquire %s (id=%s)"
            % (lease, hostId))

    with self._lock, SANLock._process_lock:
        while True:
            if SANLock._process_fd is None:
                try:
                    SANLock._process_fd = sanlock.register()
                except sanlock.SanlockException as e:
                    raise se.AcquireLockFailure(
                        self._sdUUID, e.errno,
                        "Cannot register to sanlock", str(e))

                self.log.info("Using sanlock process fd %d",
                              SANLock._process_fd)

            # TODO: remove once sanlock 3.8.3 is available on centos.
            extra_args = {"lvb": lvb} if supports_lvb else {}

            try:
                sanlock.acquire(
                    self._lockspace_name,
                    lease.name.encode("utf-8"),
                    [(lease.path, lease.offset)],
                    slkfd=SANLock._process_fd,
                    **extra_args)
            except sanlock.SanlockException as e:
                if e.errno != errno.EPIPE:
                    raise se.AcquireLockFailure(
                        self._sdUUID, e.errno,
                        "Cannot acquire %s" % (lease,), str(e))

                # If we hold leases, we just lost them, since sanlock is
                # releasing all process leases when the process fd is
                # closed. The only way to recover is to panic; child
                # processes run by vdsm will be killed, and vdsm will lose
                # the SPM role.
                if SANLock._lease_count > 0:
                    panic("Sanlock process fd was closed while "
                          "holding {} leases: {}"
                          .format(SANLock._lease_count, e))

                self.log.warning("Sanlock process fd was closed: %s", e)
                SANLock._process_fd = None
                continue

            SANLock._lease_count += 1
            break

    self.log.info("Successfully acquired %s for host id %s",
                  lease, hostId)
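The retry loop above handles one specific failure: sanlock closed the registered process fd, which surfaces as EPIPE, so the code re-registers and retries instead of failing the caller. A schematic of that register-and-retry pattern with hypothetical stand-ins for the sanlock calls (the panic-on-held-leases branch is omitted):

import errno

class FakeSanlockError(Exception):
    # Hypothetical stand-in for sanlock.SanlockException.
    def __init__(self, err):
        self.errno = err

_fd = None

def acquire_with_retry(do_register, do_acquire):
    # do_register/do_acquire are hypothetical callables standing in for
    # sanlock.register() and sanlock.acquire().
    global _fd
    while True:
        if _fd is None:
            _fd = do_register()
        try:
            do_acquire(_fd)
        except FakeSanlockError as e:
            if e.errno != errno.EPIPE:
                raise
            # The fd was closed under us; drop it, re-register, retry.
            _fd = None
            continue
        return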