def __init__(self, sdUUID):
    # Resolve the domain directory under the mountpoint and load the
    # appropriate metadata backend for this domain.
    domaindir = os.path.join(self.mountpoint, sdUUID)
    metadata = selectMetadata(sdUUID)
    sd.StorageDomain.__init__(self, sdUUID, domaindir, metadata)
    # Activate the domain's service LVs (metadata, leases, ...) before
    # anything tries to read them.
    lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
    self.metavol = lvm.lvPath(self.sdUUID, sd.METADATA)
    try:
        self.logBlkSize = self.getMetaParam(DMDK_LOGBLKSIZE)
        self.phyBlkSize = self.getMetaParam(DMDK_PHYBLKSIZE)
    except KeyError:
        # 512 by Saggi "Trust me (Smoch Alai (sic))"
        # *blkSize keys may be missing from metadata only for domains that
        # existed before the introduction of the keys.
        # Such domains supported only 512 sizes
        self.logBlkSize = 512
        self.phyBlkSize = 512
    # Check that all devices in the VG have the same logical and physical
    # block sizes.
    lvm.checkVGBlockSizes(sdUUID, (self.logBlkSize, self.phyBlkSize))
    # _extendlock is used to prevent race between
    # VG extend and LV extend.
    self._extendlock = threading.Lock()
    self.imageGarbageCollector()
    self._registerResourceNamespaces()
    # 0 means an uncached selftest has never been run yet.
    self._lastUncachedSelftest = 0
def format(cls, sdUUID):
    """Format detached storage domain.

    This removes all data from the storage domain.
    """
    # Drop the domain's directory tree, if we can still locate it.
    try:
        path = cls.findDomainPath(sdUUID)
    except se.StorageDomainDoesNotExist:
        pass
    else:
        fileUtils.cleanupdir(path, ignoreErrors=True)
    # Remove special metadata and service volumes
    # Remove all volumes LV if exists
    _removeVMSfs(lvm.lvPath(sdUUID, MASTERLV))
    try:
        domainLVs = lvm.getLV(sdUUID)
    except se.LogicalVolumeDoesNotExistError:
        # No LVs in this VG (domain)
        domainLVs = ()
    for curLV in domainLVs:
        # Fix me: Should raise and get resource lock.
        try:
            lvm.removeLVs(sdUUID, curLV.name)
        except se.CannotRemoveLogicalVolume as e:
            cls.log.warning("Remove logical volume failed %s/%s %s",
                            sdUUID, curLV.name, str(e))
    lvm.removeVG(sdUUID)
    return True
def getMetadata(self, metaId=None):
    """
    Get Meta data array of key,values lines
    """
    if not metaId:
        metaId = self.getMetadataId()
    vgname, offs = metaId
    try:
        meta = misc.readblock(lvm.lvPath(vgname, sd.METADATA),
                              offs * VOLUME_METASIZE, VOLUME_METASIZE)
        out = {}
        for l in meta:
            if l.startswith("EOF"):
                return out
            if l.find("=") < 0:
                continue
            # Split on the first '=' only: values may themselves contain
            # '=' (e.g. base64-encoded descriptions); an unbounded split
            # would raise ValueError on unpacking.
            key, value = l.split("=", 1)
            out[key.strip()] = value.strip()
    except Exception as e:
        self.log.error(e, exc_info=True)
        raise se.VolumeMetadataReadError("%s: %s" % (metaId, e))
    return out
def getMetadata(self, metaId=None):
    """
    Get Meta data array of key,values lines
    """
    if not metaId:
        metaId = self.getMetadataId()
    vgname, offs = metaId
    result = {}
    try:
        lines = misc.readblock(lvm.lvPath(vgname, sd.METADATA),
                               offs * volume.METADATA_SIZE,
                               volume.METADATA_SIZE)
        # TODO: factor out logic below for sharing with file volumes
        for line in lines:
            if line.startswith("EOF"):
                return result
            if "=" not in line:
                continue
            key, value = line.split("=", 1)
            result[key.strip()] = value.strip()
    except Exception as e:
        self.log.error(e, exc_info=True)
        raise se.VolumeMetadataReadError("%s: %s" % (metaId, e))
    return result
def getMetadata(self, metaid=None, nocache=False):
    """
    Get Meta data array of key,values lines
    """
    # Serve from the cache unless the caller explicitly requested a fresh
    # read. (The original condition was inverted: it consulted the cache
    # only when nocache=True, defeating both the cache and the flag.)
    if not nocache:
        out = self.metaCache()
        if out:
            return out
    if not metaid:
        vgname = self.sdUUID
        offs = self.getMetaOffset()
    else:
        vgname = metaid[0]
        offs = metaid[1]
    try:
        meta = misc.readblockSUDO(lvm.lvPath(vgname, sd.METADATA),
                                  offs * VOLUME_METASIZE,
                                  VOLUME_METASIZE)
        out = {}
        for l in meta:
            if l.startswith("EOF"):
                return out
            if l.find("=") < 0:
                continue
            # maxsplit=1 so values containing '=' are preserved intact.
            key, value = l.split("=", 1)
            out[key.strip()] = value.strip()
    except Exception as e:
        self.log.error(e, exc_info=True)
        raise se.VolumeMetadataReadError(str(metaid) + ":" + str(e))
    # The original fell off the end without returning the parsed
    # metadata when no EOF marker was present.
    return out
def __init__(self, sdUUID):
    # Build the domain directory path and select the metadata backend.
    domaindir = os.path.join(self.mountpoint, sdUUID)
    metadata = selectMetadata(sdUUID)
    sd.StorageDomain.__init__(self, sdUUID, domaindir, metadata)
    # Activate the domain's service LVs before touching them.
    lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
    self.metavol = lvm.lvPath(self.sdUUID, sd.METADATA)
    try:
        self.logBlkSize = self.getMetaParam(DMDK_LOGBLKSIZE)
        self.phyBlkSize = self.getMetaParam(DMDK_PHYBLKSIZE)
    except KeyError:
        # Initialize the block sizes metadata if not defined
        self.logBlkSize, self.phyBlkSize = lvm.getVGBlockSizes(sdUUID)
        self.setMetaParam(DMDK_LOGBLKSIZE, self.logBlkSize)
        self.setMetaParam(DMDK_PHYBLKSIZE, self.phyBlkSize)
    # Check that all devices in the VG have the same logical and physical
    # block sizes.
    lvm.checkVGBlockSizes(sdUUID, (self.logBlkSize, self.phyBlkSize))
    # _extendlock is used to prevent race between
    # VG extend and LV extend.
    self._extendlock = threading.Lock()
    self.imageGarbageCollector()
    self._registerResourceNamespaces()
    # 0 means an uncached selftest has never been run yet.
    self._lastUncachedSelftest = 0
def _postZero(sdUUID, volumes):
    # Zero out the payload of each volume's LV before removal so no data
    # leaks to a future owner of the same extents.
    # Assumed that there is no any thread that can deactivate these LVs
    # on this host or change the rw permission on this or any other host.
    lvNames = tuple(vol.volUUID for vol in volumes)
    # Assert volumes are writable. (Don't do this at home.)
    try:
        lvm.changelv(sdUUID, lvNames, ("--permission", "rw"))
    except se.StorageException:
        # Hope this only means that some volumes were already writable.
        pass
    lvm.activateLVs(sdUUID, lvNames)
    for lv in lvm.getLV(sdUUID):
        if lv.name in lvNames:
            # wipe out the whole volume
            try:
                misc.ddWatchCopy(
                    "/dev/zero", lvm.lvPath(sdUUID, lv.name),
                    vars.task.aborting, int(lv.size),
                    recoveryCallback=volume.baseAsyncTasksRollback)
            except utils.ActionStopped:
                # Task abort is propagated unchanged to the caller.
                raise
            except Exception:
                raise se.VolumesZeroingError(lv.name)
def __putMetadata(cls, metaId, meta):
    """Write formatted volume metadata into its slot on the metadata LV."""
    vgname, offs = metaId
    payload = cls.formatMetadata(meta)
    # NUL-pad so exactly one metadata slot is overwritten.
    padding = "\0" * (volume.METADATA_SIZE - len(payload))
    metavol = lvm.lvPath(vgname, sd.METADATA)
    with fileUtils.DirectFile(metavol, "r+d") as f:
        f.seek(offs * volume.METADATA_SIZE)
        f.write(payload + padding)
def _putMetadata(cls, metaId, meta):
    """Persist volume metadata to its block within the metadata LV."""
    vgname, offs = metaId
    block = cls.formatMetadata(meta)
    # Fill the remainder of the slot with NULs to erase stale bytes.
    block = block + "\0" * (volume.METADATA_SIZE - len(block))
    path = lvm.lvPath(vgname, sd.METADATA)
    with fileUtils.DirectFile(path, "r+d") as f:
        f.seek(offs * volume.METADATA_SIZE)
        f.write(block)
def _putMetadata(cls, metaId, meta):
    """Write the formatted metadata into slot ``offs`` of the metadata LV."""
    vgname, offs = metaId
    raw = cls.formatMetadata(meta)
    # NUL-pad to a whole slot so any previous contents are overwritten.
    raw += "\0" * (sc.METADATA_SIZE - len(raw))
    path = lvm.lvPath(vgname, sd.METADATA)
    with directio.DirectFile(path, "r+") as f:
        f.seek(offs * sc.METADATA_SIZE)
        f.write(raw)
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)
    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    # Automatically repair (-p) the file system before mounting it.
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd, sudo=True,
                                  deathSignal=signal.SIGKILL)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)
    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating
    # this condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd, sudo=True, deathSignal=signal.SIGKILL)
    masterMount = mount.Mount(masterfsdev, masterDir)
    try:
        masterMount.mount(vfstype=mount.VFS_EXT3)
    except mount.MountError as ex:
        # MountError unpacks into (rc, out).
        rc, out = ex
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)
    cmd = [
        constants.EXT_CHOWN,
        "%s:%s" % (constants.METADATA_USER, constants.METADATA_GROUP),
        masterDir
    ]
    (rc, out, err) = misc.execCmd(cmd, sudo=True)
    if rc != 0:
        # Non-fatal: log and continue with the mount in place.
        self.log.error("failed to chown %s", masterDir)
def _create(cls, dom, imgUUID, volUUID, size, volFormat, preallocate,
            volParent, srcImgUUID, srcVolUUID, volPath):
    """
    Class specific implementation of volumeCreate. All the exceptions are
    properly handled and logged in volume.create()
    """
    # Sparse volumes start with only the configured extension chunk;
    # preallocated volumes get the full size (sectors rounded up to MiB).
    if preallocate == volume.SPARSE_VOL:
        volSize = "%s" % config.get("irs", "volume_utilization_chunk_mb")
    else:
        volSize = "%s" % ((size + SECTORS_TO_MB - 1) / SECTORS_TO_MB)
    lvm.createLV(dom.sdUUID, volUUID, volSize, activate=True,
                 initialTag=TAG_VOL_UNINIT)
    # Replace any stale link with a fresh symlink to the new LV.
    utils.rmFile(volPath)
    os.symlink(lvm.lvPath(dom.sdUUID, volUUID), volPath)
    if not volParent:
        cls.log.info(
            "Request to create %s volume %s with size = %s "
            "sectors", volume.type2name(volFormat), volPath, size)
        if volFormat == volume.COW_FORMAT:
            qemuimg.create(volPath, size * BLOCK_SIZE,
                           volume.fmt2str(volFormat))
    else:
        # Create hardlink to template and its meta file
        cls.log.info("Request to create snapshot %s/%s of volume %s/%s",
                     imgUUID, volUUID, srcImgUUID, srcVolUUID)
        volParent.clone(volPath, volFormat)
    with cls._tagCreateLock:
        # Reserve a metadata slot and record it (plus lineage) as LV tags.
        mdSlot = dom.getVolumeMetadataSlot(volUUID, VOLUME_MDNUMBLKS)
        mdTags = [
            "%s%s" % (TAG_PREFIX_MD, mdSlot),
            "%s%s" % (TAG_PREFIX_PARENT, srcVolUUID),
            "%s%s" % (TAG_PREFIX_IMAGE, imgUUID)
        ]
        lvm.changeLVTags(dom.sdUUID, volUUID, delTags=[TAG_VOL_UNINIT],
                         addTags=mdTags)
    try:
        lvm.deactivateLVs(dom.sdUUID, volUUID)
    except se.CannotDeactivateLogicalVolume:
        # Best effort: the volume stays active but is fully created.
        cls.log.warn("Cannot deactivate new created volume %s/%s",
                     dom.sdUUID, volUUID, exc_info=True)
    return (dom.sdUUID, mdSlot)
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)
    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    # Automatically repair (-p) the file system before mounting it.
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)
    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating this
    # condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd)
    rc = fileUtils.mount(masterfsdev, masterDir,
                         mountType=fileUtils.FSTYPE_EXT3)
    # mount exit codes
    # mount has the following return codes (the bits can be ORed):
    # 0      success
    # 1      incorrect invocation or permissions
    # 2      system error (out of memory, cannot fork, no more loop devices)
    # 4      internal mount bug or missing nfs support in mount
    # 8      user interrupt
    # 16     problems writing or locking /etc/mtab
    # 32     mount failure
    # 64     some mount succeeded
    if rc != 0:
        # NOTE(review): 'out' here is still the fsck output captured
        # earlier, not output from the mount call -- confirm intended.
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)
    cmd = [constants.EXT_CHOWN, "%s:%s" %
           (constants.METADATA_USER, constants.METADATA_GROUP), masterDir]
    (rc, out, err) = misc.execCmd(cmd)
    if rc != 0:
        # Non-fatal: log and continue with the mount in place.
        self.log.error("failed to chown %s", masterDir)
def validateVolumePath(self):
    """
    Block SD supports lazy volume link creation. Note that the volume
    can be still inactive. An explicit prepare is required to validate
    that the volume is active.
    """
    if not self.imagePath:
        self.validateImagePath()
    linkPath = os.path.join(self.imagePath, self.volUUID)
    if not os.path.lexists(linkPath):
        # Lazily create the symlink pointing at the LV device node.
        target = lvm.lvPath(self.sdUUID, self.volUUID)
        os.symlink(target, linkPath)
    self.volumePath = linkPath
def getVSize(cls, sdobj, imgUUID, volUUID, bs=512):
    """Return the volume size in units of ``bs`` bytes."""
    devPath = lvm.lvPath(sdobj.sdUUID, volUUID)
    try:
        return _getDeviceSize(devPath) / bs
    except OSError:
        # This is OK, the volume might not be active. Try the traditional
        # way
        pass
    except Exception:
        cls.log.warn(
            "Could not get size for vol %s/%s using optimized methods",
            sdobj.sdUUID, volUUID, exc_info=True
        )
    # Fall back to querying LVM for the LV size.
    return int(int(lvm.getLV(sdobj.sdUUID, volUUID).size) / bs)
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)
    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    # Automatically repair (-p) the file system before mounting it.
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd, sudo=True,
                                  deathSignal=signal.SIGKILL)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)
    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating
    # this condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd, sudo=True, deathSignal=signal.SIGKILL)
    masterMount = mount.Mount(masterfsdev, masterDir)
    try:
        masterMount.mount(vfstype=mount.VFS_EXT3)
    except mount.MountError as ex:
        # MountError unpacks into (rc, out).
        rc, out = ex
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)
    cmd = [constants.EXT_CHOWN, "%s:%s" %
           (constants.METADATA_USER, constants.METADATA_GROUP), masterDir]
    (rc, out, err) = misc.execCmd(cmd, sudo=True)
    if rc != 0:
        # Non-fatal: log and continue with the mount in place.
        self.log.error("failed to chown %s", masterDir)
def validateVolumePath(self):
    """
    Block SD supports lazy volume link creation. Note that the volume
    can be still inactive. An explicit prepare is required to validate
    that the volume is active.
    """
    if not self._imagePath:
        self.validateImagePath()
    linkPath = os.path.join(self._imagePath, self.volUUID)
    if not os.path.lexists(linkPath):
        lvTarget = lvm.lvPath(self.sdUUID, self.volUUID)
        self.log.debug("Creating symlink from %s to %s", lvTarget, linkPath)
        os.symlink(lvTarget, linkPath)
    self._volumePath = linkPath
def getVSize(self, imgUUID, volUUID):
    """
    Return the block volume size in bytes.
    """
    try:
        size = _tellEnd(lvm.lvPath(self.sdUUID, volUUID))
    except IOError as e:
        # NOTE(review): os.errno is an accident of the os module and was
        # removed in Python 3.6 -- prefer the errno module directly.
        if e.errno == os.errno.ENOENT:
            # Inactive volume has no /dev entry. Fallback to lvm way.
            size = lvm.getLV(self.sdUUID, volUUID).size
        else:
            self.log.warn("Could not get size for vol %s/%s",
                          self.sdUUID, volUUID, exc_info=True)
            raise
    return int(size)
def refreshDirTree(self):
    """Recreate the domain directory layout and the special-LV symlinks."""
    # create domain images folder
    fileUtils.createdir(os.path.join(self.domaindir, sd.DOMAIN_IMAGES))
    # create domain special volumes folder
    mdDir = os.path.join(self.domaindir, sd.DOMAIN_META_DATA)
    fileUtils.createdir(mdDir)
    lvm.activateLVs(self.sdUUID, SPECIAL_LVS)
    for lvName in SPECIAL_LVS:
        linkPath = os.path.join(mdDir, lvName)
        if os.path.lexists(linkPath):
            continue
        os.symlink(lvm.lvPath(self.sdUUID, lvName), linkPath)
def __putMetadata(cls, meta, metaid):
    """Serialize ``meta`` as key=value lines and write it to its slot.

    Raises se.MetadataOverflowError if the serialized metadata does not
    fit in one slot.
    """
    vgname = metaid[0]
    offs = metaid[1]
    lines = ["%s=%s\n" % (key.strip(), str(value).strip())
             for key, value in meta.iteritems()]
    lines.append("EOF\n")
    metavol = lvm.lvPath(vgname, sd.METADATA)
    with fileUtils.DirectFile(metavol, "r+d") as f:
        data = "".join(lines)
        if len(data) > VOLUME_METASIZE:
            # Silently truncating the serialized metadata corrupts the
            # stored volume description; fail loudly instead (matches the
            # overflow-raising variant of this method in this file).
            raise se.MetadataOverflowError(data)
        data += "\0" * (VOLUME_METASIZE - len(data))
        f.seek(offs * VOLUME_METASIZE)
        f.write(data)
def _create(cls, dom, imgUUID, volUUID, size, volFormat, preallocate,
            volParent, srcImgUUID, srcVolUUID, imgPath, volPath):
    """
    Class specific implementation of volumeCreate. All the exceptions are
    properly handled and logged in volume.create()
    """
    # Sparse volumes start with only the configured extension chunk;
    # preallocated volumes get the full size (sectors rounded up to MiB).
    if preallocate == volume.SPARSE_VOL:
        volSize = "%s" % config.get("irs", "volume_utilization_chunk_mb")
    else:
        volSize = "%s" % ((size + SECTORS_TO_MB - 1) / SECTORS_TO_MB)
    lvm.createLV(dom.sdUUID, volUUID, volSize, activate=True,
                 initialTag=TAG_VOL_UNINIT)
    # Replace any stale link with a fresh symlink to the new LV.
    utils.rmFile(volPath)
    os.symlink(lvm.lvPath(dom.sdUUID, volUUID), volPath)
    if not volParent:
        cls.log.info("Request to create %s volume %s with size = %s "
                     "sectors", volume.type2name(volFormat), volPath,
                     size)
        if volFormat == volume.COW_FORMAT:
            volume.createVolume(None, None, volPath, size, volFormat,
                                preallocate)
    else:
        # Create hardlink to template and its meta file
        cls.log.info("Request to create snapshot %s/%s of volume %s/%s",
                     imgUUID, volUUID, srcImgUUID, srcVolUUID)
        volParent.clone(imgPath, volUUID, volFormat, preallocate)
    with cls._tagCreateLock:
        # Reserve a metadata slot and record it (plus lineage) as LV tags.
        mdSlot = dom.mapMetaOffset(volUUID, VOLUME_MDNUMBLKS)
        mdTags = ["%s%s" % (TAG_PREFIX_MD, mdSlot),
                  "%s%s" % (TAG_PREFIX_PARENT, srcVolUUID),
                  "%s%s" % (TAG_PREFIX_IMAGE, imgUUID)]
        lvm.changeLVTags(dom.sdUUID, volUUID, delTags=[TAG_VOL_UNINIT],
                         addTags=mdTags)
    try:
        lvm.deactivateLVs(dom.sdUUID, volUUID)
    except se.CannotDeactivateLogicalVolume:
        # Best effort: the volume stays active but is fully created.
        cls.log.warn("Cannot deactivate new created volume %s/%s",
                     dom.sdUUID, volUUID, exc_info=True)
    return (dom.sdUUID, mdSlot)
def _create(cls, dom, imgUUID, volUUID, size, volFormat, preallocate,
            volParent, srcImgUUID, srcVolUUID, volPath,
            initialSize=None):
    """
    Class specific implementation of volumeCreate. All the exceptions are
    properly handled and logged in volume.create()
    """
    # Compute the initial LV allocation (full size for preallocated,
    # initial chunk for sparse).
    lvSize = cls.calculate_volume_alloc_size(preallocate,
                                             size, initialSize)
    lvm.createLV(dom.sdUUID, volUUID, "%s" % lvSize, activate=True,
                 initialTags=(sc.TAG_VOL_UNINIT,))
    # Replace any stale link with a fresh symlink to the new LV.
    utils.rmFile(volPath)
    os.symlink(lvm.lvPath(dom.sdUUID, volUUID), volPath)
    if not volParent:
        cls.log.info("Request to create %s volume %s with size = %s "
                     "sectors", sc.type2name(volFormat), volPath,
                     size)
        if volFormat == sc.COW_FORMAT:
            qemuimg.create(volPath,
                           size=size * BLOCK_SIZE,
                           format=sc.fmt2str(volFormat),
                           qcow2Compat=dom.qcow2_compat())
    else:
        # Create hardlink to template and its meta file
        cls.log.info("Request to create snapshot %s/%s of volume %s/%s",
                     imgUUID, volUUID, srcImgUUID, srcVolUUID)
        volParent.clone(volPath, volFormat)
    with dom.acquireVolumeMetadataSlot(
            volUUID, sc.VOLUME_MDNUMBLKS) as slot:
        # Record the slot and lineage as LV tags while holding the slot.
        mdTags = ["%s%s" % (sc.TAG_PREFIX_MD, slot),
                  "%s%s" % (sc.TAG_PREFIX_PARENT, srcVolUUID),
                  "%s%s" % (sc.TAG_PREFIX_IMAGE, imgUUID)]
        lvm.changeLVTags(dom.sdUUID, volUUID,
                         delTags=[sc.TAG_VOL_UNINIT],
                         addTags=mdTags)
    try:
        lvm.deactivateLVs(dom.sdUUID, [volUUID])
    except se.CannotDeactivateLogicalVolume:
        # Best effort: the volume stays active but is fully created.
        cls.log.warn("Cannot deactivate new created volume %s/%s",
                     dom.sdUUID, volUUID, exc_info=True)
    return (dom.sdUUID, slot)
def _zeroVolume(sdUUID, volUUID):
    """Fill a block volume.

    This function requires an active LV.
    """
    dmDev = lvm.lvDmDev(sdUUID, volUUID)
    devSize = multipath.getDeviceSize(dmDev)  # Bytes
    # TODO: Change for zero 128 M chuncks and log.
    # 128 M is the vdsm extent size default
    blockSize = constants.MEGAB  # 1024 ** 2 = 1 MiB
    cmd = [
        constants.EXT_DD,
        "oflag=%s" % misc.DIRECTFLAG,
        "if=/dev/zero",
        "of=%s" % lvm.lvPath(sdUUID, volUUID),
        "bs=%s" % blockSize,
        "count=%s" % (devSize / blockSize),
    ]
    # Run asynchronously at low priority and hand the process back.
    return misc.execCmd(cmd, sync=False, nice=utils.NICENESS.HIGH,
                        ioclass=utils.IOCLASS.IDLE,
                        deathSignal=signal.SIGKILL)
def getVSize(cls, sdobj, imgUUID, volUUID, bs=BLOCK_SIZE):
    """
    Returns size in block units.

    Returns the largest integer value less than or equal to size
    [blocks].
    """
    try:
        size = _tellEnd(lvm.lvPath(sdobj.sdUUID, volUUID)) / bs
    except IOError as e:
        # NOTE(review): os.errno was removed in Python 3.6; the errno
        # module should be used directly.
        if e.errno == os.errno.ENOENT:
            # Inactive volume has no /dev entry. Fallback to lvm way.
            size = int(int(lvm.getLV(sdobj.sdUUID, volUUID).size) / bs)
        else:
            cls.log.warn("Could not get size for vol %s/%s",
                         sdobj.sdUUID, volUUID, exc_info=True)
            raise
    return size
def getMetadata(self, metaId=None):
    """
    Get Meta data array of key,values lines
    """
    if not metaId:
        metaId = self.getMetadataId()
    vgname, offs = metaId
    metaPath = lvm.lvPath(vgname, sd.METADATA)
    try:
        lines = misc.readblock(metaPath, offs * sc.METADATA_SIZE,
                               sc.METADATA_SIZE)
    except Exception as e:
        self.log.error(e, exc_info=True)
        raise se.VolumeMetadataReadError("%s: %s" % (metaId, e))
    # Parse the raw slot into the legacy key/value representation.
    return VolumeMetadata.from_lines(lines).legacy_info()
def _create(cls, dom, imgUUID, volUUID, size, volFormat, preallocate,
            volParent, srcImgUUID, srcVolUUID, volPath,
            initialSize=None):
    """
    Class specific implementation of volumeCreate. All the exceptions are
    properly handled and logged in volume.create()
    """
    # Compute the initial LV allocation (full size for preallocated,
    # initial chunk for sparse).
    lvSize = cls.calculate_volume_alloc_size(preallocate,
                                             size, initialSize)
    lvm.createLV(dom.sdUUID, volUUID, "%s" % lvSize, activate=True,
                 initialTags=(sc.TAG_VOL_UNINIT,))
    # Replace any stale link with a fresh symlink to the new LV.
    utils.rmFile(volPath)
    os.symlink(lvm.lvPath(dom.sdUUID, volUUID), volPath)
    if not volParent:
        cls.log.info("Request to create %s volume %s with size = %s "
                     "sectors", sc.type2name(volFormat), volPath,
                     size)
        if volFormat == sc.COW_FORMAT:
            qemuimg.create(
                volPath, size * BLOCK_SIZE, sc.fmt2str(volFormat))
    else:
        # Create hardlink to template and its meta file
        cls.log.info("Request to create snapshot %s/%s of volume %s/%s",
                     imgUUID, volUUID, srcImgUUID, srcVolUUID)
        volParent.clone(volPath, volFormat)
    with dom.acquireVolumeMetadataSlot(
            volUUID, sc.VOLUME_MDNUMBLKS) as slot:
        # Record the slot and lineage as LV tags while holding the slot.
        mdTags = ["%s%s" % (sc.TAG_PREFIX_MD, slot),
                  "%s%s" % (sc.TAG_PREFIX_PARENT, srcVolUUID),
                  "%s%s" % (sc.TAG_PREFIX_IMAGE, imgUUID)]
        lvm.changeLVTags(dom.sdUUID, volUUID,
                         delTags=[sc.TAG_VOL_UNINIT],
                         addTags=mdTags)
    try:
        lvm.deactivateLVs(dom.sdUUID, [volUUID])
    except se.CannotDeactivateLogicalVolume:
        # Best effort: the volume stays active but is fully created.
        cls.log.warn("Cannot deactivate new created volume %s/%s",
                     dom.sdUUID, volUUID, exc_info=True)
    return (dom.sdUUID, slot)
def __putMetadata(cls, metaId, meta):
    """Write ``meta`` as key=value lines into its metadata-LV slot."""
    vgname, offs = metaId
    lines = []
    for key, value in meta.iteritems():
        lines.append("%s=%s\n" % (key.strip(), str(value).strip()))
    lines.append("EOF\n")
    metavol = lvm.lvPath(vgname, sd.METADATA)
    with fileUtils.DirectFile(metavol, "r+d") as f:
        data = "".join(lines)
        # Refuse to write metadata that does not fit in one slot.
        if len(data) > VOLUME_METASIZE:
            raise se.MetadataOverflowError(data)
        data += "\0" * (VOLUME_METASIZE - len(data))
        f.seek(offs * VOLUME_METASIZE)
        f.write(data)
def __putMetadata(cls, meta, metaid):
    """Serialize ``meta`` as key=value lines and write it to its slot.

    Raises se.MetadataOverflowError if the serialized metadata does not
    fit in one slot.
    """
    vgname = metaid[0]
    offs = metaid[1]
    lines = [
        "%s=%s\n" % (key.strip(), str(value).strip())
        for key, value in meta.iteritems()
    ]
    lines.append("EOF\n")
    metavol = lvm.lvPath(vgname, sd.METADATA)
    with fileUtils.DirectFile(metavol, "r+d") as f:
        data = "".join(lines)
        if len(data) > VOLUME_METASIZE:
            # Silently truncating the serialized metadata corrupts the
            # stored volume description; fail loudly instead (matches the
            # overflow-raising variant of this method in this file).
            raise se.MetadataOverflowError(data)
        data += "\0" * (VOLUME_METASIZE - len(data))
        f.seek(offs * VOLUME_METASIZE)
        f.write(data)
def create(cls, sdUUID, domainName, domClass, vgUUID, storageType,
           version):
    """
    Create new storage domain
        'sdUUID' - Storage Domain UUID
        'domainName' - storage domain name
        'vgUUID' - volume group UUID
        'domClass' - Data/Iso
    """
    cls.log.info(
        "sdUUID=%s domainName=%s domClass=%s vgUUID=%s "
        "storageType=%s version=%s",
        sdUUID, domainName, domClass, vgUUID, storageType, version)
    if len(domainName) > sd.MAX_DOMAIN_DESCRIPTION_SIZE:
        raise se.StorageDomainDescriptionTooLongError()
    sd.validateDomainVersion(version)
    vg = lvm.getVGbyUUID(vgUUID)
    vgName = vg.name
    # The VG must carry exactly the "unready domain" tag at this point.
    if set((STORAGE_UNREADY_DOMAIN_TAG, )) != set(vg.tags):
        raise se.VolumeGroupHasDomainTag(vgUUID)
    try:
        lvm.getLV(vgName)
        raise se.StorageDomainNotEmpty(vgUUID)
    except se.LogicalVolumeDoesNotExistError:
        pass
    numOfPVs = len(lvm.listPVNames(vgName))
    if version in VERS_METADATA_LV and numOfPVs > MAX_PVS:
        cls.log.debug("%d > %d", numOfPVs, MAX_PVS)
        raise se.StorageDomainIsMadeFromTooManyPVs()
    # Set the name of the VG to be the same as sdUUID
    if vgName != sdUUID:
        lvm.renameVG(vgName, sdUUID)
        vgName = sdUUID
    # Create metadata service volume
    metasize = cls.metaSize(vgName)
    lvm.createLV(vgName, sd.METADATA, "%s" % (metasize))
    # Create the mapping right now so the index 0 is guaranteed
    # to belong to the metadata volume. Since the metadata is at
    # least SDMETADATA/METASIZE units, we know we can use the first
    # SDMETADATA bytes of the metadata volume for the SD metadata.
    # pass metadata's dev to ensure it is the first mapping
    mapping = cls.getMetaDataMapping(vgName)
    # Create the rest of the BlockSD internal volumes
    lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE)
    lvm.createLV(vgName, sd.IDS, sd.IDS_SIZE)
    lvm.createLV(vgName, sd.INBOX, sd.INBOX_SIZE)
    lvm.createLV(vgName, sd.OUTBOX, sd.OUTBOX_SIZE)
    lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE)
    # Create VMS file system
    _createVMSfs(os.path.join("/dev", vgName, MASTERLV))
    lvm.deactivateLVs(vgName, MASTERLV)
    path = lvm.lvPath(vgName, sd.METADATA)
    # Zero out the metadata and special volumes before use
    try:
        misc.ddCopy("/dev/zero", path, RESERVED_METADATA_SIZE)
        path = lvm.lvPath(vgName, sd.INBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
        path = lvm.lvPath(vgName, sd.OUTBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
    # NOTE(review): Python 2-only except syntax; the method appears to
    # continue past this point in the full source.
    except se.ActionStopped, e:
        raise e
def __init__(self, vgName, lvName, offset, size):
    """Remember where this metadata slice lives on the metadata LV."""
    self._vgName = vgName
    self._lvName = lvName
    self._offset = offset
    self._size = size
    # Resolved device path of the metadata LV.
    self.metavol = lvm.lvPath(vgName, lvName)
def create(cls, sdUUID, domainName, domClass, vgUUID, storageType,
           version):
    """
    Create new storage domain
        'sdUUID' - Storage Domain UUID
        'domainName' - storage domain name
        'domClass' - Data/Iso
        'vgUUID' - volume group UUID
        'storageType' - NFS_DOMAIN, LOCALFS_DOMAIN, &etc.
        'version' - DOMAIN_VERSIONS
    """
    cls.log.info("sdUUID=%s domainName=%s domClass=%s vgUUID=%s "
                 "storageType=%s version=%s", sdUUID, domainName,
                 domClass, vgUUID, storageType, version)
    if not misc.isAscii(domainName) and not sd.supportsUnicode(version):
        raise se.UnicodeArgumentException()
    if len(domainName) > sd.MAX_DOMAIN_DESCRIPTION_SIZE:
        raise se.StorageDomainDescriptionTooLongError()
    sd.validateDomainVersion(version)
    vg = lvm.getVGbyUUID(vgUUID)
    vgName = vg.name
    # The VG must carry exactly the "unready domain" tag at this point.
    if set((STORAGE_UNREADY_DOMAIN_TAG,)) != set(vg.tags):
        raise se.VolumeGroupHasDomainTag(vgUUID)
    try:
        lvm.getLV(vgName)
        raise se.StorageDomainNotEmpty(vgUUID)
    except se.LogicalVolumeDoesNotExistError:
        pass
    numOfPVs = len(lvm.listPVNames(vgName))
    if version in VERS_METADATA_LV and numOfPVs > MAX_PVS:
        cls.log.debug("%d > %d", numOfPVs, MAX_PVS)
        raise se.StorageDomainIsMadeFromTooManyPVs()
    # Create metadata service volume
    metasize = cls.metaSize(vgName)
    lvm.createLV(vgName, sd.METADATA, "%s" % (metasize))
    # Create the mapping right now so the index 0 is guaranteed
    # to belong to the metadata volume. Since the metadata is at
    # least SDMETADATA/METASIZE units, we know we can use the first
    # SDMETADATA bytes of the metadata volume for the SD metadata.
    # pass metadata's dev to ensure it is the first mapping
    mapping = cls.getMetaDataMapping(vgName)
    # Create the rest of the BlockSD internal volumes
    for metaFile, metaSizeMb in sd.SPECIAL_VOLUME_SIZES_MIB.iteritems():
        lvm.createLV(vgName, metaFile, metaSizeMb)
    lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE)
    # Create VMS file system
    _createVMSfs(os.path.join("/dev", vgName, MASTERLV))
    lvm.deactivateLVs(vgName, MASTERLV)
    path = lvm.lvPath(vgName, sd.METADATA)
    # Zero out the metadata and special volumes before use
    try:
        misc.ddCopy("/dev/zero", path, RESERVED_METADATA_SIZE)
        path = lvm.lvPath(vgName, sd.INBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
        path = lvm.lvPath(vgName, sd.OUTBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
    except utils.ActionStopped:
        # Task abort is propagated unchanged to the caller.
        raise
    except se.StorageException:
        raise se.VolumesZeroingError(path)
    # Pick the metadata backend matching the domain version.
    if version in VERS_METADATA_LV:
        md = LvBasedSDMetadata(vgName, sd.METADATA)
    elif version in VERS_METADATA_TAG:
        md = TagBasedSDMetadata(vgName)
    logBlkSize, phyBlkSize = lvm.getVGBlockSizes(vgName)
    # create domain metadata
    # FIXME : This is 99% like the metadata in file SD
    #         Do we really need to keep the VGUUID?
    #         no one reads it from here anyway
    initialMetadata = {
        sd.DMDK_VERSION: version,
        sd.DMDK_SDUUID: sdUUID,
        sd.DMDK_TYPE: storageType,
        sd.DMDK_CLASS: domClass,
        sd.DMDK_DESCRIPTION: domainName,
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_POOLS: [],
        sd.DMDK_LOCK_POLICY: '',
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_LEASE_TIME_SEC: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_IO_OP_TIMEOUT_SEC: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_LEASE_RETRIES],
        DMDK_VGUUID: vgUUID,
        DMDK_LOGBLKSIZE: logBlkSize,
        DMDK_PHYBLKSIZE: phyBlkSize,
    }
    initialMetadata.update(mapping)
    md.update(initialMetadata)
    # Mark VG with Storage Domain Tag
    try:
        lvm.replaceVGTag(vgName, STORAGE_UNREADY_DOMAIN_TAG,
                         STORAGE_DOMAIN_TAG)
    except se.StorageException:
        raise se.VolumeGroupUninitialized(vgName)
    bsd = BlockStorageDomain(sdUUID)
    bsd.initSPMlease()
    return bsd
def getReadDelay(self):
    """Return the time (seconds) spent reading 4 KiB from the metadata LV."""
    metaPath = lvm.lvPath(self.sdUUID, sd.METADATA)
    return misc.readspeed(metaPath, 4096)['seconds']
cls.log.error("Unexpected error", exc_info=True) raise se.VolumeCannotGetParent("blockVolume can't get parent %s for volume %s: %s" % (srcVolUUID, volUUID, str(e))) try: cls.log.info("blockVolume: creating LV: volUUID %s" % (volUUID)) if preallocate == volume.SPARSE_VOL: volsize = "%s" % config.get("irs", "volume_utilization_chunk_mb") else: # should stay %d and size should be int(size) volsize = "%s" % (size / 2 / 1024) vars.task.pushRecovery(task.Recovery("halfbaked volume rollback", "blockVolume", "BlockVolume", "halfbakedVolumeRollback", [sdUUID, volUUID, vol_path])) lvm.createLV(sdUUID, volUUID, volsize, activate=True) if os.path.exists(vol_path): os.unlink(vol_path) os.symlink(lvm.lvPath(sdUUID, volUUID), vol_path) except se.StorageException: cls.log.error("Unexpected error", exc_info=True) raise except Exception, e: cls.log.error("Unexpected error", exc_info=True) raise se.VolumeCreationError("blockVolume create/link lv %s failed: %s" % (volUUID, str(e))) # By definition volume is now a leaf and should be writeable. # Default permission for lvcreate is read and write. No need to set permission. try: cls.log.info("blockVolume: create: volUUID %s srcImg %s srvVol %s" % (volUUID, srcImgUUID, srcVolUUID)) if not pvol: cls.log.info("Request to create %s volume %s with size = %s sectors", volume.type2name(volFormat), vol_path, size) # Create 'raw' volume via qemu-img actually redundant
def getLeasesFilePath(self):
    """Activate the leases LV and return its device path."""
    lvm.activateLVs(self.sdUUID, [sd.LEASES])
    leasesPath = lvm.lvPath(self.sdUUID, sd.LEASES)
    return leasesPath
def getDevPath(self):
    """
    Return the underlying device (for sharing)
    """
    devPath = lvm.lvPath(self.sdUUID, self.volUUID)
    return devPath
def getIdsFilePath(self):
    """Activate the ids LV and return its device path."""
    lvm.activateLVs(self.sdUUID, [sd.IDS])
    idsPath = lvm.lvPath(self.sdUUID, sd.IDS)
    return idsPath
def create(cls, sdUUID, domainName, domClass, vgUUID, storageType, version):
    """
    Create new storage domain

    'sdUUID' - Storage Domain UUID
    'domainName' - storage domain name
    'vgUUID' - volume group UUID
    'domClass' - Data/Iso
    """
    # NOTE(review): this chunk ends inside the zeroing try/except — the rest
    # of the function (metadata object creation, VG tagging, return value)
    # is not visible here.
    cls.log.info("sdUUID=%s domainName=%s domClass=%s vgUUID=%s "
                 "storageType=%s version=%s", sdUUID, domainName, domClass,
                 vgUUID, storageType, version)

    if len(domainName) > sd.MAX_DOMAIN_DESCRIPTION_SIZE:
        raise se.StorageDomainDescriptionTooLongError()

    sd.validateDomainVersion(version)

    vg = lvm.getVGbyUUID(vgUUID)
    vgName = vg.name
    # Only a VG carrying exactly the "unready domain" tag may become a
    # domain; anything else already belongs to one.
    if set((STORAGE_UNREADY_DOMAIN_TAG,)) != set(vg.tags):
        raise se.VolumeGroupHasDomainTag(vgUUID)
    try:
        lvm.getLV(vgName)
        raise se.StorageDomainNotEmpty(vgUUID)
    except se.LogicalVolumeDoesNotExistError:
        pass  # empty VG — exactly what is required

    numOfPVs = len(lvm.listPVNames(vgName))
    if version in VERS_METADATA_LV and numOfPVs > MAX_PVS:
        # LV-based metadata keeps per-PV info, so the PV count is capped.
        cls.log.debug("%d > %d" , numOfPVs, MAX_PVS)
        raise se.StorageDomainIsMadeFromTooManyPVs()

    # Set the name of the VG to be the same as sdUUID
    if vgName != sdUUID:
        lvm.renameVG(vgName, sdUUID)
        vgName = sdUUID

    # Create metadata service volume
    metasize = cls.metaSize(vgName)
    lvm.createLV(vgName, sd.METADATA, "%s" % (metasize))
    # Create the mapping right now so the index 0 is guaranteed
    # to belong to the metadata volume. Since the metadata is at
    # least SDMETADATA/METASIZE units, we know we can use the first
    # SDMETADATA bytes of the metadata volume for the SD metadata.
    # pass metadata's dev to ensure it is the first mapping
    mapping = cls.getMetaDataMapping(vgName)

    # Create the rest of the BlockSD internal volumes
    lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE)
    lvm.createLV(vgName, sd.IDS, sd.IDS_SIZE)
    lvm.createLV(vgName, sd.INBOX, sd.INBOX_SIZE)
    lvm.createLV(vgName, sd.OUTBOX, sd.OUTBOX_SIZE)
    lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE)

    # Create VMS file system
    _createVMSfs(os.path.join("/dev", vgName, MASTERLV))

    lvm.deactivateLVs(vgName, MASTERLV)

    path = lvm.lvPath(vgName, sd.METADATA)

    # Zero out the metadata and special volumes before use
    try:
        misc.ddCopy("/dev/zero", path, RESERVED_METADATA_SIZE)
        path = lvm.lvPath(vgName, sd.INBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
        path = lvm.lvPath(vgName, sd.OUTBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
    except se.ActionStopped, e:  # Python 2 syntax; user-initiated abort
        raise e
# NOTE(review): fragment of BlockVolume.create() — the enclosing def and the
# code after the final log call are outside this chunk.  Uses Python 2
# "except Exception, e" syntax.
try:
    cls.log.info("blockVolume: creating LV: volUUID %s" % (volUUID))
    if preallocate == volume.SPARSE_VOL:
        # Sparse volume: start with one extension chunk (MiB, from config).
        volsize = "%s" % config.get("irs", "volume_utilization_chunk_mb")
    else:
        # should stay %d and size should be int(size)
        volsize = "%s" % (size / 2 / 1024)  # sectors -> MiB
    # Register rollback first so a failed/interrupted create is cleaned up
    # by the task recovery machinery.
    vars.task.pushRecovery(
        task.Recovery("halfbaked volume rollback", "blockVolume",
                      "BlockVolume", "halfbakedVolumeRollback",
                      [sdUUID, volUUID, vol_path]))
    lvm.createLV(sdUUID, volUUID, volsize, activate=True)
    # Replace any stale link so vol_path points at the new LV device.
    if os.path.exists(vol_path):
        os.unlink(vol_path)
    os.symlink(lvm.lvPath(sdUUID, volUUID), vol_path)
except se.StorageException:
    cls.log.error("Unexpected error", exc_info=True)
    raise
except Exception, e:
    cls.log.error("Unexpected error", exc_info=True)
    raise se.VolumeCreationError(
        "blockVolume create/link lv %s failed: %s" % (volUUID, str(e)))

# By definition volume is now a leaf and should be writeable.
# Default permission for lvcreate is read and write. No need to set
# permission.
try:
    cls.log.info(
        "blockVolume: create: volUUID %s srcImg %s srvVol %s" %
        (volUUID, srcImgUUID, srcVolUUID))
def create(cls, sdUUID, domainName, domClass, vgUUID, storageType, version):
    """
    Create new storage domain

    'sdUUID' - Storage Domain UUID
    'domainName' - storage domain name
    'domClass' - Data/Iso
    'vgUUID' - volume group UUID
    'storageType' - NFS_DOMAIN, LOCALFS_DOMAIN, &etc.
    'version' - DOMAIN_VERSIONS

    Returns the newly constructed BlockStorageDomain.
    Raises se.* storage exceptions on any validation or LVM failure.
    """
    cls.log.info("sdUUID=%s domainName=%s domClass=%s vgUUID=%s "
                 "storageType=%s version=%s", sdUUID, domainName, domClass,
                 vgUUID, storageType, version)

    # Older domain versions predate unicode support in the metadata.
    if not misc.isAscii(domainName) and not sd.supportsUnicode(version):
        raise se.UnicodeArgumentException()

    if len(domainName) > sd.MAX_DOMAIN_DESCRIPTION_SIZE:
        raise se.StorageDomainDescriptionTooLongError()

    sd.validateDomainVersion(version)

    vg = lvm.getVGbyUUID(vgUUID)
    vgName = vg.name
    # Only a VG carrying exactly the "unready domain" tag may become a
    # domain; anything else already belongs to one.
    if set((STORAGE_UNREADY_DOMAIN_TAG,)) != set(vg.tags):
        raise se.VolumeGroupHasDomainTag(vgUUID)
    try:
        lvm.getLV(vgName)
        raise se.StorageDomainNotEmpty(vgUUID)
    except se.LogicalVolumeDoesNotExistError:
        pass  # empty VG — exactly what is required

    numOfPVs = len(lvm.listPVNames(vgName))
    if version in VERS_METADATA_LV and numOfPVs > MAX_PVS:
        # LV-based metadata keeps per-PV info, so the PV count is capped.
        cls.log.debug("%d > %d", numOfPVs, MAX_PVS)
        raise se.StorageDomainIsMadeFromTooManyPVs()

    # Create metadata service volume
    metasize = cls.metaSize(vgName)
    lvm.createLV(vgName, sd.METADATA, "%s" % (metasize))
    # Create the mapping right now so the index 0 is guaranteed
    # to belong to the metadata volume. Since the metadata is at
    # least SDMETADATA/METASIZE units, we know we can use the first
    # SDMETADATA bytes of the metadata volume for the SD metadata.
    # pass metadata's dev to ensure it is the first mapping
    mapping = cls.getMetaDataMapping(vgName)

    # Create the rest of the BlockSD internal volumes
    lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE)
    lvm.createLV(vgName, sd.IDS, sd.IDS_SIZE)
    lvm.createLV(vgName, sd.INBOX, sd.INBOX_SIZE)
    lvm.createLV(vgName, sd.OUTBOX, sd.OUTBOX_SIZE)
    lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE)

    # Create VMS file system
    _createVMSfs(os.path.join("/dev", vgName, MASTERLV))

    lvm.deactivateLVs(vgName, MASTERLV)

    path = lvm.lvPath(vgName, sd.METADATA)

    # Zero out the metadata and special volumes before use
    try:
        misc.ddCopy("/dev/zero", path, RESERVED_METADATA_SIZE)
        path = lvm.lvPath(vgName, sd.INBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
        path = lvm.lvPath(vgName, sd.OUTBOX)
        misc.ddCopy("/dev/zero", path, RESERVED_MAILBOX_SIZE)
    except utils.ActionStopped:
        # User/task abort — propagate untouched.
        raise
    except se.StorageException:
        # 'path' names whichever special LV was being zeroed when it failed.
        raise se.VolumesZeroingError(path)

    # Choose the metadata backend by domain version.
    if version in VERS_METADATA_LV:
        md = LvBasedSDMetadata(vgName, sd.METADATA)
    elif version in VERS_METADATA_TAG:
        md = TagBasedSDMetadata(vgName)

    logBlkSize, phyBlkSize = lvm.getVGBlockSizes(vgName)

    # create domain metadata
    # FIXME : This is 99% like the metadata in file SD
    #         Do we really need to keep the VGUUID?
    # no one reads it from here anyway
    initialMetadata = {
        sd.DMDK_VERSION: version,
        sd.DMDK_SDUUID: sdUUID,
        sd.DMDK_TYPE: storageType,
        sd.DMDK_CLASS: domClass,
        sd.DMDK_DESCRIPTION: domainName,
        sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
        sd.DMDK_POOLS: [],
        sd.DMDK_LOCK_POLICY: '',
        sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
        sd.DMDK_LEASE_TIME_SEC: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_LEASE_TIME_SEC],
        sd.DMDK_IO_OP_TIMEOUT_SEC: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_IO_OP_TIMEOUT_SEC],
        sd.DMDK_LEASE_RETRIES: sd.DEFAULT_LEASE_PARAMS[
            sd.DMDK_LEASE_RETRIES],
        DMDK_VGUUID: vgUUID,
        DMDK_LOGBLKSIZE: logBlkSize,
        DMDK_PHYBLKSIZE: phyBlkSize,
    }

    initialMetadata.update(mapping)

    md.update(initialMetadata)

    # Mark VG with Storage Domain Tag
    try:
        lvm.replaceVGTag(vgName, STORAGE_UNREADY_DOMAIN_TAG,
                         STORAGE_DOMAIN_TAG)
    except se.StorageException:
        raise se.VolumeGroupUninitialized(vgName)

    bsd = BlockStorageDomain(sdUUID)
    bsd.initSPMlease()
    return bsd
def mountMaster(self):
    """
    Mount the master metadata file system. Should be called only by SPM.

    Activates the master LV, fscks it, best-effort upgrades ext2->ext3 via
    tune2fs, mounts it under the domain's master dir and chowns it to the
    metadata user/group.
    """
    lvm.activateLVs(self.sdUUID, MASTERLV)
    masterDir = os.path.join(self.domaindir, sd.MASTER_FS_DIR)
    fileUtils.createdir(masterDir)

    masterfsdev = lvm.lvPath(self.sdUUID, MASTERLV)
    cmd = [constants.EXT_FSCK, "-p", masterfsdev]
    (rc, out, err) = misc.execCmd(cmd)
    # fsck exit codes
    # 0    - No errors
    # 1    - File system errors corrected
    # 2    - File system errors corrected, system should
    #        be rebooted
    # 4    - File system errors left uncorrected
    # 8    - Operational error
    # 16   - Usage or syntax error
    # 32   - E2fsck canceled by user request
    # 128  - Shared library error
    if rc == 1 or rc == 2:
        # rc is a number
        self.log.info("fsck corrected fs errors (%s)", rc)
    if rc >= 4:
        # Anything >= 4 means errors were left uncorrected (or worse).
        raise se.BlockStorageDomainMasterFSCKError(masterfsdev, rc)

    # TODO: Remove when upgrade is only from a version which creates ext3
    # Try to add a journal - due to unfortunate circumstances we exposed
    # to the public the code that created ext2 file system instead of ext3.
    # In order to make up for it we are trying to add journal here, just
    # to be sure (and we have fixed the file system creation).
    # If there is a journal already tune2fs will do nothing, indicating this
    # condition only with exit code. However, we do not really care.
    cmd = [constants.EXT_TUNE2FS, "-j", masterfsdev]
    misc.execCmd(cmd)

    rc = fileUtils.mount(masterfsdev, masterDir,
                         mountType=fileUtils.FSTYPE_EXT3)
    # mount exit codes
    # mount has the following return codes (the bits can be ORed):
    # 0 success
    # 1 incorrect invocation or permissions
    # 2 system error (out of memory, cannot fork, no more loop devices)
    # 4 internal mount bug or missing nfs support in mount
    # 8 user interrupt
    # 16 problems writing or locking /etc/mtab
    # 32 mount failure
    # 64 some mount succeeded
    if rc != 0:
        # NOTE(review): 'out' here still holds the output captured from the
        # earlier fsck execCmd (tune2fs/mount output was not captured) —
        # the error may carry misleading output text; confirm intent.
        raise se.BlockStorageDomainMasterMountError(masterfsdev, rc, out)

    cmd = [
        constants.EXT_CHOWN,
        "%s:%s" % (constants.METADATA_USER, constants.METADATA_GROUP),
        masterDir
    ]
    (rc, out, err) = misc.execCmd(cmd)
    if rc != 0:
        # Best effort: log but do not fail the mount over ownership.
        self.log.error("failed to chown %s", masterDir)
def getReadDelay(self):
    """Time a raw 4 KiB direct read from the metadata LV.

    Returns the elapsed wall-clock time in seconds.
    """
    metadata_path = lvm.lvPath(self.sdUUID, sd.METADATA)
    with fileUtils.open_ex(metadata_path, "dr") as f:
        started = time.time()
        f.read(4096)
        elapsed = time.time() - started
    return elapsed