def _extendSizeRaw(self, newSize):
    """
    Grow this raw volume's backing file to newSize blocks.

    newSize is given in blocks and converted to bytes using BLOCK_SIZE.
    Real sanity checks belong to the caller; we only verify that the
    current and requested sizes are consistent, since both values feed
    the preallocation math below.

    Raises se.StorageException when the on-disk size is nonsensical and
    se.VolumeResizeValueError when asked to shrink the volume.
    """
    path = self.getVolumePath()
    current_bytes = self.oop.os.stat(path).st_size
    requested_bytes = newSize * BLOCK_SIZE

    # Already at the requested size - nothing to do.
    if requested_bytes == current_bytes:
        return

    if current_bytes <= 0:
        raise se.StorageException(
            "Volume size is impossible: %s" % current_bytes)

    # Shrinking is not supported.
    if requested_bytes < current_bytes:
        raise se.VolumeResizeValueError(newSize)

    if self.getType() == sc.PREALLOCATED_VOL:
        self.log.info("Preallocating volume %s to %s bytes",
                      path, requested_bytes)
        # Allocate only the newly added tail of the file.
        operation = fallocate.allocate(
            path, requested_bytes - current_bytes, current_bytes)
        with vars.task.abort_callback(operation.abort):
            with utils.stopwatch("Preallocating volume %s" % path):
                operation.run()
    else:
        # For sparse files truncating to the new size is enough; this is
        # also a good fallback for a failed preallocation.
        self.log.info("Truncating volume %s to %s bytes",
                      path, requested_bytes)
        self.oop.truncateFile(path, requested_bytes)
def copyToImage(dstImgPath, methodArgs):
    """
    Stream image data from methodArgs['fileObj'] into dstImgPath via dd.

    Raises se.StorageException on a dd timeout and
    se.MiscFileWriteException when dd exits with a non-zero status.
    """
    total_size = getLengthFromArgs(methodArgs)
    file_obj = methodArgs['fileObj']

    # Unlike copyFromImage, we don't use direct I/O when writing because:
    # - Images are small so using host page cache is ok.
    # - Images typically aligned to 512 bytes (tar), may fail on 4k storage.
    dd_cmd = [
        constants.EXT_DD,
        "of=%s" % dstImgPath,
        "bs=%s" % MiB,
        # Ensure that data reach physical storage before returning.
        "conv=fsync",
    ]

    log.info("Copy to image %s", dstImgPath)
    with utils.stopwatch(
            "Copy %s bytes" % total_size, level=logging.INFO, log=log):
        proc = commands.start(
            dd_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        with commands.terminating(proc):
            _copyData(file_obj, proc.stdin, total_size)
            try:
                _, stderr_data = proc.communicate(timeout=WAIT_TIMEOUT)
            except subprocess.TimeoutExpired:
                log.error("timeout waiting for dd process")
                raise se.StorageException()
            if proc.returncode != 0:
                log.error("dd failed rc=%s err=%r",
                          proc.returncode, stderr_data)
                raise se.MiscFileWriteException()
def changelv(vg, lvs, attrs):
    """
    Change multiple attributes on multiple LVs.

    vg: VG name
    lvs: a single LV name or iterable of LV names.
    attrs: an iterable of (attr, value) pairs), e.g.
        (('--available', 'y'), ('--permission', 'rw'))

    Note: You may activate an activated LV without error but lvchange
    returns an error (RC=5) when activating rw if already rw.
    """
    lvs = _normalizeargs(lvs)
    lv_paths = tuple("%s/%s" % (vg, name) for name in lvs)

    cmd = ["lvchange"]
    cmd.extend(LVM_NOBACKUP)
    if isinstance(attrs[0], str):
        # Flat form: ("--attribute", "value")
        cmd.extend(attrs)
    else:
        # Nested form: (("--aa", "v1"), ("--ab", "v2"))
        for pair in attrs:
            cmd.extend(pair)
    cmd.extend(lv_paths)

    rc, out, err = _lvminfo.cmd(tuple(cmd), _lvminfo._getVGDevs((vg, )))

    # Whether the command succeeded or not we may have changed the LVs,
    # so drop them from the cache to be reloaded on first occasion.
    _lvminfo._invalidatelvs(vg, lvs)
    if rc != 0:
        raise se.StorageException("%d %s %s\n%s/%s" % (rc, out, err, vg, lvs))
def _extendSizeRaw(self, new_capacity):
    """
    Grow this raw volume's backing file to new_capacity bytes.

    Real sanity checks belong to the caller; we only validate that the
    current and requested capacities are consistent, since both values
    feed the preallocation math below.

    Raises se.StorageException when the on-disk capacity is nonsensical
    and se.VolumeResizeValueError when asked to shrink the volume.
    """
    path = self.getVolumePath()
    cur_capacity = self.oop.os.stat(path).st_size

    # Already at the requested capacity - nothing to do.
    if new_capacity == cur_capacity:
        return

    if cur_capacity <= 0:
        raise se.StorageException(
            "Volume capacity is impossible: %s" % cur_capacity)

    # Shrinking is not supported.
    if new_capacity < cur_capacity:
        raise se.VolumeResizeValueError(new_capacity)

    if self.getType() == sc.PREALLOCATED_VOL:
        self.log.info("Preallocating volume %s to %s", path, new_capacity)
        # Allocate only the newly added tail of the file.
        operation = fallocate.allocate(
            path, new_capacity - cur_capacity, offset=cur_capacity)
        with vars.task.abort_callback(operation.abort):
            with utils.stopwatch(
                    "Preallocating volume {}".format(path),
                    level=logging.INFO,
                    log=self.log):
                operation.run()
    else:
        # For sparse files truncating to the new capacity is enough; this
        # is also a good fallback for a failed preallocation.
        self.log.info("Truncating volume %s to %s", path, new_capacity)
        self.oop.truncateFile(path, new_capacity)
def copyToImage(dstImgPath, methodArgs):
    """
    Stream image data from methodArgs['fileObj'] into dstImgPath via dd.

    Raises se.StorageException on a dd timeout and
    se.MiscFileWriteException when dd exits with a non-zero status.
    """
    total_size = getLengthFromArgs(methodArgs)
    file_obj = methodArgs['fileObj']

    dd_cmd = [
        constants.EXT_DD,
        "of=%s" % dstImgPath,
        "bs=%s" % constants.MEGAB,
    ]
    proc = commands.execCmd(dd_cmd, sync=False)
    with utils.terminating(proc):
        # Feed the data through dd's stdin and signal EOF so dd can
        # flush and exit.
        _copyData(file_obj, proc.stdin, total_size)
        proc.stdin.close()

        if not proc.wait(WAIT_TIMEOUT):
            log.error("timeout waiting for dd process")
            raise se.StorageException()

        if proc.returncode != 0:
            log.error("dd error - code %s, stderr %s",
                      proc.returncode, proc.stderr.read(1000))
            raise se.MiscFileWriteException()
def _extendSizeRaw(self, newSize):
    """
    Grow this raw volume's backing file to newSize blocks.

    newSize is given in blocks and converted to bytes using BLOCK_SIZE.
    Real sanity checks belong to the caller; we only verify that the
    current and requested sizes are consistent, since both values feed
    the zero-fill math below.

    Raises se.StorageException when the on-disk size is nonsensical and
    se.VolumeResizeValueError when asked to shrink the volume.
    """
    path = self.getVolumePath()
    cur_bytes = self.oop.os.stat(path).st_size
    new_bytes = newSize * BLOCK_SIZE

    # Already at the requested size - nothing to do.
    if new_bytes == cur_bytes:
        return

    if cur_bytes <= 0:
        raise se.StorageException(
            "Volume size is impossible: %s" % cur_bytes)

    # Shrinking is not supported.
    if new_bytes < cur_bytes:
        raise se.VolumeResizeValueError(newSize)

    if self.getType() == sc.PREALLOCATED_VOL:
        # Pre-allocated volumes must be zero-filled up to the new size;
        # copy zeros onto the newly added tail only.
        misc.ddWatchCopy("/dev/zero", path, vars.task.aborting,
                         new_bytes - cur_bytes, cur_bytes)
    else:
        # Sparse files only need their size updated.
        self.oop.truncateFile(path, new_bytes)