def _metaVolumeFstabUpdate(metaVolumeName):
    """Add an fstab entry mounting the gluster meta volume from localhost.

    The entry maps ``127.0.0.1:<metaVolumeName>`` onto META_VOL_MOUNT_POINT
    with FS_TYPE and the 'defaults,_netdev' mount options.

    Raises:
        ge.GlusterMetaVolumeFstabUpdateFailedException: if writing the
            fstab entry fails with an IOError.
    """
    try:
        fs_spec = "127.0.0.1:" + metaVolumeName
        fstab.FsTab().add(fs_spec, META_VOL_MOUNT_POINT, FS_TYPE,
                          mntOpts=['defaults', '_netdev'])
    except IOError as e:
        raise ge.GlusterMetaVolumeFstabUpdateFailedException(
            err=["fstab update failed", str(e)])
    except ge.GlusterHostStorageDeviceFsTabFoundException as e:
        # An entry already existing in fstab is not fatal: log and continue.
        # logging.warning instead of the deprecated logging.warn alias, and
        # str(e) instead of e.message (removed in Python 3).
        logging.warning(str(e))
def createBrick(brickName, mountPoint, devNameList, fsType=DEFAULT_FS_TYPE,
                raidParams={}):
    """Create a gluster brick on the given devices and mount it.

    Creates PVs on every device in devNameList, a VG, a thin pool sized
    according to the RAID layout in raidParams, a thin LV named brickName,
    formats it with DEFAULT_FS_TYPE, mounts it at mountPoint and records
    the mount in fstab.

    Args:
        brickName: name of the thin LV to create (also used to derive the
            vg/pool names "vg-<brickName>" / "pool-<brickName>").
        mountPoint: directory where the brick is mounted (created if absent).
        devNameList: names of the block devices to consume.
        fsType: requested filesystem; only DEFAULT_FS_TYPE is supported.
        raidParams: dict with optional 'type' ('6', '10' or absent for JBOD),
            'pdCount' and 'stripeSize' (KiB). Read-only here, so the mutable
            default is safe.

    Returns:
        Device-info dict produced by _getDeviceDict() for the new thin LV.

    Raises:
        ge.GlusterHostStorage* exceptions on any failed step.
    """
    def _getDeviceList(devNameList):
        # Map device names (possibly path-like) to blivet device objects.
        return [blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
                for devName in devNameList]

    def _createPV(deviceList, alignment):
        for dev in deviceList:
            # bz#1178705: Blivet always creates pv with 1MB dataalignment
            # Workaround: Till blivet fixes the issue, we use lvm pvcreate
            rc, out, err = commands.execCmd([_pvCreateCommandPath.cmd,
                                             '--dataalignment',
                                             '%sk' % alignment,
                                             dev.path])
            if rc:
                raise ge.GlusterHostStorageDevicePVCreateFailedException(
                    dev.path, alignment, rc, out, err)
        _reset_blivet(blivetEnv)
        return _getDeviceList([dev.name for dev in deviceList])

    def _createVG(vgName, deviceList, stripeSize):
        # bz#1198568: Blivet always creates vg with 1MB stripe size
        # Workaround: Till blivet fixes the issue, use vgcreate command
        devices = ','.join([device.path for device in deviceList])
        rc, out, err = commands.execCmd([_vgCreateCommandPath.cmd,
                                         '-s', '%sk' % stripeSize,
                                         vgName, devices])
        if rc:
            raise ge.GlusterHostStorageDeviceVGCreateFailedException(
                vgName, devices, stripeSize, rc, out, err)
        blivetEnv.reset()
        return blivetEnv.devicetree.getDeviceByName(vgName)

    def _createThinPool(poolName, vg, alignment, poolMetaDataSize,
                        poolDataSize):
        # Create the metadata LV and data LV, then convert them into a
        # thin pool with lvconvert.
        metaName = "meta-%s" % poolName
        vgPoolName = "%s/%s" % (vg.name, poolName)
        metaLv = LVMLogicalVolumeDevice(
            metaName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolMetaDataSize))
        poolLv = LVMLogicalVolumeDevice(
            poolName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolDataSize))
        blivetEnv.createDevice(metaLv)
        blivetEnv.createDevice(poolLv)
        blivetEnv.doIt()

        # bz#1100514: LVM2 currently only supports physical extent sizes
        # that are a power of 2. Till that support is available we need
        # to use lvconvert to achieve that.
        # bz#1179826: blivet doesn't support lvconvert functionality.
        # Workaround: Till the bz gets fixed, lvconvert command is used
        # NOTE: option spelled out in full ('--poolmetadataspare', was the
        # truncated '--poolmetadataspar') so it cannot become ambiguous
        # with future lvconvert options.
        rc, out, err = commands.execCmd([_lvconvertCommandPath.cmd,
                                         '--chunksize', '%sK' % alignment,
                                         '--thinpool', vgPoolName,
                                         '--poolmetadata',
                                         "%s/%s" % (vg.name, metaName),
                                         '--poolmetadataspare', 'n',
                                         '-y'])
        if rc:
            raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                vg.path, alignment, rc, out, err)

        # Disable zeroing of thin-pool chunks for performance.
        rc, out, err = commands.execCmd(
            [_lvchangeCommandPath.cmd, '--zero', 'n', vgPoolName])
        if rc:
            raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                vgPoolName, rc, out, err)
        _reset_blivet(blivetEnv)
        return blivetEnv.devicetree.getDeviceByName(poolLv.name)

    if os.path.ismount(mountPoint):
        raise ge.GlusterHostStorageMountPointInUseException(mountPoint)

    vgName = "vg-" + brickName
    poolName = "pool-" + brickName
    poolDataSize = 0
    count = 0
    raidType = raidParams.get('type')
    metaDataSizeKib = DEFAULT_METADATA_SIZE_KB
    if raidType == '6':
        count = raidParams['pdCount'] - 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = alignment
    elif raidType == '10':
        # Floor division: '/' would yield a float under Python 3 true
        # division and leak a float into alignment and the '%sk' args.
        count = raidParams['pdCount'] // 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = DEFAULT_CHUNK_SIZE_KB
    else:
        # Device type is JBOD
        alignment = DEFAULT_CHUNK_SIZE_KB
        chunkSize = DEFAULT_CHUNK_SIZE_KB

    blivetEnv = blivet.Blivet()
    _reset_blivet(blivetEnv)

    # get the devices list from the device name
    deviceList = _getDeviceList(devNameList)

    # raise an error when any device not actually found in the given list
    notFoundList = set(devNameList).difference(
        set([dev.name for dev in deviceList]))
    if notFoundList:
        raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)

    # raise an error when any device is used already in the given list
    inUseList = set(devNameList).difference(
        set([not _canCreateBrick(dev) or dev.name for dev in deviceList]))
    if inUseList:
        raise ge.GlusterHostStorageDeviceInUseException(inUseList)

    pvDeviceList = _createPV(deviceList, alignment)
    vg = _createVG(vgName, pvDeviceList, alignment)

    # The following calculation is based on the redhat storage performance doc
    # http://docbuilder.usersys.redhat.com/22522
    # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance
    # create ~16GB metadata LV (metaDataSizeKib) that has a size which is
    # a multiple of RAID stripe width if it is > minimum vg size
    # otherwise allocate a minimum of 0.5% of the data device size
    # and create data LV (poolDataSize) that has a size which is
    # a multiple of stripe width.
    vgSizeKib = int(_getDeviceSize(vg, 'KiB'))
    if _getDeviceSize(vg) < MIN_VG_SIZE:
        metaDataSizeKib = vgSizeKib * MIN_METADATA_PERCENT
    poolDataSize = vgSizeKib - metaDataSizeKib
    metaDataSizeKib = (metaDataSizeKib - (metaDataSizeKib % alignment))
    poolDataSize = (poolDataSize - (poolDataSize % alignment))

    # Creating a thin pool from the data LV and the metadata LV
    # lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool
    # --poolmetadata VOLGROUP/metadata_device_name
    pool = _createThinPool(poolName, vg, chunkSize, metaDataSizeKib,
                           poolDataSize)
    thinlv = LVMThinLogicalVolumeDevice(brickName, parents=[pool],
                                        size=vg.size, grow=True)
    blivetEnv.createDevice(thinlv)
    blivetEnv.doIt()

    if fsType != DEFAULT_FS_TYPE:
        log.error("fstype %s is currently unsupported", fsType)
        raise ge.GlusterHostStorageDeviceMkfsFailedException(
            thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType)

    # 'fmt' rather than 'format' so the builtin isn't shadowed.
    fmt = blivet.formats.getFormat(DEFAULT_FS_TYPE, device=thinlv.path,
                                   mountopts=DEFAULT_MOUNT_OPTIONS)
    fmt._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"]
    if raidParams.get('type') == '6':
        # Tell mkfs.xfs the RAID6 geometry (stripe width and unit).
        fmt._defaultFormatOptions += [
            "-d", "sw=%s,su=%sk" % (count, raidParams.get('stripeSize'))]
    blivetEnv.formatDevice(thinlv, fmt)
    blivetEnv.doIt()

    try:
        os.makedirs(mountPoint)
    except OSError as e:
        # An already-existing mount point directory is fine.
        if errno.EEXIST != e.errno:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
                err=[errMsg])
    thinlv.format.setup(mountpoint=mountPoint)
    blivetEnv.doIt()

    # bz#1230495: lvm devices are invisible and appears only after vgscan
    # Workaround: Till the bz gets fixed, We use vgscan to refresh LVM devices
    rc, out, err = commands.execCmd([_vgscanCommandPath.cmd])
    if rc:
        raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err)
    fstab.FsTab().add(thinlv.path, mountPoint, DEFAULT_FS_TYPE,
                      mntOpts=[DEFAULT_MOUNT_OPTIONS])
    return _getDeviceDict(thinlv)
def createBrick(brickName, mountPoint, devNameList, fsType=DEFAULT_FS_TYPE,
                raidParams={}):
    """Create a gluster brick on the given devices and mount it.

    Partitions/initializes the devices as needed, creates PVs, a VG, a thin
    pool (aligned to the RAID stripe width when raidParams describes RAID 6
    or RAID 10), a thin LV named brickName, formats it with DEFAULT_FS_TYPE,
    mounts it at mountPoint and records the mount in fstab.

    Args:
        brickName: name of the thin LV to create (also used to derive the
            vg/pool names "vg-<brickName>" / "pool-<brickName>").
        mountPoint: directory where the brick is mounted (created if absent).
        devNameList: names of the block devices to consume.
        fsType: requested filesystem; only DEFAULT_FS_TYPE is supported.
        raidParams: dict with optional 'type' ('6' or '10'), 'pdCount' and
            'stripeSize' (KiB); empty means JBOD. Read-only here, so the
            mutable default is safe.

    Returns:
        Device-info dict produced by _getDeviceDict() for the new thin LV.

    Raises:
        ge.GlusterHostStorage* exceptions on any failed step.
    """
    def _getDeviceList(devNameList):
        # Map device names (possibly path-like) to blivet device objects.
        return [blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
                for devName in devNameList]

    def _makePartition(deviceList):
        # Whole disks (and multipath devices) get initialized and receive a
        # single growable lvmpv partition; anything else is used as-is.
        pvDeviceList = []
        doPartitioning = False
        for dev in deviceList:
            if dev.type not in ['disk', 'dm-multipath']:
                pvDeviceList.append(dev)
            else:
                blivetEnv.initializeDisk(dev)
                part = blivetEnv.newPartition(fmt_type="lvmpv", grow=True,
                                              parents=[dev])
                blivetEnv.createDevice(part)
                pvDeviceList.append(part)
                doPartitioning = True
        if doPartitioning:
            blivet.partitioning.doPartitioning(blivetEnv)
        return pvDeviceList

    def _createPV(deviceList, alignment=0):
        def _createAlignedPV(deviceList, alignment):
            # bz#1178705: blivet always creates pv with 1MB dataalignment;
            # use lvm pvcreate with an explicit --dataalignment instead.
            for dev in deviceList:
                rc, out, err = lvm._createpv(
                    [dev.path], metadataSize=0,
                    options=('--dataalignment', '%sK' % alignment))
                if rc:
                    raise ge.GlusterHostStorageDevicePVCreateFailedException(
                        dev.path, alignment, rc, out, err)
            blivetEnv.reset()
            return _getDeviceList([dev.name for dev in deviceList])

        if alignment:
            # Commit pending partitioning before pvcreate touches the disks.
            blivetEnv.doIt()
            return _createAlignedPV(deviceList, alignment)

        # JBOD path: let blivet format the devices as lvmpv.
        for dev in deviceList:
            lvmpv = blivet.formats.getFormat("lvmpv", device=dev.path)
            blivetEnv.formatDevice(dev, lvmpv)
        blivet.partitioning.doPartitioning(blivetEnv)
        return deviceList

    def _createVG(vgName, deviceList, stripeSize=0):
        if stripeSize:
            vg = LVMVolumeGroupDevice(
                vgName, peSize=blivet.size.Size('%s KiB' % stripeSize),
                parents=deviceList)
        else:
            vg = LVMVolumeGroupDevice(vgName, parents=deviceList)
        blivetEnv.createDevice(vg)
        return vg

    def _createThinPool(poolName, vg, alignment=0, poolMetaDataSize=0,
                        poolDataSize=0):
        if not alignment:
            # bz#1180228: blivet doesn't handle percentage-based sizes
            # properly
            # Workaround: Till the bz gets fixed, we take only 99% size
            # from vg
            pool = LVMThinPoolDevice(poolName, parents=[vg],
                                     size=(vg.size * 99 / 100),
                                     grow=True)
            blivetEnv.createDevice(pool)
            return pool
        else:
            # Create metadata LV and data LV, then convert into a thin pool.
            metaName = "meta-%s" % poolName
            vgPoolName = "%s/%s" % (vg.name, poolName)
            metaLv = LVMLogicalVolumeDevice(
                metaName, parents=[vg],
                size=blivet.size.Size('%d KiB' % poolMetaDataSize))
            poolLv = LVMLogicalVolumeDevice(
                poolName, parents=[vg],
                size=blivet.size.Size('%d KiB' % poolDataSize))
            blivetEnv.createDevice(metaLv)
            blivetEnv.createDevice(poolLv)
            blivetEnv.doIt()

            # bz#1100514: LVM2 currently only supports physical extent sizes
            # that are a power of 2. Till that support is available we need
            # to use lvconvert to achieve that.
            # bz#1179826: blivet doesn't support lvconvert functionality.
            # Workaround: Till the bz gets fixed, lvconvert command is used
            # NOTE: option spelled out in full ('--poolmetadataspare', was
            # the truncated '--poolmetadataspar') so it cannot become
            # ambiguous with future lvconvert options.
            rc, out, err = utils.execCmd([_lvconvertCommandPath.cmd,
                                          '--chunksize', '%sK' % alignment,
                                          '--thinpool', vgPoolName,
                                          '--poolmetadata',
                                          "%s/%s" % (vg.name, metaName),
                                          '--poolmetadataspare', 'n',
                                          '-y'])
            if rc:
                raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                    vg.path, alignment, rc, out, err)

            # Disable zeroing of thin-pool chunks for performance.
            rc, out, err = utils.execCmd(
                [_lvchangeCommandPath.cmd, '--zero', 'n', vgPoolName])
            if rc:
                raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                    vgPoolName, rc, out, err)
            blivetEnv.reset()
            return blivetEnv.devicetree.getDeviceByName(poolLv.name)

    vgName = "vg-" + brickName
    poolName = "pool-" + brickName
    alignment = 0
    chunkSize = 0
    poolDataSize = 0
    count = 0
    metaDataSize = DEFAULT_METADATA_SIZE_KB
    if raidParams.get('type') == '6':
        count = raidParams['pdCount'] - 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = alignment
    elif raidParams.get('type') == '10':
        # Floor division: '/' would yield a float under Python 3 true
        # division and leak a float into alignment and the '%sK' args.
        count = raidParams['pdCount'] // 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = DEFAULT_CHUNK_SIZE_KB

    blivetEnv = blivet.Blivet()
    blivetEnv.reset()

    deviceList = _getDeviceList(devNameList)

    # raise an error when any device is not actually found in the given list
    notFoundList = set(devNameList).difference(
        set([dev.name for dev in deviceList]))
    if notFoundList:
        raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)

    # raise an error when any device in the given list is already in use
    inUseList = set(devNameList).difference(
        set([not _canCreateBrick(dev) or dev.name for dev in deviceList]))
    if inUseList:
        raise ge.GlusterHostStorageDeviceInUseException(inUseList)

    pvDeviceList = _makePartition(deviceList)
    pvDeviceList = _createPV(pvDeviceList, alignment)
    vg = _createVG(vgName, pvDeviceList, raidParams.get('stripeSize', 0))

    # The following calculation is based on the redhat storage performance doc
    # http://docbuilder.usersys.redhat.com/22522
    # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance
    if alignment:
        vgSizeKib = int(vg.size.convertTo(spec="KiB"))
        if vg.size.convertTo(spec='MiB') < MIN_VG_SIZE:
            metaDataSize = vgSizeKib * MIN_METADATA_PERCENT
        poolDataSize = vgSizeKib - metaDataSize
        # Round both sizes down to a multiple of the stripe width.
        metaDataSize = (metaDataSize - (metaDataSize % alignment))
        poolDataSize = (poolDataSize - (poolDataSize % alignment))

    pool = _createThinPool(poolName, vg, chunkSize, metaDataSize,
                           poolDataSize)
    thinlv = LVMThinLogicalVolumeDevice(brickName, parents=[pool],
                                        size=pool.size, grow=True)
    blivetEnv.createDevice(thinlv)
    blivetEnv.doIt()

    if fsType != DEFAULT_FS_TYPE:
        log.error("fstype %s is currently unsupported", fsType)
        raise ge.GlusterHostStorageDeviceMkfsFailedException(
            thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType)

    # 'fmt' rather than 'format' so the builtin isn't shadowed.
    fmt = blivet.formats.getFormat(DEFAULT_FS_TYPE, device=thinlv.path)
    if alignment:
        # Pass the RAID geometry (stripe width and unit) to mkfs.xfs.
        fmt._defaultFormatOptions = [
            "-f", "-K", "-i", "size=512",
            "-d", "sw=%s,su=%sk" % (count, raidParams.get('stripeSize')),
            "-n", "size=8192"]
    blivetEnv.formatDevice(thinlv, fmt)
    blivetEnv.doIt()

    try:
        os.makedirs(mountPoint)
    except OSError as e:
        # An already-existing mount point directory is fine.
        if errno.EEXIST != e.errno:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
                err=[errMsg])
    thinlv.format.setup(mountpoint=mountPoint)
    blivetEnv.doIt()
    fstab.FsTab().add(thinlv.path, mountPoint, DEFAULT_FS_TYPE)
    return _getDeviceDict(thinlv)