def initialize(self, service_dir):
    """Set up the LVM volume group and load existing logical volume state.

    :param service_dir:
        Base directory of the resource service.
    """
    super(LocalDiskResourceService, self).initialize(service_dir)
    # Make sure LVM Volume Group set up
    localdiskutils.setup_device_lvm(self._block_dev, self._vg_name)
    # Finally retrieve the LV info
    lvs_info = lvm.lvsdisplay(group=self._vg_name)
    # Mark all retrieved volumes that were created by treadmill as 'stale';
    # they are left over from a previous incarnation of the service.
    for lv in lvs_info:
        lv['stale'] = lv['name'].startswith(TREADMILL_LV_PREFIX)
        if lv['open_count']:
            _LOGGER.warning('Logical volume in use: %r', lv['block_dev'])
    # Count the number of extents taken by non-treadmill volumes.
    # (Generator expression: no need to materialize an intermediate list.)
    self._extent_reserved = sum(
        lv['extent_size'] for lv in lvs_info if not lv['stale'])
    volumes = {
        lv['name']: {
            k: lv[k]
            for k in [
                'name', 'block_dev',
                'dev_major', 'dev_minor',
                'extent_size',
                'stale',
            ]
        }
        for lv in lvs_info
    }
    self._volumes = volumes
    # Cache the volume group status so requests can be admitted without
    # querying LVM every time.
    self._vg_status = localdiskutils.refresh_vg_status(self._vg_name)
def check_available_volume():
    """Check if we have enough space for benchmark.
    """
    status = localdiskutils.refresh_vg_status(vg_name)
    # Free space in bytes = extent size times number of free extents.
    free_bytes = status['extent_size'] * status['extent_free']
    return free_bytes > utils.size_to_bytes(total_volume)
def on_delete_request(self, rsrc_id):
    """Handle a volume deletion request.

    :returns:
        ``True`` on success, ``False`` if the volume could not be removed.
    """
    with lc.LogContext(_LOGGER, rsrc_id):
        volume_id = _uniqueid(rsrc_id)
        # Remove the volume from state (if present).
        if not self._destroy_volume(volume_id):
            return False
        # Extents were freed up; give every delayed request another chance.
        for delayed_id in self._pending:
            self.retry_request(delayed_id)
        self._pending = []
        # Refresh the cached volume group status from LVM so the service
        # sees the newly available space.
        self._vg_status = localdiskutils.refresh_vg_status(self._vg_name)
    return True
def synchronize(self):
    """Make sure that all stale volumes are removed.
    """
    destroyed_any = False
    # Iterate over a snapshot since _destroy_volume mutates the mapping.
    for volume_id in list(self._volumes):
        if self._volumes[volume_id].pop('stale', False):
            destroyed_any = True
            # Stale volume from a previous incarnation - destroy it.
            self._destroy_volume(volume_id)
    if not destroyed_any:
        return
    # Extents were freed up; give every delayed request another chance.
    for delayed_id in self._pending:
        self.retry_request(delayed_id)
    self._pending = []
    # Refresh the cached volume group status from LVM so the service sees
    # the newly available space.
    self._vg_status = localdiskutils.refresh_vg_status(self._vg_name)
def test__refresh_vg_status(self):
    """Test LVM volume group status querying.
    """
    # Access to a protected member
    # pylint: disable=W0212
    raw_vg_info = {
        'access': 'r/w',
        'extent_alloc': 0,
        'extent_free': 24,
        'extent_nb': 24,
        'extent_size': 4096,
        'lv_cur': 0,
        'lv_max': 0,
        'lv_open_count': 0,
        'max_size': -1,
        'name': 'test',
        'number': -1,
        'pv_actual': 1,
        'pv_cur': 1,
        'pv_max': 0,
        'size': 98304,
        'status': '772',
        'uuid': 'Vsj4xA-45Ad-v4Rp-VOOf-XzEf-Gxwr-erL7Zu',
    }
    treadmill.lvm.vgdisplay.return_value = raw_vg_info

    status = localdiskutils.refresh_vg_status('FOO')

    treadmill.lvm.vgdisplay.assert_called_with(group='FOO')
    # NOTE(review): 4096 -> 4194304 suggests extent_size is scaled by 1024
    # (KB -> bytes) inside refresh_vg_status - confirm against its code.
    expected_status = {
        'extent_free': 24,
        'extent_nb': 24,
        'extent_size': 4194304,
        'name': 'test',
    }
    self.assertEqual(status, expected_status)
def on_create_request(self, rsrc_id, rsrc_data):
    """Handle a volume creation request.

    Creates (if needed) a logical volume sized from ``rsrc_data['size']``
    and applies the service's default blkio throttling limits to the
    requesting app's cgroup.

    :param rsrc_id:
        Request id; used directly as the app unique name.
    :param rsrc_data:
        Request payload; only the ``'size'`` key is read here.
    :returns:
        Dict describing the volume, or ``None`` when the request is
        delayed due to lack of free extents.
    """
    app_unique_name = rsrc_id
    size = rsrc_data['size']
    # Service-wide default throttling limits for this volume's device.
    read_bps = self._default_read_bps
    write_bps = self._default_write_bps
    read_iops = self._default_read_iops
    write_iops = self._default_write_iops
    with lc.LogContext(_LOGGER, rsrc_id,
                       adapter_cls=lc.ContainerAdapter) as log:
        log.info('Processing request')
        size_in_bytes = utils.size_to_bytes(size)
        uniqueid = _uniqueid(app_unique_name)

        # Create the logical volume
        existing_volume = uniqueid in self._volumes
        if not existing_volume:
            # Admission check against the cached volume group status:
            # round the requested size up to whole extents.
            needed = math.ceil(
                size_in_bytes / self._vg_status['extent_size'])
            if needed > self._vg_status['extent_free']:
                # If we do not have enough space, delay the creation until
                # another volume is deleted.
                log.info(
                    'Delaying request %r until %d extents are free.'
                    ' Current volumes: %r',
                    rsrc_id, needed, self._volumes)
                self._pending.append(rsrc_id)
                return None

            lvm.lvcreate(
                volume=uniqueid,
                group=self._vg_name,
                size_in_bytes=size_in_bytes,
            )
            # We just created a volume, refresh cached status from LVM
            self._vg_status = localdiskutils.refresh_vg_status(
                self._vg_name)

        # Query LVM for the (new or pre-existing) volume's device numbers.
        lv_info = lvm.lvdisplay(volume=uniqueid, group=self._vg_name)

        # Configure block device using cgroups (this is idempotent)
        # FIXME(boysson): The unique id <-> cgroup relation should be
        #                 captured in the cgroup module.
        cgrp = os.path.join('treadmill', 'apps', app_unique_name)
        cgutils.create('blkio', cgrp)
        major, minor = lv_info['dev_major'], lv_info['dev_minor']
        # Apply the four blkio throttle limits to this device
        # (major:minor). bps limits are converted from human-readable
        # sizes; iops values are used as-is.
        cgroups.set_value(
            'blkio', cgrp,
            'blkio.throttle.write_bps_device',
            '{major}:{minor} {bps}'.format(
                major=major,
                minor=minor,
                bps=utils.size_to_bytes(write_bps),
            ))
        cgroups.set_value(
            'blkio', cgrp,
            'blkio.throttle.read_bps_device',
            '{major}:{minor} {bps}'.format(
                major=major,
                minor=minor,
                bps=utils.size_to_bytes(read_bps),
            ))
        cgroups.set_value(
            'blkio', cgrp,
            'blkio.throttle.write_iops_device',
            '{major}:{minor} {iops}'.format(major=major,
                                            minor=minor,
                                            iops=write_iops))
        cgroups.set_value(
            'blkio', cgrp,
            'blkio.throttle.read_iops_device',
            '{major}:{minor} {iops}'.format(major=major,
                                            minor=minor,
                                            iops=read_iops))
        volume_data = {
            k: lv_info[k]
            for k in ['name', 'block_dev',
                      'dev_major', 'dev_minor', 'extent_size']
        }

        # Record existence of the volume.
        self._volumes[lv_info['name']] = volume_data

    return volume_data