def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
    """A DriveSelection built from ``devices`` renders to one of the expected
    ceph-volume commands.

    Parametrized (per the surrounding fixtures) with:
      devices:     device paths fed into the drive group
      preview:     whether to render a preview (dry-run) command
      exp_command: container of acceptable command strings
    """
    with with_host(cephadm_module, 'test'):
        dg = DriveGroupSpec(
            service_id='test.spec',
            placement=PlacementSpec(host_pattern='test'),
            data_devices=DeviceSelection(paths=devices),
        )
        ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
        # (a no-op `preview = preview` self-assignment was removed here)
        out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
        # exp_command is a collection of acceptable renderings
        assert out in exp_command
def _mk_device(rotational=True, locked=False, size="394.27 GB"):
    """Return a one-element list holding a synthetic ``Device`` for tests.

    ``rotational`` and ``size`` flow into ``sys_api``; ``locked`` drives
    both availability and the rejection reason.
    """
    sys_api = {
        "rotational": '1' if rotational else '0',
        "vendor": "Vendor",
        "human_readable_size": size,
        "partitions": {},
        "locked": int(locked),
        "sectorsize": "512",
        "removable": "0",
        "path": "??",
        "support_discard": "",
        "model": "Model",
        "ro": "0",
        "nr_requests": "128",
        # fixed byte count; conversion from human_readable_size is ignored
        "size": 423347879936,
    }
    device = Device(
        path='??',
        sys_api=sys_api,
        available=not locked,
        rejected_reasons=['locked'] if locked else [],
        lvs=[],
        device_id="Model-Vendor-foobar",
    )
    return [device]
def test_compare_raise(self):
    """SizeMatcher.compare must raise when the filter value ('None')
    cannot be parsed as a size.

    Note: pytest.raises itself fails the test when no exception is
    raised, so the original trailing ``pytest.fail(...)`` inside the
    ``with`` block was unreachable dead code and has been removed.
    """
    matcher = drive_selection.SizeMatcher('size', 'None')
    disk_dict = Device(path='/dev/vdb', sys_api=dict(size='20.00 GB'))
    with pytest.raises(Exception):
        matcher.compare(disk_dict)
def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    """Applying an OSD drive group spec schedules the service and, once a
    matching device is in the host cache, drives ceph-volume.

    Verifies both the ``lvm prepare`` invocation (including the
    CEPH_VOLUME_OSDSPEC_AFFINITY env var) and the trailing
    ``lvm list`` refresh call.
    """
    _run_cephadm.return_value = ('{}', '', 0)
    with with_host(cephadm_module, 'test'):
        spec = DriveGroupSpec(
            service_id='foo',
            placement=PlacementSpec(
                host_pattern='*',
            ),
            data_devices=DeviceSelection(
                all=True
            )
        )
        c = cephadm_module.apply([spec])
        assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
        # Seed the cache with a device that matches the spec.
        inventory = Devices([
            Device(
                '/dev/sdb',
                available=True
            ),
        ])
        cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})
        _run_cephadm.return_value = (['{}'], '', 0)
        # `is False`, not `== False` (pycodestyle E712); the method returns a bool.
        assert cephadm_module._apply_all_services() is False
        _run_cephadm.assert_any_call(
            'test', 'osd', 'ceph-volume',
            ['--config-json', '-', '--', 'lvm', 'prepare',
             '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
            env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'],
            error_ok=True, stdin='{"config": "", "keyring": ""}')
        _run_cephadm.assert_called_with(
            'test', 'osd', 'ceph-volume',
            ['--', 'lvm', 'list', '--format', 'json'])
def __init__(self,
             paths=None,  # type: Optional[List[str]]
             model=None,  # type: Optional[str]
             size=None,  # type: Optional[str]
             rotational=None,  # type: Optional[bool]
             limit=None,  # type: Optional[int]
             vendor=None,  # type: Optional[str]
             all=False,  # type: bool
             ):
    """
    Ephemeral drive group device specification.

    Each filter below narrows the set of devices this selection matches;
    ``paths`` pins explicit device paths, while ``all`` matches everything.
    """
    #: List of Device objects for devices paths.
    self.paths = [] if paths is None else [Device(path) for path in paths]  # type: List[Device]
    #: A wildcard string. e.g: "SDD*" or "SanDisk SD8SN8U5"
    self.model = model
    #: Match on the VENDOR property of the drive
    self.vendor = vendor
    #: Size specification of format LOW:HIGH.
    #: Can also take the form :HIGH, LOW:
    #: or an exact value (as ceph-volume inventory reports)
    self.size: Optional[str] = size
    #: is the drive rotating or not
    self.rotational = rotational
    #: Limit the number of devices added to this Drive Group. Devices
    #: are used from top to bottom in the output of ``ceph-volume inventory``
    self.limit = limit
    #: Matches all devices. Can only be used for data devices
    self.all = all
def _mk_inventory(devices):
    """Assign sequential ``/dev/sdX`` paths (a..z) to copies of *devices*
    and wrap them in a ``Devices`` inventory.

    Each input device is round-tripped through JSON so the copies are
    independent of the originals.
    """
    devs = []
    # ord('z') + 1 so the letter 'z' itself is usable: the original range
    # stopped at 'y', silently truncating a 26th device via zip().
    for dev_, name in zip(devices, map(chr, range(ord('a'), ord('z') + 1))):
        dev = Device.from_json(dev_.to_json())
        dev.path = '/dev/sd' + name
        dev.sys_api = dict(dev_.sys_api, path='/dev/sd' + name)
        devs.append(dev)
    return Devices(devices=devs)
def get_dg_spec(self, dg):
    """Validate drive-group JSON *dg* and translate it into ceph-volume
    commands against the live device inventory.
    """
    spec = DriveGroupSpec._from_json_impl(dg)
    spec.validate()
    # Collect the current inventory report.
    inv = Inventory([])
    inv.main()
    report = inv.get_report()
    devices = [Device.from_json(entry) for entry in report]
    selection = DriveSelection(spec, devices)
    return to_ceph_volume(selection)
def test_get_disk_key_3(self):
    """A key present in sys_api: _get_disk_key returns its value.

    (virtual is False, key is found, return value of key is expected)
    """
    device = Device(path='/dev/vdb', sys_api={'foo': 'bar'})
    result = drive_selection.Matcher('foo', 'bar')._get_disk_key(device)
    assert result is device.sys_api.get('foo')
def test_get_disk_key_4(self):
    """_get_disk_key raises when the key is absent from sys_api.

    Note: pytest.raises itself fails the test when no exception is
    raised, so the original trailing ``pytest.fail(...)`` inside the
    ``with`` block was unreachable dead code and has been removed.
    """
    disk_map = Device(path='/dev/vdb')
    with pytest.raises(Exception):
        drive_selection.Matcher('bar', 'foo')._get_disk_key(disk_map)
def device(self, i: Any) -> Tuple[str, Device]:
    """Translate one LSO device-discovery entry into ``(hostname, Device)``."""
    hostname = i.metadata.labels['kubernetes.io/hostname']
    discovery = self.lso_devices[
        i.metadata.annotations['storage.openshift.com/device-id']]
    rotational = '1' if discovery['property'] == 'Rotational' else '0'
    dev = Device(
        path=discovery['path'],
        sys_api=dict(
            size=discovery['size'],
            rotational=rotational,
        ),
        available=discovery['status']['state'] == 'Available',
        device_id=discovery['deviceID'].split('/')[-1],
        lsm_data=dict(serialNum=discovery['serial']),
    )
    return (hostname, dev)
def test_orch_device_ls():
    """`orch device ls` output lists hosts in natural sort order.

    Three hosts share the same single-device inventory; the expected
    table must show ceph-node-1, ceph-node-2, ceph-node-10 in that
    order (natural, not lexicographic, hostname sorting).
    """
    devices = Devices([Device("/dev/vdb", available=True)])
    ilist = OrchResult([InventoryHost("ceph-node-1", devices=devices),
                        InventoryHost("ceph-node-2", devices=devices),
                        InventoryHost("ceph-node-10", devices=devices)])
    with mock.patch("orchestrator.OrchestratorCli.get_inventory", return_value=ilist):
        # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
        cmd = {
            'prefix': 'orch device ls'
        }
        m = OrchestratorCli('orchestrator', 0, 0)
        r = m._handle_command(None, cmd)
        # NOTE(review): the expected table's column spacing must match the
        # formatter's output exactly — verify alignment if this fails.
        out = 'HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS \n'\
              'ceph-node-1 /dev/vdb unknown None 0 Yes 0s ago \n'\
              'ceph-node-2 /dev/vdb unknown None 0 Yes 0s ago \n'\
              'ceph-node-10 /dev/vdb unknown None 0 Yes 0s ago '
        assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
def device(self, i: Any) -> Tuple[str, Device]:
    """Translate one LSO device-discovery entry into ``(hostname, Device)``,
    splitting the reported model string into vendor and model parts.
    """
    hostname = i.metadata.labels['kubernetes.io/hostname']
    discovery = self.lso_devices[
        i.metadata.annotations['storage.openshift.com/device-id']]
    pv_name = i.metadata.name
    # First word of the model string is the vendor, the rest is the model.
    model_words = discovery['model'].split()
    vendor: str = model_words[0] if len(model_words) >= 1 else ''
    model: str = ' '.join(model_words[1:]) if len(model_words) > 1 else ''
    rotational = '1' if discovery['property'] == 'Rotational' else '0'
    dev = Device(
        path=discovery['path'],
        sys_api=dict(
            size=discovery['size'],
            rotational=rotational,
            node=hostname,
            pv_name=pv_name,
            model=model,
            vendor=vendor,
        ),
        available=discovery['status']['state'] == 'Available',
        device_id=discovery['deviceID'].split('/')[-1],
        lsm_data=dict(serialNum=discovery['serial']),
    )
    return (hostname, dev)
def device(self, i: 'client.V1PersistentVolume') -> Tuple[str, Device]:
    """Translate a PersistentVolume into ``(node, Device)``.

    The node comes from a single-hostname node-affinity term when
    present; the path falls back from host_path to local to the
    openshift device-name annotation.
    """
    node = 'N/A'
    if i.spec.node_affinity:
        terms = i.spec.node_affinity.required.node_selector_terms
        # Only accept the unambiguous case: one term, one expression,
        # keyed on the hostname label, with exactly one value.
        if (len(terms) == 1
                and len(terms[0].match_expressions) == 1
                and terms[0].match_expressions[0].key == 'kubernetes.io/hostname'
                and len(terms[0].match_expressions[0].values) == 1):
            node = terms[0].match_expressions[0].values[0]
    size = self.convert_size(i.spec.capacity['storage'])
    if i.spec.host_path:
        path = i.spec.host_path.path
    elif i.spec.local:
        path = i.spec.local.path
    elif (i.metadata.annotations
          and 'storage.openshift.com/device-name' in i.metadata.annotations):
        path = '/dev/' + i.metadata.annotations['storage.openshift.com/device-name']
    else:
        path = ''
    state = i.spec.volume_mode == 'Block' and i.status.phase == 'Available'
    pv_name = i.metadata.name
    dev = Device(
        path=path,
        sys_api=dict(size=size, node=node, pv_name=pv_name),
        available=state,
    )
    return (node, dev)
def test_compare_exact(self):
    """An exact size filter matches a disk of precisely that size."""
    device = Device(path='/dev/vdb', sys_api=dict(size='20.00 GB'))
    matcher = drive_selection.SizeMatcher('size', '20GB')
    assert matcher.compare(device) is True
def test_compare_value_not_true(self):
    """AllMatcher matches every device, even when constructed with 'False'."""
    device = Device(path='/dev/vdb')
    matcher = drive_selection.AllMatcher('all', 'False')
    assert matcher.compare(device) is True
def test_compare_false(self):
    """EqualityMatcher rejects a device whose attribute value differs."""
    device = Device(path='/dev/vdb', sys_api=dict(rotates='1'))
    matcher = drive_selection.EqualityMatcher('rotates', '0')
    assert matcher.compare(device) is False
def test_compare_false(self):
    """SubstringMatcher rejects a model string that lacks the substring."""
    device = Device(path='/dev/vdb', sys_api=dict(model='nothing_matching'))
    matcher = drive_selection.SubstringMatcher('model', 'samsung')
    assert matcher.compare(device) is False
def test_compare_at_least_1TB(self, test_input, expected):
    """A '1TB:' (lower-bound) size filter matches disks of at least 1 TB.

    Parametrized: ``test_input`` is the disk's reported size and
    ``expected`` the expected match result.
    """
    device = Device(path='/dev/sdz', sys_api=dict(size=test_input))
    matcher = drive_selection.SizeMatcher('size', '1TB:')
    assert matcher.compare(device) is expected
def test_compare_high(self, test_input, expected):
    """A ':50GB' (upper-bound) size filter matches disks up to 50 GB.

    Parametrized: ``test_input`` is the disk's reported size and
    ``expected`` the expected match result.
    """
    device = Device(path='/dev/vdb', sys_api=dict(size=test_input))
    matcher = drive_selection.SizeMatcher('size', ':50GB')
    assert matcher.compare(device) is expected
def produce(self):
    """Return ``self.pieces`` Device objects, each built from
    ``self.device_conf``.
    """
    # range(n) instead of range(0, n); `_` marks the unused loop variable.
    return [Device(**self.device_conf) for _ in range(self.pieces)]