def test_from_json(filename):
    """Round-trip an on-disk inventory JSON file through Devices serialization."""
    with open(filename) as fp:
        raw = json.load(fp)
    # Some fixture files nest the device list under an 'inventory' key.
    payload = raw['inventory'] if 'inventory' in raw else raw
    parsed = Devices.from_json(payload)
    assert len(parsed.devices) == len(payload)
    # Serializing and re-parsing must produce an equal Devices object.
    assert Devices.from_json(parsed.to_json()) == parsed
def make_sample_data(available=available, data_devices=10, wal_devices=0, db_devices=2,
                     human_readable_size_data='50.00 GB', human_readable_size_wal='20.00 GB',
                     size=5368709121, human_readable_size_db='20.00 GB'):
    """Build a Devices inventory with plain data disks plus SSD-modelled WAL/DB disks.

    The `available` default deliberately binds the module-level `available`
    value at definition time.
    """
    factory = InventoryFactory()
    # Data disks: no rotational/model overrides, so factory defaults apply.
    data_disks = factory.produce(
        pieces=data_devices, available=available, size=size,
        human_readable_size=human_readable_size_data)
    # WAL disks are modelled as non-rotational SSDs.
    wal_disks = factory.produce(
        pieces=wal_devices, human_readable_size=human_readable_size_wal,
        rotational='0', model='ssd_type_model', size=size, available=available)
    # DB disks use the same SSD model string.
    db_disks = factory.produce(
        pieces=db_devices, human_readable_size=human_readable_size_db,
        rotational='0', size=size, model='ssd_type_model', available=available)
    return Devices(devices=[*data_disks, *wal_disks, *db_disks])
def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
    """Translate a DriveSelection into its ceph-volume command line.

    Fix: dropped the no-op self-assignment ``preview = preview`` that was
    present in the original body.
    """
    with with_host(cephadm_module, 'test'):
        dg = DriveGroupSpec(service_id='test.spec',
                            placement=PlacementSpec(host_pattern='test'),
                            data_devices=DeviceSelection(paths=devices))
        ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
        out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
        assert out in exp_command
def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    """Apply an all-devices OSD spec and verify the ceph-volume calls issued.

    Fix: ``assert x == False`` replaced with ``assert x is False``
    (PEP 8 / pycodestyle E712).
    """
    _run_cephadm.return_value = ('{}', '', 0)
    with with_host(cephadm_module, 'test'):
        spec = DriveGroupSpec(
            service_id='foo',
            placement=PlacementSpec(
                host_pattern='*',
            ),
            data_devices=DeviceSelection(
                all=True
            )
        )
        c = cephadm_module.apply([spec])
        assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
        # Seed the cache with one available device so the spec matches it.
        inventory = Devices([
            Device(
                '/dev/sdb',
                available=True
            ),
        ])
        cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})
        _run_cephadm.return_value = (['{}'], '', 0)
        assert cephadm_module._apply_all_services() is False
        # The OSD creation must have run 'lvm prepare' on the seeded device...
        _run_cephadm.assert_any_call(
            'test', 'osd', 'ceph-volume',
            ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore',
             '--data', '/dev/sdb', '--no-systemd'],
            env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
            stdin='{"config": "", "keyring": ""}')
        # ...and the final call lists LVM state as JSON.
        _run_cephadm.assert_called_with(
            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
def _mk_inventory(devices):
    """Clone *devices* and give each a stable ``/dev/sd<letter>`` path.

    Supports up to 26 devices (sda..sdz). Fix: the original used
    ``range(ord('a'), ord('z'))``, whose exclusive upper bound stopped at
    'y' and silently dropped a 26th device; the bound is now inclusive.
    Extra devices beyond 26 are still truncated by ``zip``.
    """
    devs = []
    for dev_, name in zip(devices, map(chr, range(ord('a'), ord('z') + 1))):
        # Round-trip through JSON so the clone shares no state with the input.
        dev = Device.from_json(dev_.to_json())
        dev.path = '/dev/sd' + name
        # Keep sys_api's path consistent with the assigned device path.
        dev.sys_api = dict(dev_.sys_api, path='/dev/sd' + name)
        devs.append(dev)
    return Devices(devices=devs)
def handle_metadata(self, data: Dict[str, Any]) -> None:
    """Process a metadata payload reported by a cephadm agent.

    Records the agent's port/counter/timestamp, ingests any 'ls',
    'networks', 'facts' and 'volume' sections into the mgr cache, and
    kicks the serve loop once all hosts' metadata is up to date.

    Fix: ``host`` was only bound inside the ``try``; if ``data['host']``
    raised, the except-clause f-string hit a NameError that escaped the
    handler. ``host`` is now pre-bound with a safe fallback.
    """
    host = data.get('host', '<unknown>')
    try:
        host = data['host']
        self.mgr.cache.agent_ports[host] = int(data['port'])
        if host not in self.mgr.cache.agent_counter:
            self.mgr.log.debug(
                f'Got metadata from agent on host {host} with no known counter entry. Starting counter at 1 and requesting new metadata'
            )
            self.mgr.cache.agent_counter[host] = 1
            self.mgr.agent_helpers._request_agent_acks({host})
            return

        # update timestamp of most recent agent update
        self.mgr.cache.agent_timestamp[host] = datetime_now()

        up_to_date = False

        int_ack = int(data['ack'])
        if int_ack == self.mgr.cache.agent_counter[host]:
            up_to_date = True
        else:
            # we got old counter value with message, inform agent of new timestamp
            if not self.mgr.cache.messaging_agent(host):
                self.mgr.agent_helpers._request_agent_acks({host})
            self.mgr.log.info(
                f'Received old metadata from agent on host {host}. Requested up-to-date metadata.'
            )

        if 'ls' in data and data['ls']:
            self.mgr._process_ls_output(host, data['ls'])
        if 'networks' in data and data['networks']:
            self.mgr.cache.update_host_networks(host, data['networks'])
        if 'facts' in data and data['facts']:
            self.mgr.cache.update_host_facts(host, json.loads(data['facts']))
        if 'volume' in data and data['volume']:
            ret = Devices.from_json(json.loads(data['volume']))
            self.mgr.cache.update_host_devices(host, ret.devices)

        if up_to_date:
            was_out_of_date = not self.mgr.cache.all_host_metadata_up_to_date()
            self.mgr.cache.metadata_up_to_date[host] = True
            if was_out_of_date and self.mgr.cache.all_host_metadata_up_to_date():
                self.mgr.log.info(
                    'New metadata from agent has made all hosts up to date. Kicking serve loop'
                )
                self.mgr._kick_serve_loop()
            self.mgr.log.info(
                f'Received up-to-date metadata from agent on host {host}.')
    except Exception as e:
        self.mgr.log.warning(
            f'Failed to update metadata with metadata from agent on host {host}: {e}'
        )
def test_orch_device_ls():
    """Verify 'orch device ls' output uses natural sorting on hostnames."""
    devices = Devices([Device("/dev/vdb", available=True)])
    hostnames = ["ceph-node-1", "ceph-node-2", "ceph-node-10"]
    ilist = OrchResult([InventoryHost(h, devices=devices) for h in hostnames])
    with mock.patch("orchestrator.OrchestratorCli.get_inventory", return_value=ilist):
        # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
        cmd = {'prefix': 'orch device ls'}
        m = OrchestratorCli('orchestrator', 0, 0)
        r = m._handle_command(None, cmd)
        expected = ('HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS \n'
                    'ceph-node-1 /dev/vdb unknown None 0 Yes 0s ago \n'
                    'ceph-node-2 /dev/vdb unknown None 0 Yes 0s ago \n'
                    'ceph-node-10 /dev/vdb unknown None 0 Yes 0s ago ')
        assert r == HandleCommandResult(retval=0, stdout=expected, stderr='')
def handle_metadata(self, data: Dict[str, Any]) -> str:
    """Process a metadata payload from a cephadm agent; return a status string.

    Updates the agent cache (port/counter/timestamp), ingests any 'ls',
    'networks', 'facts' and 'volume' sections, kicks the serve loop when
    daemon state changed or all hosts became up to date, and persists the
    agent cache entry.

    Fix: ``host`` was only bound inside the ``try``; if ``data['host']``
    raised, the except-clause f-string hit a NameError that escaped the
    handler. ``host`` is now pre-bound with a safe fallback.
    """
    host = data.get('host', '<unknown>')
    try:
        host = data['host']
        self.mgr.agent_cache.agent_ports[host] = int(data['port'])
        if host not in self.mgr.agent_cache.agent_counter:
            self.mgr.agent_cache.agent_counter[host] = 1
            self.mgr.agent_helpers._request_agent_acks({host})
            res = f'Got metadata from agent on host {host} with no known counter entry. Starting counter at 1 and requesting new metadata'
            self.mgr.log.debug(res)
            return res

        # update timestamp of most recent agent update
        self.mgr.agent_cache.agent_timestamp[host] = datetime_now()

        # Snapshot daemon state so we can detect changes after ingesting 'ls'.
        error_daemons_old = set([dd.name() for dd in self.mgr.cache.get_error_daemons()])
        daemon_count_old = len(self.mgr.cache.get_daemons_by_host(host))

        up_to_date = False

        int_ack = int(data['ack'])
        if int_ack == self.mgr.agent_cache.agent_counter[host]:
            up_to_date = True
        else:
            # we got old counter value with message, inform agent of new timestamp
            if not self.mgr.agent_cache.messaging_agent(host):
                self.mgr.agent_helpers._request_agent_acks({host})
            self.mgr.log.debug(
                f'Received old metadata from agent on host {host}. Requested up-to-date metadata.')

        if 'ls' in data and data['ls']:
            self.mgr._process_ls_output(host, data['ls'])
            self.mgr.update_failed_daemon_health_check()
        if 'networks' in data and data['networks']:
            self.mgr.cache.update_host_networks(host, data['networks'])
        if 'facts' in data and data['facts']:
            self.mgr.cache.update_host_facts(host, json.loads(data['facts']))
        if 'volume' in data and data['volume']:
            ret = Devices.from_json(json.loads(data['volume']))
            self.mgr.cache.update_host_devices(host, ret.devices)

        if (
            error_daemons_old != set([dd.name() for dd in self.mgr.cache.get_error_daemons()])
            or daemon_count_old != len(self.mgr.cache.get_daemons_by_host(host))
        ):
            self.mgr.log.debug(
                f'Change detected in state of daemons from {host} agent metadata. Kicking serve loop')
            self.mgr._kick_serve_loop()

        if up_to_date and ('ls' in data and data['ls']):
            was_out_of_date = not self.mgr.cache.all_host_metadata_up_to_date()
            self.mgr.cache.metadata_up_to_date[host] = True
            if was_out_of_date and self.mgr.cache.all_host_metadata_up_to_date():
                self.mgr.log.debug(
                    'New metadata from agent has made all hosts up to date. Kicking serve loop')
                self.mgr._kick_serve_loop()
            self.mgr.log.debug(
                f'Received up-to-date metadata from agent on host {host}.')

        self.mgr.agent_cache.save_agent(host)
        return 'Successfully processed metadata.'
    except Exception as e:
        err_str = f'Failed to update metadata with metadata from agent on host {host}: {e}'
        self.mgr.log.warning(err_str)
        return err_str
def test_from_json():
    """Parse a 4-device inventory sample (3 dm devices + 1 SSD with
    partitions and non-ceph LVs) and round-trip it through Devices
    serialization."""
    # Fixture captured from a real `ceph-volume inventory` run; whitespace
    # inside the literal is irrelevant to json.loads.
    data = json.loads("""
[
  {
    "available": false,
    "rejected_reasons": ["locked"],
    "sys_api": {
      "scheduler_mode": "", "rotational": "0", "vendor": "",
      "human_readable_size": "50.00 GB", "sectors": 0,
      "sas_device_handle": "", "partitions": {}, "rev": "",
      "sas_address": "", "locked": 1, "sectorsize": "512",
      "removable": "0", "path": "/dev/dm-0", "support_discard": "",
      "model": "", "ro": "0", "nr_requests": "128", "size": 53687091200
    },
    "lvs": [],
    "path": "/dev/dm-0"
  },
  {
    "available": false,
    "rejected_reasons": ["locked"],
    "sys_api": {
      "scheduler_mode": "", "rotational": "0", "vendor": "",
      "human_readable_size": "31.47 GB", "sectors": 0,
      "sas_device_handle": "", "partitions": {}, "rev": "",
      "sas_address": "", "locked": 1, "sectorsize": "512",
      "removable": "0", "path": "/dev/dm-1", "support_discard": "",
      "model": "", "ro": "0", "nr_requests": "128", "size": 33789313024
    },
    "lvs": [],
    "path": "/dev/dm-1"
  },
  {
    "available": false,
    "rejected_reasons": ["locked"],
    "sys_api": {
      "scheduler_mode": "", "rotational": "0", "vendor": "",
      "human_readable_size": "394.27 GB", "sectors": 0,
      "sas_device_handle": "", "partitions": {}, "rev": "",
      "sas_address": "", "locked": 1, "sectorsize": "512",
      "removable": "0", "path": "/dev/dm-2", "support_discard": "",
      "model": "", "ro": "0", "nr_requests": "128", "size": 423347879936
    },
    "lvs": [],
    "path": "/dev/dm-2"
  },
  {
    "available": false,
    "rejected_reasons": ["locked"],
    "sys_api": {
      "scheduler_mode": "cfq", "rotational": "0", "vendor": "ATA",
      "human_readable_size": "476.94 GB", "sectors": 0,
      "sas_device_handle": "",
      "partitions": {
        "sda2": {"start": "411648", "holders": [], "sectorsize": 512,
                 "sectors": "2097152", "size": "1024.00 MB"},
        "sda3": {"start": "2508800", "holders": ["dm-1", "dm-2", "dm-0"],
                 "sectorsize": 512, "sectors": "997705728",
                 "size": "475.74 GB"},
        "sda1": {"start": "2048", "holders": [], "sectorsize": 512,
                 "sectors": "409600", "size": "200.00 MB"}
      },
      "rev": "0000", "sas_address": "", "locked": 1, "sectorsize": "512",
      "removable": "0", "path": "/dev/sda", "support_discard": "",
      "model": "SanDisk SD8SN8U5", "ro": "0", "nr_requests": "128",
      "size": 512110190592
    },
    "lvs": [
      {"comment": "not used by ceph", "name": "swap"},
      {"comment": "not used by ceph", "name": "home"},
      {"comment": "not used by ceph", "name": "root"}
    ],
    "path": "/dev/sda"
  }
]""".strip())
    ds = Devices.from_json(data)
    assert len(ds.devices) == 4
    # to_json()/from_json() must be a lossless round trip.
    assert Devices.from_json(ds.to_json()) == ds