def test_from_json(filename):
    """Round-trip a device-inventory JSON file through Devices.

    Loads *filename*, unwraps an optional top-level 'inventory' key,
    and checks that from_json/to_json is lossless.
    """
    with open(filename) as fh:
        payload = json.load(fh)
    # Some fixture files wrap the device list under an 'inventory' key.
    if 'inventory' in payload:
        payload = payload['inventory']
    inventory = Devices.from_json(payload)
    # One Device per JSON entry.
    assert len(inventory.devices) == len(payload)
    # Serialization must round-trip without loss.
    assert Devices.from_json(inventory.to_json()) == inventory
def handle_metadata(self, data: Dict[str, Any]) -> None:
    """Process a metadata payload reported by a cephadm agent.

    Expects ``data`` to carry at least ``host``, ``port`` and ``ack``;
    optional ``ls``/``networks``/``facts``/``volume`` sections are
    forwarded to the matching cache-update helpers. Never raises:
    any failure is logged as a warning.
    """
    # Pre-bind host so the except clause below can reference it even when
    # data['host'] itself is what raised. (The original code hit an
    # UnboundLocalError in that case instead of logging the warning.)
    host = '<unknown>'
    try:
        host = data['host']
        self.mgr.cache.agent_ports[host] = int(data['port'])
        if host not in self.mgr.cache.agent_counter:
            # First contact from this agent: seed the counter and ask the
            # agent to resend metadata stamped with it.
            self.mgr.log.debug(
                f'Got metadata from agent on host {host} with no known counter entry. Starting counter at 1 and requesting new metadata')
            self.mgr.cache.agent_counter[host] = 1
            self.mgr.agent_helpers._request_agent_acks({host})
            return

        # update timestamp of most recent agent update
        self.mgr.cache.agent_timestamp[host] = datetime_now()

        up_to_date = False

        int_ack = int(data['ack'])
        if int_ack == self.mgr.cache.agent_counter[host]:
            up_to_date = True
        else:
            # we got old counter value with message, inform agent of new timestamp
            if not self.mgr.cache.messaging_agent(host):
                self.mgr.agent_helpers._request_agent_acks({host})
            self.mgr.log.info(
                f'Received old metadata from agent on host {host}. Requested up-to-date metadata.')

        # Each section is optional and only processed when non-empty.
        if 'ls' in data and data['ls']:
            self.mgr._process_ls_output(host, data['ls'])
        if 'networks' in data and data['networks']:
            self.mgr.cache.update_host_networks(host, data['networks'])
        if 'facts' in data and data['facts']:
            self.mgr.cache.update_host_facts(host, json.loads(data['facts']))
        if 'volume' in data and data['volume']:
            ret = Devices.from_json(json.loads(data['volume']))
            self.mgr.cache.update_host_devices(host, ret.devices)

        if up_to_date:
            # Kick the serve loop only on the transition from "some host
            # stale" to "all hosts current".
            was_out_of_date = not self.mgr.cache.all_host_metadata_up_to_date()
            self.mgr.cache.metadata_up_to_date[host] = True
            if was_out_of_date and self.mgr.cache.all_host_metadata_up_to_date():
                self.mgr.log.info(
                    'New metadata from agent has made all hosts up to date. Kicking serve loop')
                self.mgr._kick_serve_loop()
            self.mgr.log.info(
                f'Received up-to-date metadata from agent on host {host}.')
    except Exception as e:
        self.mgr.log.warning(
            f'Failed to update metadata with metadata from agent on host {host}: {e}')
def handle_metadata(self, data: Dict[str, Any]) -> str:
    """Process a metadata payload reported by a cephadm agent.

    Expects ``data`` to carry at least ``host``, ``port`` and ``ack``;
    optional ``ls``/``networks``/``facts``/``volume`` sections are
    forwarded to the matching cache-update helpers.

    Returns a human-readable status string. Never raises: any failure
    is logged as a warning and returned as the status string.
    """
    # Pre-bind host so the except clause below can reference it even when
    # data['host'] itself is what raised. (The original code hit an
    # UnboundLocalError in that case instead of returning the warning.)
    host = '<unknown>'
    try:
        host = data['host']
        self.mgr.agent_cache.agent_ports[host] = int(data['port'])
        if host not in self.mgr.agent_cache.agent_counter:
            # First contact from this agent: seed the counter and ask the
            # agent to resend metadata stamped with it.
            self.mgr.agent_cache.agent_counter[host] = 1
            self.mgr.agent_helpers._request_agent_acks({host})
            res = f'Got metadata from agent on host {host} with no known counter entry. Starting counter at 1 and requesting new metadata'
            self.mgr.log.debug(res)
            return res

        # update timestamp of most recent agent update
        self.mgr.agent_cache.agent_timestamp[host] = datetime_now()

        # Snapshot daemon state so we can detect changes after processing.
        error_daemons_old = set([dd.name() for dd in self.mgr.cache.get_error_daemons()])
        daemon_count_old = len(self.mgr.cache.get_daemons_by_host(host))

        up_to_date = False

        int_ack = int(data['ack'])
        if int_ack == self.mgr.agent_cache.agent_counter[host]:
            up_to_date = True
        else:
            # we got old counter value with message, inform agent of new timestamp
            if not self.mgr.agent_cache.messaging_agent(host):
                self.mgr.agent_helpers._request_agent_acks({host})
            self.mgr.log.debug(
                f'Received old metadata from agent on host {host}. Requested up-to-date metadata.')

        # Each section is optional and only processed when non-empty.
        if 'ls' in data and data['ls']:
            self.mgr._process_ls_output(host, data['ls'])
            self.mgr.update_failed_daemon_health_check()
        if 'networks' in data and data['networks']:
            self.mgr.cache.update_host_networks(host, data['networks'])
        if 'facts' in data and data['facts']:
            self.mgr.cache.update_host_facts(host, json.loads(data['facts']))
        if 'volume' in data and data['volume']:
            ret = Devices.from_json(json.loads(data['volume']))
            self.mgr.cache.update_host_devices(host, ret.devices)

        if (
            error_daemons_old != set([dd.name() for dd in self.mgr.cache.get_error_daemons()])
            or daemon_count_old != len(self.mgr.cache.get_daemons_by_host(host))
        ):
            self.mgr.log.debug(
                f'Change detected in state of daemons from {host} agent metadata. Kicking serve loop')
            self.mgr._kick_serve_loop()

        if up_to_date and ('ls' in data and data['ls']):
            # Kick the serve loop only on the transition from "some host
            # stale" to "all hosts current".
            was_out_of_date = not self.mgr.cache.all_host_metadata_up_to_date()
            self.mgr.cache.metadata_up_to_date[host] = True
            if was_out_of_date and self.mgr.cache.all_host_metadata_up_to_date():
                self.mgr.log.debug(
                    'New metadata from agent has made all hosts up to date. Kicking serve loop')
                self.mgr._kick_serve_loop()
            self.mgr.log.debug(
                f'Received up-to-date metadata from agent on host {host}.')

        self.mgr.agent_cache.save_agent(host)
        return 'Successfully processed metadata.'
    except Exception as e:
        err_str = f'Failed to update metadata with metadata from agent on host {host}: {e}'
        self.mgr.log.warning(err_str)
        return err_str
def test_from_json():
    """Parse a canned ceph-volume inventory blob into Devices.

    The fixture lists three locked dm devices plus /dev/sda (with its
    partitions and non-ceph LVs); round-tripping through to_json must
    be lossless.
    """
    raw = """
    [
      {
        "available": false,
        "rejected_reasons": ["locked"],
        "sys_api": {
          "scheduler_mode": "", "rotational": "0", "vendor": "",
          "human_readable_size": "50.00 GB", "sectors": 0,
          "sas_device_handle": "", "partitions": {}, "rev": "",
          "sas_address": "", "locked": 1, "sectorsize": "512",
          "removable": "0", "path": "/dev/dm-0", "support_discard": "",
          "model": "", "ro": "0", "nr_requests": "128",
          "size": 53687091200
        },
        "lvs": [],
        "path": "/dev/dm-0"
      },
      {
        "available": false,
        "rejected_reasons": ["locked"],
        "sys_api": {
          "scheduler_mode": "", "rotational": "0", "vendor": "",
          "human_readable_size": "31.47 GB", "sectors": 0,
          "sas_device_handle": "", "partitions": {}, "rev": "",
          "sas_address": "", "locked": 1, "sectorsize": "512",
          "removable": "0", "path": "/dev/dm-1", "support_discard": "",
          "model": "", "ro": "0", "nr_requests": "128",
          "size": 33789313024
        },
        "lvs": [],
        "path": "/dev/dm-1"
      },
      {
        "available": false,
        "rejected_reasons": ["locked"],
        "sys_api": {
          "scheduler_mode": "", "rotational": "0", "vendor": "",
          "human_readable_size": "394.27 GB", "sectors": 0,
          "sas_device_handle": "", "partitions": {}, "rev": "",
          "sas_address": "", "locked": 1, "sectorsize": "512",
          "removable": "0", "path": "/dev/dm-2", "support_discard": "",
          "model": "", "ro": "0", "nr_requests": "128",
          "size": 423347879936
        },
        "lvs": [],
        "path": "/dev/dm-2"
      },
      {
        "available": false,
        "rejected_reasons": ["locked"],
        "sys_api": {
          "scheduler_mode": "cfq", "rotational": "0", "vendor": "ATA",
          "human_readable_size": "476.94 GB", "sectors": 0,
          "sas_device_handle": "",
          "partitions": {
            "sda2": {
              "start": "411648", "holders": [], "sectorsize": 512,
              "sectors": "2097152", "size": "1024.00 MB"
            },
            "sda3": {
              "start": "2508800", "holders": ["dm-1", "dm-2", "dm-0"],
              "sectorsize": 512, "sectors": "997705728",
              "size": "475.74 GB"
            },
            "sda1": {
              "start": "2048", "holders": [], "sectorsize": 512,
              "sectors": "409600", "size": "200.00 MB"
            }
          },
          "rev": "0000", "sas_address": "", "locked": 1,
          "sectorsize": "512", "removable": "0", "path": "/dev/sda",
          "support_discard": "", "model": "SanDisk SD8SN8U5",
          "ro": "0", "nr_requests": "128", "size": 512110190592
        },
        "lvs": [
          {"comment": "not used by ceph", "name": "swap"},
          {"comment": "not used by ceph", "name": "home"},
          {"comment": "not used by ceph", "name": "root"}
        ],
        "path": "/dev/sda"
      }
    ]
    """
    parsed = json.loads(raw.strip())
    inventory = Devices.from_json(parsed)
    # One Device per entry in the fixture.
    assert len(inventory.devices) == 4
    # Serialization must round-trip without loss.
    assert Devices.from_json(inventory.to_json()) == inventory