def get_inventory(self, node_filter=None):
    """
    There is no guarantee which devices are returned by get_inventory.

    Runs ``ceph-volume inventory`` (directly, or via a virtualenv fallback
    when the binary is not on PATH), parses its JSON output and wraps the
    devices into a single 'localhost' InventoryNode.

    :param node_filter: ignored by this implementation
    :return: list with one orchestrator.InventoryNode for 'localhost'
    :raises Exception: if no parsable output line was produced
    """
    try:
        # BUG FIX: the executable is named 'ceph-volume' (hyphen);
        # the previous 'ceph_volume' spelling could never be found,
        # so this branch always fell through to the virtualenv path.
        c_v_out = check_output(['ceph-volume', 'inventory', '--format', 'json'])
    except OSError:
        cmd = """
        . /tmp/ceph-volume-virtualenv/bin/activate
        ceph-volume inventory --format json
        """
        c_v_out = check_output(cmd, shell=True)

    for out in c_v_out.splitlines():
        # Skip ceph-volume progress ('-->') and captured-stderr lines;
        # the first remaining line is assumed to be the JSON payload.
        if not out.startswith(b'-->') and not out.startswith(b' stderr'):
            self.log.error(out)
            devs = []
            for device in json.loads(out):
                dev = orchestrator.InventoryDevice()
                # sys_api reports rotational as the string "1"/"0".
                if device["sys_api"]["rotational"] == "1":
                    dev.type = 'hdd'  # 'ssd', 'hdd', 'nvme'
                elif 'nvme' in device["path"]:
                    dev.type = 'nvme'
                else:
                    dev.type = 'ssd'
                dev.size = device['sys_api']['size']
                dev.id = device['path']
                # Keep the full ceph-volume record for consumers.
                dev.extended = device
                devs.append(dev)
            return [orchestrator.InventoryNode('localhost', devs)]
    self.log.error('c-v failed: ' + str(c_v_out))
    raise Exception('c-v failed')
def get_inventory(self, node_filter=None, refresh=False):
    """
    There is no guarantee which devices are returned by get_inventory.

    Serves from ``self._inventory`` when populated; otherwise runs
    ``ceph-volume inventory`` and wraps the result for 'localhost'.
    """
    if node_filter and node_filter.nodes is not None:
        assert isinstance(node_filter.nodes, list)

    # Prefer the preloaded inventory, optionally narrowed by the filter.
    if self._inventory:
        if node_filter:
            wanted = node_filter.nodes
            return [node for node in self._inventory if node.name in wanted]
        return self._inventory

    try:
        c_v_out = check_output(
            ['ceph-volume', 'inventory', '--format', 'json'])
    except OSError:
        cmd = """
        . {tmpdir}/ceph-volume-virtualenv/bin/activate
        ceph-volume inventory --format json
        """
        # Try the $TMPDIR (default /tmp) virtualenv first, then one in
        # the current directory.
        try:
            tmpdir = os.environ.get('TMPDIR', '/tmp')
            c_v_out = check_output(cmd.format(tmpdir=tmpdir), shell=True)
        except (OSError, CalledProcessError):
            c_v_out = check_output(cmd.format(tmpdir='.'), shell=True)

    for out in c_v_out.splitlines():
        self.log.error(out)
        devs = inventory.Devices.from_json(json.loads(out))
        return [orchestrator.InventoryNode('localhost', devs)]
    self.log.error('c-v failed: ' + str(c_v_out))
    raise Exception('c-v failed')
def run(host, host_info):
    # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode
    """Return one host's InventoryNode, refreshing the cache when stale.

    Closes over ``self`` and ``refresh`` from the enclosing method.
    """
    ttl_minutes = int(self.get_module_option(
        "inventory_cache_timeout_min",
        self._DEFAULT_INVENTORY_CACHE_TIMEOUT_MIN))
    stale = host_info.outdated(ttl_minutes) or refresh
    if not stale:
        self.log.debug("reading cached inventory for '{}'".format(host))
    else:
        self.log.info("refresh stale inventory for '{}'".format(host))
        out, code = self._run_ceph_daemon(
            host, 'osd', 'ceph-volume',
            ['--', 'inventory', '--format=json'])
        # stdout and stderr get combined; assume last line is the real
        # output and everything preceding it is an error.
        host_info = orchestrator.OutdatableData(json.loads(out[-1]))
        self.inventory_cache[host] = host_info
    devices = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
        host_info.data)
    return orchestrator.InventoryNode(host, devices)
def get_inventory(self, node_filter=None, refresh=False):
    """
    There is no guarantee which devices are returned by get_inventory.

    Runs ``ceph-volume inventory`` (with a virtualenv fallback) and
    converts each reported device for a single 'localhost' node.
    """
    if node_filter and node_filter.nodes is not None:
        assert isinstance(node_filter.nodes, list)

    try:
        c_v_out = check_output(
            ['ceph-volume', 'inventory', '--format', 'json'])
    except OSError:
        cmd = """
        . {tmpdir}/ceph-volume-virtualenv/bin/activate
        ceph-volume inventory --format json
        """
        # Prefer $TMPDIR (default /tmp); fall back to the cwd venv.
        try:
            c_v_out = check_output(
                cmd.format(tmpdir=os.environ.get('TMPDIR', '/tmp')),
                shell=True)
        except (OSError, CalledProcessError):
            c_v_out = check_output(cmd.format(tmpdir='.'), shell=True)

    for out in c_v_out.splitlines():
        # Skip ceph-volume progress and captured-stderr chatter.
        if out.startswith((b'-->', b' stderr')):
            continue
        self.log.error(out)
        devs = [
            orchestrator.InventoryDevice.from_ceph_volume_inventory(device)
            for device in json.loads(out)
        ]
        return [orchestrator.InventoryNode('localhost', devs)]
    self.log.error('c-v failed: ' + str(c_v_out))
    raise Exception('c-v failed')
def get_inventory(self, node_filter=None, refresh=False):
    """Translate discovered-device records into InventoryNode objects."""
    node_list = None
    if node_filter and node_filter.nodes:
        # Explicit node list
        node_list = node_filter.nodes
    elif node_filter and node_filter.labels:
        # TODO: query k8s API to resolve to node list, and pass
        # it into RookCluster.get_discovered_devices
        raise NotImplementedError()

    discovered = self.rook_cluster.get_discovered_devices(node_list)

    result = []
    for node_name, node_devs in discovered.items():
        node_devices = []
        for d in node_devs:
            is_empty = d['empty']
            node_devices.append(inventory.Device(
                path=d['name'],
                sys_api=dict(
                    rotational='1' if d['rotational'] else '0',
                    size=d['size']),
                available=is_empty,
                rejected_reasons=[] if is_empty else ['not empty'],
            ))
        result.append(orchestrator.InventoryNode(
            node_name, inventory.Devices(node_devices)))
    return result
def get_hosts(self):
    """
    Return a list of hosts managed by the orchestrator.

    Notes:
      - skip async: manager reads from cache.

    TODO:
      - InventoryNode probably needs to be able to report labels
    """
    nodes = []
    for host_name in self.inventory_cache:
        nodes.append(orchestrator.InventoryNode(host_name))
    return nodes
def process_result(event_data):
    """Turn a result-event payload into a list of InventoryNode.

    Closes over ``self`` and ``node_filter`` from the enclosing method.
    """
    if not event_data['success']:
        self.log.error(event_data['return'])
        return []
    result = []
    for node_name, node_devs in event_data["return"].items():
        # Only populate the cache for unfiltered (whole-cluster) queries.
        if node_filter is None:
            self.inventory_cache[node_name] = \
                orchestrator.OutdatableData(node_devs)
        devs = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
            node_devs)
        result.append(orchestrator.InventoryNode(node_name, devs))
    return result
def process_result(event_data):
    """Convert a result-event payload into a list of InventoryNode."""
    result = []
    if event_data['success']:
        for node_name, node_devs in event_data["return"].items():
            devs = [
                orchestrator.InventoryDevice.from_ceph_volume_inventory(di)
                for di in node_devs
            ]
            result.append(orchestrator.InventoryNode(node_name, devs))
    else:
        self.log.error(event_data['return'])
    return result
def process_inventory_json(inventory_events, ar_client, playbook_uuid):
    """
    Adapt the output of the playbook used in 'get_inventory' to the
    Orchestrator expected output (list of InventoryNode)

    @param inventory_events: events dict with the results

        Example:
        inventory_events =
        {'37-100564f1-9fed-48c2-bd62-4ae8636dfcdb':
            {'host': '192.168.121.254',
             'task': 'RESULTS',
             'event': 'runner_on_ok'},
         '36-2016b900-e38f-7dcd-a2e7-00000000000e':
            {'host': '192.168.121.252'
             'task': 'RESULTS',
             'event': 'runner_on_ok'}}
    @param ar_client: Ansible Runner Service client
    @param playbook_uuid: Playbook identifier
    @return : list of InventoryNode
    """
    # Obtain the needed data for each result event
    inventory_nodes = []

    # Loop over the result events and request the event data
    for event_key, dummy_data in inventory_events.items():
        event_response = ar_client.http_get(
            EVENT_DATA_URL % (playbook_uuid, event_key))

        # Process the data for each event
        if event_response:
            event_data = json.loads(event_response.text)["data"]["event_data"]

            free_disks = event_data["res"]["disks_catalog"]

            # One InventoryNode per host; skip hosts already collected.
            for item, disk_data in free_disks.items():
                if item not in [host.name for host in inventory_nodes]:
                    devs = []
                    for dev_key, dev_data in disk_data.items():
                        if dev_key not in [device.id for device in devs]:
                            dev = orchestrator.InventoryDevice()
                            dev.id = dev_key
                            # BUG FIX: was the typo "sdd/nvme"
                            dev.type = 'hdd' if dev_data["rotational"] \
                                else "ssd/nvme"
                            dev.size = (dev_data["sectorsize"] *
                                        dev_data["sectors"])
                            devs.append(dev)

                    inventory_nodes.append(
                        orchestrator.InventoryNode(item, devs))

    return inventory_nodes
def get_hosts(self):
    """
    Return a list of hosts managed by the orchestrator.

    Notes:
      - skip async: manager reads from cache.

    TODO:
      - InventoryNode probably needs to be able to report labels
    """
    nodes = [
        orchestrator.InventoryNode(host_info["host"], [])
        for _key, host_info in self._get_hosts()
    ]
    return SSHReadCompletionReady(nodes)
def get_hosts(self):
    """
    Return a list of hosts managed by the orchestrator.

    Notes:
      - skip async: manager reads from cache.

    TODO:
      - InventoryNode probably needs to be able to report labels
    """
    nodes = []
    for host_name in self.inventory_cache:
        nodes.append(orchestrator.InventoryNode(host_name, []))
    return orchestrator.TrivialReadCompletion(nodes)
def process_result(event_data):
    """Map a successful event payload onto InventoryNode objects."""
    if not event_data['success']:
        return []
    result = []
    for node_name, node_devs in event_data["return"].items():
        devs = []
        for d in node_devs:
            dev = orchestrator.InventoryDevice()
            # Copy the per-device fields straight across.
            for attr in ('blank', 'type', 'id', 'size', 'extended',
                         'metadata_space_free'):
                setattr(dev, attr, d[attr])
            devs.append(dev)
        result.append(orchestrator.InventoryNode(node_name, devs))
    return result
def process_result(event_data):
    """Build InventoryNodes from an event payload, caching as we go.

    Closes over ``self`` and ``node_filter`` from the enclosing method.
    """
    if not event_data['success']:
        self.log.error(event_data['return'])
        return []
    result = []
    for node_name, node_devs in event_data["return"].items():
        # The cache will only be populated when this function is invoked
        # without a node filter, i.e. if you run it once for the whole
        # cluster, you can then call it for individual nodes and return
        # cached data. However, if you only *ever* call it for individual
        # nodes, the cache will never be populated, and you'll always have
        # the full round trip to DeepSea.
        if node_filter is None:
            self.inventory_cache[node_name] = \
                orchestrator.OutdatableData(node_devs)
        devs = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
            node_devs)
        result.append(orchestrator.InventoryNode(node_name, devs))
    return result
def run(host, host_info):
    # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode
    """Fetch (or reuse cached) ceph-volume inventory for one host.

    Closes over ``self`` and ``refresh`` from the enclosing method.
    """
    stale = host_info.outdated(self.inventory_cache_timeout) or refresh
    if stale:
        self.log.info("refresh stale inventory for '{}'".format(host))
        out, code = self._run_ceph_daemon(
            host, 'osd', 'ceph-volume',
            ['--', 'inventory', '--format=json'])
        host_info = orchestrator.OutdatableData(json.loads(''.join(out)))
        self.inventory_cache[host] = host_info
    else:
        self.log.debug("reading cached inventory for '{}'".format(host))
    return orchestrator.InventoryNode(
        host, inventory.Devices.from_json(host_info.data))
def process_inventory_json(inventory_events, ar_client, playbook_uuid, logger):
    """
    Adapt the output of the playbook used in 'get_inventory' to the
    Orchestrator expected output (list of InventoryNode)

    :param inventory_events: events dict with the results

        Example:
        inventory_events =
        {'37-100564f1-9fed-48c2-bd62-4ae8636dfcdb':
            {'host': '192.168.121.254',
             'task': 'list storage inventory',
             'event': 'runner_on_ok'},
         '36-2016b900-e38f-7dcd-a2e7-00000000000e':
            {'host': '192.168.121.252'
             'task': 'list storage inventory',
             'event': 'runner_on_ok'}}
    :param ar_client: Ansible Runner Service client
    :param playbook_uuid: Playbook identifier
    :return : list of InventoryNode
    """
    inventory_nodes = []

    # Request the detailed data for each result event.
    for event_key, dummy_data in inventory_events.items():
        event_response = ar_client.http_get(
            EVENT_DATA_URL % (playbook_uuid, event_key))
        if not event_response:
            continue

        event_data = json.loads(event_response.text)["data"]["event_data"]
        host = event_data["host"]
        devices = json.loads(event_data["res"]["stdout"])
        devs = [
            orchestrator.InventoryDevice.from_ceph_volume_inventory(
                storage_device)
            for storage_device in devices
        ]
        inventory_nodes.append(orchestrator.InventoryNode(host, devs))

    return inventory_nodes
def process_result(raw_event):
    """Parse a raw JSON event and map it onto InventoryNode objects."""
    event = json.loads(raw_event)
    if not event['data']['success']:
        return []
    result = []
    for node_name, node_devs in event["data"]["return"].items():
        devs = []
        for d in node_devs:
            dev = orchestrator.InventoryDevice()
            # Copy the per-device fields straight across.
            for attr in ('blank', 'type', 'id', 'size', 'extended',
                         'metadata_space_free'):
                setattr(dev, attr, d[attr])
            devs.append(dev)
        result.append(orchestrator.InventoryNode(node_name, devs))
    return result
def run(key, host_info):
    # Refresh or reuse one host's cached device inventory, persisting any
    # change back under ``key`` via self.set_store (presumably the mgr
    # key/value store — TODO confirm). Closes over ``self`` and
    # ``refresh`` from the enclosing scope.
    updated = False
    host = host_info["host"]
    if not host_info["inventory"]:
        # No inventory cached yet for this host: fetch and cache it.
        self.log.info("caching inventory for '{}'".format(host))
        host_info["inventory"] = self._get_device_inventory(host)
        updated = True
    else:
        # Cached inventory exists; refresh only when it is older than the
        # configured timeout (minutes) or when explicitly requested.
        timeout_min = int(
            self.get_module_option(
                "inventory_cache_timeout_min",
                self._DEFAULT_INVENTORY_CACHE_TIMEOUT_MIN))
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(
            minutes=timeout_min)
        last_update = self.time_from_string(
            host_info["last_inventory_refresh"])
        if last_update < cutoff or refresh:
            self.log.info(
                "refresh stale inventory for '{}'".format(host))
            host_info["inventory"] = self._get_device_inventory(host)
            updated = True
        else:
            self.log.info(
                "reading cached inventory for '{}'".format(host))
            pass
    if updated:
        # Record the refresh time (DATEFMT string) and persist the whole
        # host record so later calls can serve from cache.
        now = datetime.datetime.utcnow()
        now = now.strftime(DATEFMT)
        host_info["last_inventory_refresh"] = now
        self.set_store(key, json.dumps(host_info))
    # Convert each raw ceph-volume record into an InventoryDevice.
    devices = list(
        map(
            lambda di: orchestrator.InventoryDevice.
            from_ceph_volume_inventory(di), host_info["inventory"]))
    return orchestrator.InventoryNode(host, devices)
def run(host, host_info):
    # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode
    """Return one host's InventoryNode, refreshing the cache if stale.

    Closes over ``self`` and ``refresh`` from the enclosing method.
    """
    ttl_minutes = int(self.get_module_option(
        "inventory_cache_timeout_min",
        self._DEFAULT_INVENTORY_CACHE_TIMEOUT_MIN))
    if host_info.outdated(ttl_minutes) or refresh:
        self.log.info("refresh stale inventory for '{}'".format(host))
        host_info = orchestrator.OutdatableData(
            self._get_device_inventory(host))
        self.inventory_cache[host] = host_info
    else:
        self.log.debug("reading cached inventory for '{}'".format(host))
    devices = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
        host_info.data)
    return orchestrator.InventoryNode(host, devices)
def get_inventory(self, node_filter=None):
    """List discovered devices, grouped into one InventoryNode per node."""
    node_list = None
    if node_filter and node_filter.nodes:
        # Explicit node list
        node_list = node_filter.nodes
    elif node_filter and node_filter.labels:
        # TODO: query k8s API to resolve to node list, and pass
        # it into RookCluster.get_discovered_devices
        raise NotImplementedError()

    discovered = self.rook_cluster.get_discovered_devices(node_list)

    result = []
    for node_name, node_devs in discovered.items():
        node_devices = []
        for d in node_devs:
            dev = orchestrator.InventoryDevice()
            # XXX CAUTION! https://github.com/rook/rook/issues/1716
            # Passing this through for the sake of completeness but it
            # is not trustworthy!
            dev.blank = d['empty']
            dev.type = 'hdd' if d['rotational'] else 'ssd'
            dev.id = d['name']
            dev.size = d['size']
            if d['filesystem'] == "" and not d['rotational']:
                # Empty or partitioned SSD
                partitioned_space = sum(
                    p['size'] for p in d['Partitions'])
                dev.metadata_space_free = max(
                    0, d['size'] - partitioned_space)
            node_devices.append(dev)
        result.append(orchestrator.InventoryNode(node_name, node_devices))
    return result
def run(host, host_info):
    # type: (str, orchestrator.OutdatableData) -> orchestrator.InventoryNode
    """Return one host's InventoryNode, re-running ceph-volume if stale.

    Closes over ``self`` and ``refresh`` from the enclosing method.
    """
    ttl_minutes = int(self.get_module_option(
        "inventory_cache_timeout_min",
        self._DEFAULT_INVENTORY_CACHE_TIMEOUT_MIN))
    if host_info.outdated(ttl_minutes) or refresh:
        self.log.info("refresh stale inventory for '{}'".format(host))
        out, code = self._run_ceph_daemon(
            host, 'osd', 'ceph-volume',
            ['--', 'inventory', '--format=json'])
        host_info = orchestrator.OutdatableData(json.loads(''.join(out)))
        self.inventory_cache[host] = host_info
    else:
        self.log.debug("reading cached inventory for '{}'".format(host))
    devices = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(
        host_info.data)
    return orchestrator.InventoryNode(host, devices)
def get_hosts(self):
    """One InventoryNode (with no devices) per known node name."""
    nodes = []
    for name in self.rook_cluster.get_node_names():
        nodes.append(orchestrator.InventoryNode(name, []))
    return nodes
def get_hosts(self):
    """Return the cached inventory if set, else a default localhost node."""
    if not self._inventory:
        return [orchestrator.InventoryNode('localhost', [])]
    return self._inventory
def get_hosts(self):
    """Report a single static 'localhost' node with no devices."""
    localhost = orchestrator.InventoryNode('localhost', [])
    return [localhost]
def get_hosts(self):
    # type: () -> List[orchestrator.InventoryNode]
    """One InventoryNode (with an empty device list) per node name."""
    nodes = []
    for name in self.rook_cluster.get_node_names():
        nodes.append(
            orchestrator.InventoryNode(name, inventory.Devices([])))
    return nodes