def get_compute_node_view(self, compute_node_hostnames, ts_from=None,
                          ts_to=None, name_filtering_support=False):
    """
    Return a subgraph view for one or more compute nodes.

    :param compute_node_hostnames: a single hostname (str), or a list of
        hostnames whose individual subgraphs are merged onto the network
        subgraph. Non-str items in the list are ignored.
    :param ts_from: optional start timestamp for the landscape query.
    :param ts_to: optional end timestamp for the landscape query.
    :param name_filtering_support: when True, copy every node's name into
        a 'node_name' attribute so name-based filtering can be applied.
    :return: the resulting graph, or None when compute_node_hostnames is
        neither a str nor a list.
    """
    res = None
    if isinstance(compute_node_hostnames, str):
        res = self._get_compute_node_subgraph(
            compute_node_hostnames, ts_from, ts_to)
    elif isinstance(compute_node_hostnames, list):
        res = self._get_network_subgraph(ts_from, ts_to)
        for hostname in compute_node_hostnames:
            if isinstance(hostname, str):
                graph = self._get_compute_node_subgraph(
                    hostname, ts_from, ts_to)
                if len(graph.nodes()) > 0:
                    # Keep the merge result, consistently with
                    # get_workload_view_graph. The return value was
                    # previously discarded, which loses the merge if
                    # merge_graph returns a new graph rather than
                    # mutating 'res' in place.
                    res = graphs.merge_graph(res, graph)
    # Guard against res being None (unsupported input type): the original
    # code raised AttributeError here when name filtering was requested.
    if name_filtering_support and res is not None:
        for node in res.nodes(data=True):
            name = InfoGraphNode.get_name(node)
            InfoGraphNode.set_attribute(node, 'node_name', name)
    return res
def get_workload_view_graph(self, stack_names, ts_from=None,
                            ts_to=None, name_filtering_support=False):
    """
    Return a graph which only includes the resources related to the
    execution of the stack names indicated in the input parameter.

    :param stack_names: a single stack name (str) or a list of stack
        names; the per-stack subgraphs are merged into one view.
    :param ts_from: optional start timestamp for the landscape query.
    :param ts_to: optional end timestamp for the landscape query.
    :param name_filtering_support: when True, copy every node's name into
        a 'node_name' attribute so name-based filtering can be applied.
    :return: the resulting graph, or None when no non-empty subgraph was
        found (or stack_names is neither a str nor a list).
    """
    res = None
    if isinstance(stack_names, str):
        res = self._get_workload_subgraph(stack_names, ts_from, ts_to)
    # TODO - URGENT: Check this with the new Landscape
    elif isinstance(stack_names, list):
        # Collect only the non-empty per-stack subgraphs.
        subgraphs = [
            graph for graph in
            (self._get_workload_subgraph(str(stack_name), ts_from, ts_to)
             for stack_name in stack_names)
            if len(graph.nodes()) > 0
        ]
        for graph in subgraphs:
            if res is None:
                # First non-empty subgraph seeds the result.
                res = graph
            else:
                # TODO - URGENT: Fix this. Put Merge within the analytics
                res = graphs.merge_graph(res, graph)
    # NOTE(review): the original code counted machine nodes here into an
    # unused local ('machine_count'), which also raised AttributeError
    # when res was None; the dead loop has been removed.
    # Guard against res being None (no matching resources found).
    if name_filtering_support and res is not None:
        for node in res.nodes(data=True):
            name = InfoGraphNode.get_name(node)
            InfoGraphNode.set_attribute(node, 'node_name', name)
    return res