Code Example #1
    def _create_pandas_data_frame_from_graph(graph, metrics='all'):
        """
        Save on csv files the data in the graph.
        Stores one csv per node of the graph

        :param graph: (NetworkX Graph) Graph to be annotated with data
        :param directory: (str) directory where to store csv files
        :return: NetworkX Graph annotated with telemetry data
        """
        result = pandas.DataFrame()
        for node in graph.nodes(data=True):
            node_name = InfoGraphNode.get_name(node)
            node_layer = InfoGraphNode.get_layer(node)
            node_type = InfoGraphNode.get_type(node)

            # This method supports export of either normal metrics coming
            #  from telemetry agent or utilization type of metrics.
            if metrics == 'all':
                node_telemetry_data = InfoGraphNode.get_telemetry_data(node)
            else:
                node_telemetry_data = InfoGraphNode.get_utilization(node)

            # Normalize timestamps to whole-second integers before merging.
            node_telemetry_data['timestamp'] = node_telemetry_data[
                'timestamp'].astype(float).round().astype(int)
            for metric_name in node_telemetry_data.columns.values:
                if metric_name == 'timestamp':
                    continue
                col_name = "{}@{}@{}@{}".\
                    format(node_name, node_layer, node_type, metric_name)
                col_name = col_name.replace(".", "_")
                node_telemetry_data = node_telemetry_data.rename(
                    columns={metric_name: col_name})

                # LOG.info("TELEMETRIA: {}".format(node_telemetry_data.columns.values))

            if node_telemetry_data.empty or len(
                    node_telemetry_data.columns) <= 1:
                continue
            if result.empty:
                result = node_telemetry_data.copy()
            else:
                node_telemetry_data = \
                    node_telemetry_data.drop_duplicates(subset='timestamp')
                result = pandas.merge(result,
                                      node_telemetry_data,
                                      how='outer',
                                      on='timestamp')
            # TODO: Try with this removed
            # result.set_index(['timestamp'])
        return result
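The merge strategy above can be illustrated with plain pandas. The sketch below is a minimal, self-contained approximation (the node names, layers and metric values are made up, and InfoGraphNode is not used): timestamps are rounded to whole seconds, metric columns are qualified as node@layer@type@metric with dots replaced by underscores, and the per-node frames are outer-merged on 'timestamp'.

    import pandas

    # Two hypothetical per-node telemetry frames (stand-ins for the data
    # returned by InfoGraphNode.get_telemetry_data()).
    vm = pandas.DataFrame({'timestamp': [1000.4, 1001.6],
                           'cpu.util': [0.5, 0.7]})
    host = pandas.DataFrame({'timestamp': [1000.0, 1002.0],
                             'cpu.util': [0.2, 0.3]})

    frames = []
    for name, layer, ntype, df in [('vm-1', 'virtual', 'vm', vm),
                                   ('host-1', 'physical', 'machine', host)]:
        df = df.copy()
        # Round timestamps to whole-second integers so frames can be joined.
        df['timestamp'] = df['timestamp'].astype(float).round().astype(int)
        # Qualify metric columns as node@layer@type@metric, '.' -> '_'.
        df = df.rename(columns={
            c: "{}@{}@{}@{}".format(name, layer, ntype, c).replace(".", "_")
            for c in df.columns if c != 'timestamp'})
        frames.append(df)

    result = frames[0]
    for df in frames[1:]:
        result = pandas.merge(result, df.drop_duplicates(subset='timestamp'),
                              how='outer', on='timestamp')
    print(result)  # one row per timestamp, NaN where a node has no sample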
Code Example #2
    def _get_workload_subgraph(self, stack_name, ts_from=None, ts_to=None):
        res = None
        try:
            # Get the node ID for the stack_name and query the landscape

            properties = [("stack_name", stack_name)]
            try:
                time_window = ts_to - ts_from
            except TypeError:
                # Default to a 10-minute window when timestamps are missing.
                time_window = 600
            landscape_res = landscape.get_node_by_properties(
                properties, ts_from, time_window)

            if not landscape_res:
                LOG.info("No graph for a stack returned from analytics")
                # try a service name
                properties = [("service_name", stack_name)]
                landscape_res = landscape.get_node_by_properties(
                    properties, ts_from, time_window)
                if not landscape_res:
                    LOG.info("No graph for a service returned from analytics")
                    return None

            res = landscape.get_subgraph(landscape_res.nodes()[0], ts_from,
                                         time_window)
        except Exception as e:
            LOG.debug('Something went seriously wrong.')
            LOG.error(e)

        if res is None:
            return None

        for node in res.nodes(data=True):
            attrs = InfoGraphNode.get_attributes(node)
            attrs = InfoGraphUtilities.str_to_dict(attrs)
            InfoGraphNode.set_attributes(node, attrs)
        return res
Code Example #3
    def get_metrics(graph, metrics='all'):
        """
        Returns all the metrics associated with the input graph
        :param graph: (NetworkX Graph) Graph to be annotated with data
        :param metrics: metric type to be considered. default = all
        :return: the list of metrics associated with the graph
        """
        metric_list = []
        for node in graph.nodes(data=True):
            node_name = InfoGraphNode.get_name(node)
            node_layer = InfoGraphNode.get_layer(node)
            node_type = InfoGraphNode.get_type(node)
            # This method supports export of either normal metrics coming
            #  from telemetry agent or utilization type of metrics.
            if metrics == 'all':
                node_telemetry_data = InfoGraphNode.get_telemetry_data(node)
            else:
                node_telemetry_data = InfoGraphNode.get_utilization(node)

            metric_list.extend([
                "{}@{}@{}@{}".format(node_name, node_layer, node_type,
                                     metric_name).replace(".", "_")
                for metric_name in node_telemetry_data.columns.values
                if metric_name != 'timestamp'
            ])
        return metric_list
Code Example #4
    def get_compute_node_view(self,
                              compute_node_hostnames,
                              ts_from=None,
                              ts_to=None,
                              name_filtering_support=False):
        """
        Returns a view for the compute node.
        """
        res = None
        if isinstance(compute_node_hostnames, str):
            res = self._get_compute_node_subgraph(compute_node_hostnames,
                                                  ts_from, ts_to)

        elif isinstance(compute_node_hostnames, list):
            res = self._get_network_subgraph(ts_from, ts_to)
            for hostname in compute_node_hostnames:
                if isinstance(hostname, str):
                    graph = self._get_compute_node_subgraph(
                        hostname, ts_from, ts_to)
                    if len(graph.nodes()) > 0:
                        graphs.merge_graph(res, graph)

        if res is not None and name_filtering_support:
            for node in res.nodes(data=True):
                name = InfoGraphNode.get_name(node)
                InfoGraphNode.set_attribute(node, 'node_name', name)
        return res
Code Example #5
 def _nic(self, node):
     nic = None
     if InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_NIC:
         attrs = InfoGraphNode.get_attributes(node)
         if 'osdev_network-name' in attrs:
             nic = attrs["osdev_network-name"]
     return nic
Code Example #6
 def _nova_uuid(self, node):
     if InfoGraphNode.get_type(node) == NODE_TYPE.INSTANCE_DISK:
         disk_name = InfoGraphNode.get_name(node)
         vm = self.landscape.get_neighbour_by_type(disk_name, "vm")
         return vm
     if InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_MACHINE:
         vm = self.vms.pop()
         return vm
     return None
Code Example #7
 def _disk(self, node):
     disk = None
     if (InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_DISK
             or InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_MACHINE):
         attrs = InfoGraphNode.get_attributes(node)
         if 'osdev_storage-name' in attrs:
             disk = attrs["osdev_storage-name"]
     elif InfoGraphNode.get_type(node) == NODE_TYPE.INSTANCE_DISK:
         disk = InfoGraphNode.get_name(node).split("_")[1]
     return disk
Code Example #8
 def _stack(self, node):
     if InfoGraphNode.get_type(node) == NODE_TYPE.VIRTUAL_MACHINE:
         # Taking service node to which the VM is connected
         predecessors = self.landscape.predecessors(
             InfoGraphNode.get_name(node))
         for predecessor in predecessors:
             predecessor_node = self.landscape.node[predecessor]
             if predecessor_node['type'] == NODE_TYPE.SERVICE_COMPUTE:
                 if 'stack_name' in predecessor_node:
                     return predecessor_node["stack_name"]
     return None
Code Example #9
 def _pu(self, node, metric):
     pu = None
     if (InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_PU
             or InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_MACHINE):
         attrs = InfoGraphNode.get_attributes(node)
         if 'os_index' in attrs:
             pu = attrs["os_index"]
     # Prefix 'cpu' onto the CPU index for the per-CPU metrics below.
     if pu and ('intel/proc/schedstat/cpu/' in metric
                or 'intel/psutil/cpu/' in metric):
         pu = "cpu{}".format(pu)
     return pu
Code Example #10
 def _nic(self, node, tag_key):
     nic = None
     if InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_NIC:
         attrs = InfoGraphNode.get_attributes(node)
         if tag_key == "hardware_addr":
             nic = attrs["address"]
         elif 'osdev_network-name' in attrs:
             nic = attrs["osdev_network-name"]
         elif 'name' in attrs:
             nic = attrs["name"]
     return nic
Code Example #11
    def _get_compute_node_subgraph(self,
                                   compute_node,
                                   ts_from=None,
                                   ts_to=None):

        res = self.db.\
            get_subgraph('type', 'machine', timestamp=ts_to)

        for node in res.nodes(data=True):
            attrs = InfoGraphNode.get_attributes(node)
            attrs = InfoGraphUtilities.str_to_dict(attrs)
            InfoGraphNode.set_attributes(node, attrs)
        return res
Code Example #12
    def get_node_subgraph(self, node_id, ts_from=None, ts_to=None):
        try:
            time_window = ts_to - ts_from
        except TypeError:
            # Default to a 10-minute window when timestamps are missing.
            time_window = 600
        landscape_res = landscape.get_subgraph(node_id, ts_from, time_window)

        for node in landscape_res.nodes(data=True):
            attrs = InfoGraphNode.get_attributes(node)
            attrs = InfoGraphUtilities.str_to_dict(attrs)
            InfoGraphNode.set_attributes(node, attrs)

        return landscape_res
Code Example #13
    def extract_infrastructure_graph(workload_name, ts_from, ts_to):
        """
        Returns the entire landscape at the current time

        :return:
        """
        landscape_ip = ConfigHelper.get("LANDSCAPE", "host")
        landscape_port = ConfigHelper.get("LANDSCAPE", "port")
        subgraph_extraction = SubGraphExtraction(landscape_ip=landscape_ip,
                                                 landscape_port=landscape_port)
        # res = subgraph_extraction.get_workload_view_graph(
        #     workload_name, int(ts_from), int(ts_to),
        #     name_filtering_support=True)
        res = landscape.get_graph()
        #PARALLEL = True
        if PARALLEL:
            i = 0
            threads = []
            cpu_count = multiprocessing.cpu_count()
            all_node = res.nodes(data=True)
            # Integer division: number of nodes handled per worker thread.
            no_node_thread = len(res.nodes()) // cpu_count
            node_pool = []

            for node in all_node:
                if i < no_node_thread:
                    node_pool.append(node)
                    i = i + 1
                else:
                    thread1 = ParallelLandscape(
                        i, "Thread-{}".format(InfoGraphNode.get_name(node)), i,
                        node_pool)
                    thread1.start()
                    threads.append(thread1)
                    # Start the next pool with the current node so it is kept.
                    i = 1
                    node_pool = [node]
            if len(node_pool) != 0:
                thread1 = ParallelLandscape(
                    i, "Thread-{}".format(InfoGraphNode.get_name(node)), i,
                    node_pool)
                thread1.start()
                threads.append(thread1)

            [t.join() for t in threads]
        else:
            for node in res.nodes(data=True):
                attrs = InfoGraphNode.get_attributes(node)
                attrs = InfoGraphUtilities.str_to_dict(attrs)
                InfoGraphNode.set_attributes(node, attrs)
        return res
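The thread-pooling pattern in extract_infrastructure_graph (and in get_annotated_graph further below) boils down to splitting the node list into roughly cpu_count chunks and handing each chunk to a worker. The sketch below is a simplified, standard-library-only illustration of that chunking; the worker body is a placeholder rather than ParallelLandscape, and the accumulate-and-spawn loop is restructured into slicing for brevity.

    import multiprocessing
    import threading

    def _process_pool(node_pool):
        # Placeholder for the per-pool work (attribute parsing in the original).
        for node in node_pool:
            pass

    nodes = list(range(23))                  # stand-in for res.nodes(data=True)
    cpu_count = multiprocessing.cpu_count()
    chunk = max(1, len(nodes) // cpu_count)  # nodes per thread, never zero

    threads = []
    for start in range(0, len(nodes), chunk):
        t = threading.Thread(target=_process_pool,
                             args=(nodes[start:start + chunk],))
        t.start()
        threads.append(t)

    [t.join() for t in threads]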
Code Example #14
 def _source(self, node):
     attrs = InfoGraphNode.get_attributes(node)
     if InfoGraphNode.get_layer(node) == GRAPH_LAYER.PHYSICAL:
         if 'allocation' in attrs:
             return attrs['allocation']
         # fix due to the landscape
         else:
             while attrs.get('attributes', None):
                 attrs = attrs['attributes']
             if 'allocation' in attrs:
                 return attrs['allocation']
     if InfoGraphNode.get_type(node) == NODE_TYPE.VIRTUAL_MACHINE:
         if 'vm_name' in attrs:
             return attrs['vm_name']
     if InfoGraphNode.get_type(node) == NODE_TYPE.INSTANCE_DISK:
         # The machine is the source as this is a libvirt disk.
         disk_name = InfoGraphNode.get_name(node)
         vm = self.landscape.get_neighbour_by_type(
             disk_name, NODE_TYPE.VIRTUAL_MACHINE)
         machine = self.landscape.get_neighbour_by_type(
             vm, NODE_TYPE.PHYSICAL_MACHINE)
         return machine
     if InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_MACHINE:
         if 'name' in attrs:
             return attrs['name']
     if InfoGraphNode.get_type(node) == NODE_TYPE.DOCKER_CONTAINER:
         docker_node = self.landscape.get_neighbour_by_type(
             InfoGraphNode.get_name(node), 'docker_node')
         if docker_node:
             machine = self.landscape.get_neighbour_by_type(
                 docker_node, 'machine')
             return machine
     return None
Code Example #15
    def _source_metrics(self, node):
        """
        Retrieves metrics associated with a source/host.  The source is 
        identified by the node and then all metrics types are collected for 
        that source.  If the node is physical then the metric types are 
        retrieved using just the machine name as the source, if the node is 
        virtual then the source (the vm hostname) and the stack name are 
        required. 
        """

        metric_types = []
        node_layer = InfoGraphNode.get_layer(node)
        node_type = InfoGraphNode.get_type(node)
        if node_layer == GRAPH_LAYER.PHYSICAL \
                or node_type == NODE_TYPE.INSTANCE_DISK:
            try:
                source = self._source(node)
                identifier = source
                query_tags = {"source": source}
                metric_types = self._cached_metrics(identifier, query_tags)
            except Exception as ex:
                LOG.error('Malformed graph: {}'.format(
                    InfoGraphNode.get_name(node)))
                LOG.error(ex)

        elif node_layer == GRAPH_LAYER.VIRTUAL:
            source = self._source(node)
            stack = self._stack(node)

            #LOG.info("SOURCE: {}".format(source))
            #LOG.info("STACK: {}".format(stack))

            if stack is not None:

                identifier = "{}-{}".format(source, stack)
                # query_tags = {"source": source, "stack": stack}

                query_tags = {"stack_name": stack}
                metric_types = self._cached_metrics(identifier, query_tags)
        elif node_type == NODE_TYPE.DOCKER_CONTAINER:
            source = self._source(node)
            docker_id = InfoGraphNode.get_docker_id(node)
            if docker_id is not None and source is not None:
                identifier = "{}-{}".format(source, docker_id)
                query_tags = {"docker_id": docker_id, "source": source}
                metric_types = self._cached_metrics(identifier, query_tags)

        return metric_types
Code Example #16
    def _get_metrics(self, node):
        """
        Retrieves the metrics for a node based on its type.  This is done by
        checking if the node is in the NODE_METRICS dictionary and then pulling
        out a list of metric heads for that node type.  If the metric head 
        matches the metric retrieved from the host then we attach it.
        """
        metrics = []
        node_type = InfoGraphNode.get_type(node)

        if node_type in NODE_METRICS:
            source_metrics = self._source_metrics(node)
            for metric in source_metrics:
                for metric_start in NODE_METRICS[node_type]:
                    if metric.startswith(metric_start) \
                            and not self._exception(node, metric):
                        if metric.startswith("intel/net/"):
                            nic_id = self._nic(node)
                            if nic_id in metric:
                                metrics.append(metric)
                        if metric.startswith('intel/libvirt/'):
                            self._get_nova_uuids(node)
                            for x in range(0, len(self.vms)):
                                #LOG.info('Adding {}'.format(metric))
                                metrics.append(metric)
                        else:
                            metrics.append(metric)
        return metrics
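Conceptually, _get_metrics is a prefix filter: a node type maps to a set of metric-name prefixes, and only source metrics starting with one of those prefixes survive (with extra per-NIC and per-VM handling). A stripped-down, hypothetical illustration of the prefix matching alone:

    # Hypothetical prefix table and source metrics, for illustration only.
    NODE_METRICS = {'pu': ('intel/proc/schedstat/cpu/', 'intel/use/compute/')}
    source_metrics = [
        'intel/proc/schedstat/cpu/0/utilization',
        'intel/use/compute/utilization',
        'intel/psutil/net/eth0/bytes_sent',
    ]

    node_type = 'pu'
    selected = [m for m in source_metrics
                if any(m.startswith(p) for p in NODE_METRICS[node_type])]
    print(selected)  # the network metric is filtered out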
Code Example #17
 def _tag_value(self, tag_key, node, metric):
     # TODO: fully qualify this with metric name, if metric is this and tag
     tag_value = None
     if tag_key == "source":
         tag_value = self._source(node)
     elif tag_key in set(["device_id", "disk", "device_name"]):
         tag_value = self._disk(node)
     elif tag_key in set(["cpu_id", "cpuID", "core_id"]):
         tag_value = self._pu(node, metric)
     elif tag_key in set([
             "nic_id", "interface", "network_interface", "interface_name",
             "hardware_addr"
     ]):
         tag_value = self._nic(node, tag_key)
     elif tag_key == "nova_uuid":
         tag_value = self._nova_uuid(node)
     elif tag_key == "stack_name":
         tag_value = self._stack(node)
     elif tag_key == "dev_id":
         if "intel/use/network" in metric:
             tag_value = self._nic(node, tag_key)
         elif "intel/use/disk" in metric:
             tag_value = self._disk(node)
     elif tag_key == "docker_id":
         tag_value = InfoGraphNode.get_docker_id(node)
     return tag_value
Code Example #18
    def get_queries(self, landscape, node, ts_from, ts_to):
        """
        Return queries to use for telemetry for the specific node.

        :param landscape:
        :param node:
        :param ts_from:
        :param ts_to:
        :return:
        """

        queries = []
        self.landscape = landscape
        node_layer = InfoGraphNode.get_layer(node)
        # Service Layer metrics are not required
        #if node_layer == GRAPH_LAYER.SERVICE:
        #    return []
        for metric in self._get_metrics(node):
            try:
                query = self._build_query(metric, node, ts_from, ts_to)
                queries.append(query)
            except Exception as e:
                LOG.error('Exception for metric: {}'.format(metric))
                LOG.error(e)

        return queries
Code Example #19
    def run(self, workload, service_type="stack", telemetry_system='snap'):

        # Extract data from Info Core
        service_subgraphs = list()
        try:
            LOG.debug("Workload: {}".format(workload.get_workload_name()))
            landscape_ip = ConfigHelper.get("LANDSCAPE", "host")
            landscape_port = ConfigHelper.get("LANDSCAPE", "port")
            landscape.set_landscape_server(host=landscape_ip,
                                           port=landscape_port)
            sge = SubGraphExtraction(landscape_ip, landscape_port)
            res = sge.get_hist_service_nodes(service_type,
                                             workload.get_workload_name())
            nodes = [(node[0], InfoGraphNode.get_attributes(node).get('from'),
                      InfoGraphNode.get_attributes(node).get('to'))
                     for node in res.nodes(data=True)]
            nodes.sort(reverse=True, key=self.node_sort)
            counter = 0
            for node in nodes:
                node_id = node[0]
                from_ts = int(time.time())
                # to_ts = int(attrs['to'])
                tf = from_ts * -1
                subgraph = landscape.get_subgraph(node_id, from_ts, tf)
                if len(subgraph.nodes()) > 0:
                    annotated_subgraph = SubgraphUtilities.graph_telemetry_annotation(
                        subgraph, node[1], node[2], telemetry_system)
                    service_subgraphs.append(annotated_subgraph)
                #print "cProfile Stats"+node_id
                #print "=============="
                #pr.print_stats(sort='time')
                #print "=============="
                #pr.disable()
                counter = counter + 1
                if counter == SUBGRAPH_LIMIT:
                    break
        except Exception as e:
            LOG.error(e)
            LOG.error("No topology data has been found for the selected "
                      "workload.")
            import traceback
            traceback.print_exc()
            exit()
        workload.save_results(self.__filter_name__, service_subgraphs)
        return service_subgraphs
Code Example #20
 def _node_is_nic_on_management_net(node, graph, mng_net_name):
     node_name = InfoGraphNode.get_name(node)
     node_type = InfoGraphNode.get_type(node)
     if node_type == InfoGraphNodeType.VIRTUAL_NIC or \
        node_type == InfoGraphNodeType.VIRTUAL_NIC_2:
         neighs = graph.neighbors(node_name)
         for n in neighs:
             neighbor = InfoGraphNode.\
                 get_node(graph, n)
             if InfoGraphNode.get_type(neighbor) == \
                     InfoGraphNodeType.VIRTUAL_NETWORK:
                 network_name = \
                     InfoGraphNode.get_attributes(
                         neighbor)['name']
                 if network_name == mng_net_name:
                     return True
     return False
Code Example #21
 def _tags(self, metric, node):
     tags = {}
     tag_keys = self._tag_keys(metric, node)
     for tag_key in tag_keys:
         tag_value = self._tag_value(tag_key, node, metric)
         tags[tag_key] = tag_value
     return tags
Code Example #22
    def machine_capacity_usage(annotated_subgraph):
        """
        This is a type of fingerprint from the infrastructure perspective
        """
        # TODO: Validate graph
        categories = list()
        categories.append(InfoGraphNodeCategory.COMPUTE)
        categories.append(InfoGraphNodeCategory.NETWORK)
        # TODO: Add a Volume to the workloads to get HD usage
        categories.append(InfoGraphNodeCategory.STORAGE)
        # TODO: Get telemetry for Memory
        categories.append(InfoGraphNodeCategory.MEMORY)

        fingerprint = dict()
        counter = dict()
        for category in categories:
            fingerprint[category] = 0
            counter[category] = 0

        # calculation of the fingerprint on top of the virtual resources
        local_subgraph = annotated_subgraph.copy()
        local_subgraph.filter_nodes('layer', "virtual")
        local_subgraph.filter_nodes('layer', "service")
        local_subgraph.filter_nodes('type', 'machine')

        for node in local_subgraph.nodes(data=True):
            # if Fingerprint._node_is_nic_on_management_net(
            #         node, annotated_subgraph, mng_net_name):
            #     continue
            name = InfoGraphNode.get_name(node)
            category = InfoGraphNode.get_category(node)
            utilization = InfoGraphNode.get_utilization(node)
            if 'utilization' in utilization.columns.values:
                # LOG.info("NODE: {} - CATEGORY: {}".format(name, category))
                mean = utilization['utilization'].mean()
                fingerprint[category] += mean
                counter[category] += 1

        # This is just an average
        # TODO: Improve the average
        for category in categories:
            if counter[category] > 0:
                fingerprint[category] = \
                    fingerprint[category] / counter[category]
        return fingerprint
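The averaging performed by machine_capacity_usage can be reduced to a few lines of pandas: sum the mean utilization per category, count the contributing nodes, and divide. The toy sketch below assumes utilization frames with a 'utilization' column and plain-string categories; the values are arbitrary.

    import pandas

    # (category, per-node utilization frame) pairs, values chosen arbitrarily.
    node_utilization = [
        ('compute', pandas.DataFrame({'utilization': [0.25, 0.75]})),
        ('compute', pandas.DataFrame({'utilization': [0.50, 1.00]})),
        ('network', pandas.DataFrame({'utilization': [0.25, 0.75]})),
    ]

    fingerprint, counter = {}, {}
    for category, util in node_utilization:
        if 'utilization' in util.columns:
            fingerprint[category] = (fingerprint.get(category, 0)
                                     + util['utilization'].mean())
            counter[category] = counter.get(category, 0) + 1

    for category in fingerprint:
        fingerprint[category] /= counter[category]

    print(fingerprint)  # {'compute': 0.625, 'network': 0.5}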
Code Example #23
    def _pu(self, node, metric):
        pu = None
        if (InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_PU
                or InfoGraphNode.get_type(node) == NODE_TYPE.PHYSICAL_MACHINE):
            attrs = InfoGraphNode.get_attributes(node)
            # Attributes coming from the landscaper may be nested under
            # 'attributes'; unwrap them on the fly if needed.

            while attrs.get('attributes', None):
                attrs = attrs['attributes']

            if 'os_index' in attrs:
                pu = attrs["os_index"]
        # Prefix 'cpu' onto the CPU index for the per-CPU metrics below.
        if pu and ('intel/proc/schedstat/cpu/' in metric
                   or 'intel/psutil/cpu/' in metric):
            pu = "cpu{}".format(pu)
        return pu
Code Example #24
 def saturation(internal_graph, node, telemetry):
     telemetry_data = telemetry.get_data(node)
     if 'intel/use/compute/saturation' in telemetry_data:
         InfoGraphNode.set_compute_saturation(node,
                                              pandas.DataFrame(telemetry_data['intel/use/compute/saturation']))
     if 'intel/use/memory/saturation' in telemetry_data:
         InfoGraphNode.set_memory_saturation(node, pandas.DataFrame(telemetry_data['intel/use/memory/saturation']))
     if 'intel/use/disk/saturation' in telemetry_data:
         InfoGraphNode.set_disk_saturation(node, pandas.DataFrame(telemetry_data['intel/use/disk/saturation']))
     if 'intel/use/network/saturation' in telemetry_data:
         InfoGraphNode.set_network_saturation(node,
                                              pandas.DataFrame(telemetry_data['intel/use/network/saturation']))
Code Example #25
    def get_annotated_graph(self,
                            graph,
                            ts_from,
                            ts_to,
                            utilization=True,
                            saturation=True):
        internal_graph = graph.copy()
        i = 0
        threads = []
        cpu_count = multiprocessing.cpu_count()
        # Integer division: number of nodes handled per worker thread.
        no_node_thread = len(internal_graph.nodes()) // cpu_count
        node_pool = []
        node_pools = []
        for node in internal_graph.nodes(data=True):
            if i < no_node_thread:
                node_pool.append(node)
                i = i + 1
            else:
                thread1 = ParallelTelemetryAnnotation(
                    i, "Thread-{}".format(InfoGraphNode.get_name(node)), i,
                    node_pool, internal_graph, self.telemetry, ts_to, ts_from)
                threads.append(thread1)
                node_pools.append(node_pool)
                i = 1
                node_pool = [node]
        if len(node_pool) != 0:
            node_pools.append(node_pool)
            thread1 = ParallelTelemetryAnnotation(
                i, "Thread-{}".format(InfoGraphNode.get_name(node)), i,
                node_pool, internal_graph, self.telemetry, ts_to, ts_from)
            threads.append(thread1)

        [t.start() for t in threads]
        [t.join() for t in threads]

        for node in internal_graph.nodes(data=True):
            if InfoGraphNode.get_type(node) == InfoGraphNodeType.PHYSICAL_PU:
                self.utils.annotate_machine_pu_util(internal_graph, node)
            elif InfoGraphNode.node_is_disk(node):
                self.utils.annotate_machine_disk_util(internal_graph, node)
            elif InfoGraphNode.node_is_nic(node):
                self.utils.annotate_machine_network_util(internal_graph, node)
        return internal_graph
Code Example #26
 def annotate_machine_disk_util(internal_graph, node):
     source = InfoGraphNode.get_attributes(node)['allocation']
     machine = InfoGraphNode.get_node(internal_graph, source)
     machine_util = InfoGraphNode.get_disk_utilization(machine)
     if 'intel/use/disk/utilization' not in machine_util.columns:
         disk_metric = 'intel/procfs/disk/utilization_percentage'
         disk_util_df = InfoGraphNode.get_disk_utilization(node)
         if disk_metric in disk_util_df.columns:
             disk_util = disk_util_df[disk_metric]
             disk_util = disk_util.fillna(0)
             machine_util[InfoGraphNode.get_attributes(node)['name']] = disk_util
             InfoGraphNode.set_disk_utilization(machine, machine_util)
         else:
             LOG.info('Disk utilization metric not found for node {}'.format(InfoGraphNode.get_name(node)))
     else:
         LOG.debug('Disk utilization already present for node {}'.format(InfoGraphNode.get_name(node)))
Code Example #27
 def annotate_machine_network_util(internal_graph, node):
     source = InfoGraphNode.get_attributes(node)['allocation']
     machine = InfoGraphNode.get_node(internal_graph, source)
     machine_util = InfoGraphNode.get_network_utilization(machine)
     if 'intel/use/network/utilization' not in machine_util.columns:
         net_metric = 'intel/psutil/net/utilization_percentage'
         net_util_df = InfoGraphNode.get_network_utilization(node)
         if net_metric in net_util_df.columns:
             net_util = net_util_df[net_metric]
             net_util = net_util.fillna(0)
             machine_util[InfoGraphNode.get_attributes(node)['name']] = net_util
             InfoGraphNode.set_network_utilization(machine, machine_util)
         else:
             LOG.info('Network utilization metric not found for node {}'.format(InfoGraphNode.get_name(node)))
     else:
         LOG.debug('Network utilization already present for node {}'.format(InfoGraphNode.get_name(node)))
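Both annotate_machine_* helpers follow the same pandas pattern: take one node's utilization series, replace NaNs with zero, and attach it to the machine-level frame under a node-specific column name. A compact, self-contained sketch of that pattern follows; the metric and device names are hypothetical.

    import pandas

    metric = 'intel/procfs/disk/utilization_percentage'
    node_util = pandas.DataFrame({metric: [10.0, None, 30.0]})
    machine_util = pandas.DataFrame(index=node_util.index)

    if metric in node_util.columns:
        # Missing samples become 0 before the series is attached to the machine.
        machine_util['sda'] = node_util[metric].fillna(0)

    print(machine_util)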
Code Example #28
    def get_data(self, node):
        """
        Return telemetry data for the specified node
        :param node: InfoGraph node
        :return: pandas.DataFrame
        """
        queries = InfoGraphNode.get_queries(node)
        ret_val = pandas.DataFrame()
        try:
            ret_val = self._get_data(queries)
        except Exception:
            LOG.debug("Exception in user code:\n{}\n{}\n{}".format(
                '-' * 60, traceback.format_exc(), '-' * 60))
        #ret_val.set_index(keys='timestamp')
        if InfoGraphNode.node_is_vm(node):
            if not ret_val.empty:
                ret_val.columns = tm_utils.clean_vm_telemetry_colnames(
                    ret_val.columns)

        return ret_val
Code Example #29
 def _get_data(self, node):
     # TODO: Create here the object SnapQuery from the string
     results = {}
     for query_vars in InfoGraphNode.get_queries(node):
         query = SnapQuery(self.snap, query_vars['metric'],
                           query_vars['tags'], query_vars['ts_from'],
                           query_vars['ts_to'])
         res = query.run()
         results[query.metric] = res
     results_dataframe = self._to_dataframe(results)
     return results_dataframe
Code Example #30
    def _tag_keys(self, metric, node):
        tag_keys = []

        # always put in instance
        tag_keys += ["instance"]

        node_type = InfoGraphNode.get_type(node)
        if node_type in NODE_TO_METRIC_TAGS:
            # Extend rather than replace, so "instance" is always included.
            tag_keys += NODE_TO_METRIC_TAGS[node_type]

        return tag_keys