Example #1
    def get_workload_view_graph(self,
                                stack_names,
                                ts_from=None,
                                ts_to=None,
                                name_filtering_support=False):
        """
        Returns a graph which only includes the resources related to the
         execution of the stack names indicated in the input parameter
        """
        res = None

        if isinstance(stack_names, str):
            res = self._get_workload_subgraph(stack_names, ts_from, ts_to)

        # TODO - URGENT: Check this with the new Landscape
        elif isinstance(stack_names, list):
            temp_res = list()
            for stack_name in stack_names:
                graph = self._get_workload_subgraph(str(stack_name), ts_from,
                                                    ts_to)
                if len(graph.nodes()) > 0:
                    temp_res.append(graph)
            for graph in temp_res:
                if not res and len(graph.nodes()) > 0:
                    res = graph
                elif len(graph.nodes()) > 0:
                    # TODO - URGENT: Fix this. Put Merge within the analytics
                    res = graphs.merge_graph(res, graph)
        # TODO - URGENT: Check this with the new Landscape
        if res is None:
            # No subgraph matched the requested stack name(s).
            return res

        # Count the physical machine nodes in the resulting subgraph.
        machine_count = 0
        for node in res.nodes(data=True):
            if InfoGraphNode.node_is_machine(node):
                machine_count += 1

        if name_filtering_support:
            for node in res.nodes(data=True):
                name = InfoGraphNode.get_name(node)
                InfoGraphNode.set_attribute(node, 'node_name', name)

        return res
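The method above merges the per-stack subgraphs pairwise into a single result graph. Below is a minimal, self-contained sketch of that accumulation pattern using plain networkx, with nx.compose standing in for graphs.merge_graph (which is assumed to behave similarly); the graphs and node names are toy data.

import networkx as nx

def merge_subgraphs(subgraphs):
    """Fold a list of (possibly empty) subgraphs into one combined graph."""
    res = None
    for graph in subgraphs:
        if len(graph.nodes()) == 0:
            continue  # skip empty subgraphs, as the method above does
        res = graph if res is None else nx.compose(res, graph)
    return res

# Toy usage: two subgraphs sharing node 'b' merge into a single graph.
g1 = nx.Graph([('a', 'b')])
g2 = nx.Graph([('b', 'c')])
merged = merge_subgraphs([g1, g2])
print(sorted(merged.nodes()))  # ['a', 'b', 'c']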
Example #2
    def utilization_scores(graph):
        """
        Returns a dictionary with the scores of
        all the nodes of the graph.

        :param graph: InfoGraph
        :return: dict[node_name] = score
        """
        res = dict()
        # Lazy import of the common module for the shared logger.
        import analytics_engine.common as common
        LOG = common.LOG
        for node in graph.nodes(data=True):
            node_name = InfoGraphNode.get_name(node)
            res[node_name] = dict()
            util = InfoGraphNode.get_utilization(node)

            # Default every score to zero; nodes without telemetry keep the
            # zeros.
            res[node_name]['compute'] = 0
            res[node_name]['disk'] = 0
            res[node_name]['network'] = 0
            res[node_name]['memory'] = 0
            if util is None or \
                    (isinstance(util, pandas.DataFrame) and util.empty):
                continue

            # Prefer the intel/use/* metrics when present; otherwise fall
            # back to the procfs/psutil percentage metrics.
            if 'intel/use/compute/utilization' in util:
                res[node_name]['compute'] = (
                    util.get('intel/use/compute/utilization').mean()) / 100.0
            elif 'intel/procfs/cpu/utilization_percentage' in util:
                res[node_name]['compute'] = (util.get(
                    'intel/procfs/cpu/utilization_percentage').mean()) / 100.0
            if 'intel/use/memory/utilization' in util:
                res[node_name]['memory'] = (
                    util.get('intel/use/memory/utilization').mean()) / 100.0
            elif 'intel/procfs/memory/utilization_percentage' in util:
                res[node_name]['memory'] = (
                    util.get('intel/procfs/memory/utilization_percentage'
                             ).mean()) / 100.0
            if 'intel/use/disk/utilization' in util:
                res[node_name]['disk'] = (
                    util.get('intel/use/disk/utilization').mean()) / 100.0
            elif 'intel/procfs/disk/utilization_percentage' in util:
                res[node_name]['disk'] = (util.get(
                    'intel/procfs/disk/utilization_percentage').mean()) / 100.0
            if 'intel/use/network/utilization' in util:
                res[node_name]['network'] = (
                    util.get('intel/use/network/utilization').mean()) / 100.0
            elif 'intel/psutil/net/utilization_percentage' in util:
                res[node_name]['network'] = (util.get(
                    'intel/psutil/net/utilization_percentage').mean()) / 100.0

            # special handling of cpu, disk & network utilization if node is a machine
            if InfoGraphNode.node_is_machine(node):
                # mean from all cpu columns
                cpu_util = InfoGraphNode.get_compute_utilization(node)
                cpu_util['total'] = [
                    sum(row) / len(row) for index, row in cpu_util.iterrows()
                ]
                res[node_name]['compute'] = cpu_util['total'].mean() / 100
                # mean from all disk columns
                disk_util = InfoGraphNode.get_disk_utilization(node)
                if disk_util.empty:
                    res[node_name]['disk'] = 0.0
                else:
                    disk_util['total'] = [
                        sum(row) / len(row)
                        for index, row in disk_util.iterrows()
                    ]
                    res[node_name]['disk'] = disk_util['total'].mean() / 100
                # mean from all nic columns
                net_util = InfoGraphNode.get_network_utilization(node)
                if net_util.empty:
                    res[node_name]['network'] = 0.0
                else:
                    net_util['total'] = [
                        sum(row) / len(row)
                        for index, row in net_util.iterrows()
                    ]
                    res[node_name]['network'] = net_util['total'].mean() / 100
                # custom metric

            if InfoGraphNode.get_type(node) == \
                    InfoGraphNodeType.DOCKER_CONTAINER:
                # Docker containers are scored separately, keyed by their
                # container id and read from the docker cgroup metrics.
                node_name = InfoGraphNode.get_docker_id(node)
                res[node_name] = {}
                if 'intel/docker/stats/cgroups/cpu_stats/cpu_usage/percentage' in util.columns:
                    res[node_name]['compute'] = util[
                        'intel/docker/stats/cgroups/cpu_stats/cpu_usage/percentage'].mean(
                        ) / 100
                else:
                    res[node_name]['compute'] = 0
                if 'intel/docker/stats/cgroups/memory_stats/usage/percentage' in util.columns:
                    res[node_name]['memory'] = util[
                        'intel/docker/stats/cgroups/memory_stats/usage/percentage'].mean(
                        ) / 100
                else:
                    res[node_name]['memory'] = 0
                if 'intel/docker/stats/network/utilization_percentage' in util.columns:
                    res[node_name]['network'] = util[
                        'intel/docker/stats/network/utilization_percentage'].mean(
                        ) / 100
                else:
                    res[node_name]['network'] = 0
                if 'intel/docker/stats/cgroups/blkio_stats/io_time_recursive/percentage' in util.columns:
                    res[node_name]['disk'] = util[
                        'intel/docker/stats/cgroups/blkio_stats/io_time_recursive/percentage'].mean(
                        ) / 100
                else:
                    res[node_name]['disk'] = 0
        return res
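For machine nodes, the scores above are built by averaging across all per-CPU, per-disk and per-NIC columns with a row-wise list comprehension. Here is a small runnable pandas sketch of that pattern and its vectorised equivalent, using toy column names rather than the real metric names.

import pandas as pd

# Toy per-core utilization samples (percent), one column per core.
cpu_util = pd.DataFrame({'cpu0': [10.0, 30.0], 'cpu1': [50.0, 70.0]})

# Row-wise mean written as in the example above...
cpu_util['total'] = [sum(row) / len(row) for _, row in cpu_util.iterrows()]

# ...which matches the vectorised form.
assert (cpu_util['total'] == cpu_util[['cpu0', 'cpu1']].mean(axis=1)).all()

# Final score: mean of the per-row totals, scaled from percent to [0, 1].
print(cpu_util['total'].mean() / 100)  # 0.4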
Example #3
    def workload(nodes):
        """
        This is a type of fingerprint from the infrastructure perspective
        """
        # TODO: Validate graph
        data = dict()
        statistics = dict()
        # Initialise an empty DataFrame and zeroed statistics for every
        # resource category.
        categories = [
            InfoGraphNodeCategory.COMPUTE,
            InfoGraphNodeCategory.NETWORK,
            InfoGraphNodeCategory.STORAGE,
            InfoGraphNodeCategory.MEMORY
        ]
        for category in categories:
            data[category] = pandas.DataFrame()
            statistics[category] = {
                'mean': 0,
                'median': 0,
                'min': 0,
                'max': 0,
                'var': 0,
                'std_dev': 0
            }

        # Calculation of the fingerprint on top of the virtual resources
        for node in nodes:
            layer = InfoGraphNode.get_layer(node)
            is_machine = InfoGraphNode.node_is_machine(node)
            if is_machine:
                continue
            if layer == InfoGraphNodeLayer.PHYSICAL:
                continue
            if layer == InfoGraphNodeLayer.SERVICE:
                continue
            # Accumulate the utilization of each remaining (virtual) node
            # into the per-category DataFrame, following the same pattern
            # as compute_node.
            category = InfoGraphNode.get_category(node)
            utilization = InfoGraphNode.get_utilization(node)
            if not isinstance(utilization, pandas.DataFrame) or \
                    utilization.empty:
                continue
            if 'timestamp' in utilization.columns:
                utilization = utilization.drop(columns=['timestamp'])
            data[category] = pandas.concat([data[category], utilization])

        for category in statistics:
            if not data[category].empty:
                util = data[category]['utilization']
                mean = util.mean()
                median = util.median()
                minimum = util.min()
                maximum = util.max()
                var = util.var()
                std_dev = math.sqrt(var)
            else:
                mean = median = minimum = maximum = var = std_dev = 0
            statistics[category] = \
                {'mean': mean,
                 'median': median,
                 'min': minimum,
                 'max': maximum,
                 'var': var,
                 'std_dev': std_dev}

        return [data, statistics]
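The statistics block above summarises each category's 'utilization' column into a small dict. Below is a runnable toy sketch of those computations on a single column; the sample values are made up.

import math
import pandas as pd

# Toy 'utilization' samples for one category.
util = pd.DataFrame({'utilization': [20.0, 40.0, 60.0]})['utilization']

var = util.var()          # sample variance (ddof=1) -> 400.0
std_dev = math.sqrt(var)  # 20.0, equivalent to util.std()

print({'mean': util.mean(), 'median': util.median(),
       'min': util.min(), 'max': util.max(),
       'var': var, 'std_dev': std_dev})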
Example #4
    def compute_node(annotated_subgraph, hostname=None):
        """
        This is a type of fingerprint from the infrastructure perspective
        """
        # TODO: Validate graph
        data = dict()
        statistics = dict()
        # Initialise an empty DataFrame and zeroed statistics for every
        # resource category.
        categories = [
            InfoGraphNodeCategory.COMPUTE,
            InfoGraphNodeCategory.NETWORK,
            InfoGraphNodeCategory.STORAGE,
            InfoGraphNodeCategory.MEMORY
        ]
        for category in categories:
            data[category] = pandas.DataFrame()
            statistics[category] = {
                'mean': 0,
                'median': 0,
                'min': 0,
                'max': 0,
                'var': 0,
                'std_dev': 0
            }

        # Calculation of the fingerprint on top of the virtual resources
        local_subgraph = annotated_subgraph.copy()

        for node in local_subgraph.nodes(data=True):
            layer = InfoGraphNode.get_layer(node)
            is_machine = InfoGraphNode.node_is_machine(node)
            if is_machine:
                continue
            if layer == InfoGraphNodeLayer.VIRTUAL:
                continue
            if layer == InfoGraphNodeLayer.SERVICE:
                continue
            # If hostname has been specified, need to take into account only
            # nodes that are related to the specific host
            attrs = InfoGraphNode.get_attributes(node)
            allocation = attrs.get('allocation')
            if hostname and hostname != allocation:
                continue

            category = InfoGraphNode.get_category(node)
            utilization = InfoGraphNode.get_utilization(node)
            try:
                # Drop the timestamp column so that only metric values are
                # aggregated; fall back to the raw frame if it is absent.
                utilization = utilization.drop('timestamp', axis=1)
            except (KeyError, ValueError):
                utilization = InfoGraphNode.get_utilization(node)
            data[category] = pandas.concat([data[category], utilization])

        for category in statistics:
            if not data[category].empty:
                util = data[category]['utilization']
                mean = util.mean()
                median = util.median()
                minimum = util.min()
                maximum = util.max()
                var = util.var()
                std_dev = math.sqrt(var)
            else:
                mean = median = minimum = maximum = var = std_dev = 0
            statistics[category] = \
                {'mean': mean,
                 'median': median,
                 'min': minimum,
                 'max': maximum,
                 'var': var,
                 'std_dev': std_dev}

        return [data, statistics]
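compute_node filters nodes by their 'allocation' attribute when a hostname is given and then accumulates utilization per category. The following is a self-contained sketch of that filter-and-accumulate pattern on toy data; the tuples stand in for InfoGraphNode attributes and are not the real API.

import pandas as pd

# Toy nodes: (allocation host, category, utilization frame).
nodes = [
    ('host-1', 'compute', pd.DataFrame({'utilization': [20.0, 40.0]})),
    ('host-2', 'compute', pd.DataFrame({'utilization': [90.0, 90.0]})),
    ('host-1', 'memory', pd.DataFrame({'utilization': [10.0, 30.0]})),
]

def accumulate(nodes, hostname=None):
    """Per-category accumulation with an optional hostname filter."""
    data = {}
    for allocation, category, util in nodes:
        if hostname and allocation != hostname:
            continue  # keep only nodes allocated to the requested host
        frames = [data[category], util] if category in data else [util]
        data[category] = pd.concat(frames)
    return data

data = accumulate(nodes, hostname='host-1')
print(data['compute']['utilization'].mean())  # 30.0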