def collect_stats_and_send(self):
        """
        Collects docker metrics from docker and sends them to sender
        cpu, memory, rx_bytes ,tx_bytes, blkio metrics
        """

        if self._my_container_id is None:
            self._my_container_id = self._docker_injector.get_my_container_id()

        host_name = self._docker_wrapper.get_host_name()
        containers = self._docker_wrapper.get_containers()
        self._update_containers_state(containers=containers)
        # Collect stats only for containers that don't report through an SDK of
        # their own (no instrumentation key), plus this collector's own container.
        containers_without_sdk = [v['container'] for k, v in self._containers_state.items() if
                                  k == self._my_container_id or v['ikey'] is None]

        # Fetch stats for every target container in parallel (at least 30 workers).
        with concurrent.futures.ThreadPoolExecutor(max_workers=max(len(containers), 30)) as executor:
            container_stats = list(
                executor.map(
                    lambda container: (container, self._docker_wrapper.get_stats(container=container,
                                                                                 stats_to_bring=self._samples_in_each_metric)),
                    containers_without_sdk))

        for container, stats in container_stats:
            # Rates and deltas need at least two samples per container.
            if len(stats) <= 1:
                continue
            metrics = dockerconvertors.convert_to_metrics(stats)
            properties = dockerconvertors.get_container_properties(container, host_name)
            for metric in metrics:
                self._send_event({'metric': metric, 'properties': properties})
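
    # The dict handed to self._send_event above pairs one converted metric with
    # the container's properties. A minimal sketch of that payload, not part of
    # the original code: the metric keys (name/count/value/min/max/std) follow
    # the shapes asserted in test_convert_to_metrics below, while the contents
    # of 'properties' are left unspecified here.
    #
    # {
    #     'metric': {'name': '% Processor Time', 'count': 6, 'value': 36.67,
    #                'min': 10, 'max': 100, 'std': 33.27},
    #     'properties': {...output of get_container_properties(container, host_name)...}
    # }
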
    def test_convert_to_metrics(self):
        time_samples = [0, 10, 20, 30, 40, 50, 60]
        cpu_samples = [0, 1, 2, 5, 9, 19, 22]
        system_samples = [0, 10, 20, 30, 40, 50, 60]
        blkio_samples = [0, 1, 20, 30, 200, 350, 700]
        rx_samples = [0, 0, 0, 101, 120, 120, 230]
        tx_samples = [0, 0, 10, 10, 200, 250, 260]
        memory_samples = [1000000, 1000000, 2000000, 2020000, 3000000, 2000000, 1000000]
        memory_limit = 8000000

        samples = [(time, {'cpu_stats': {'cpu_usage': {'total_usage': cpu}, 'system_cpu_usage': system},
                    'memory_stats': {'limit': memory_limit, 'usage': mem}, 'network': {'rx_bytes': rx, 'tx_bytes': tx},
                    'blkio_stats': {'io_service_bytes_recursive': [{'op': 'Total', 'value': blkio}]}})
                   for time, cpu, system, blkio, rx, tx, mem in
                   zip(time_samples, cpu_samples, system_samples, blkio_samples, rx_samples, tx_samples,
                       memory_samples)]

        expected_metrics = {
            '% Processor Time': {'name': '% Processor Time', 'count': 6, 'value': 36.66666667,
                                 'min': 10, 'max': 100, 'std': 33.26659987},
            'Available Bytes': {'name': 'Available Bytes', 'count': 7, 'value': 6282857.142857143,
                                'min': 5000000, 'max': 7000000, 'std': 757225.5121101482},
            'Docker RX Bytes': {'name': 'Docker RX Bytes', 'count': 6, 'value': 3.833333333,
                                'min': 0, 'max': 11, 'std': 5.262192192},
            'Docker TX Bytes': {'name': 'Docker TX Bytes', 'count': 6, 'value': 4.333333333,
                                'min': 0, 'max': 19, 'std': 7.420691792},
            'Docker Blkio Bytes': {'name': 'Docker Blkio Bytes', 'count': 6, 'value': 11.66666667,
                                   'min': 0.1, 'max': 35, 'std': 13.61582413}}

        actual_metrics = dockerconvertors.convert_to_metrics(samples)

        for metric in actual_metrics:
            self.assertIn(metric['name'], expected_metrics)
            self._assert_metrics_equals(expected_metrics[metric['name']], metric)
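
    # A minimal sketch, not part of the original test, showing how the expected
    # '% Processor Time' figures above can be reproduced by hand. It assumes
    # convert_to_metrics turns each pair of consecutive samples into a
    # cpu-delta / system-delta percentage and aggregates with a sample standard
    # deviation; the helper name below is local to this sketch.
    def _sketch_expected_processor_time(self):
        import statistics

        cpu = [0, 1, 2, 5, 9, 19, 22]         # total_usage samples
        system = [0, 10, 20, 30, 40, 50, 60]  # system_cpu_usage samples

        # Per-interval CPU percentage: delta(total_usage) / delta(system_cpu_usage) * 100
        percentages = [100.0 * (cpu[i + 1] - cpu[i]) / (system[i + 1] - system[i])
                       for i in range(len(cpu) - 1)]
        # -> [10.0, 10.0, 30.0, 40.0, 100.0, 30.0]

        assert len(percentages) == 6                                     # 'count'
        assert abs(statistics.mean(percentages) - 36.66666667) < 1e-6    # 'value'
        assert min(percentages) == 10 and max(percentages) == 100        # 'min'/'max'
        assert abs(statistics.stdev(percentages) - 33.26659987) < 1e-6   # 'std'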