Example #1
    def _calculate_datapoints(self, final_pass=False):
        for point in super(PBenchKPIReader, self)._calculate_datapoints(final_pass):
            concurrency = self.stats_reader.get_data(point[DataPoint.TIMESTAMP])

            for label_data in viewvalues(point[DataPoint.CURRENT]):
                label_data[KPISet.CONCURRENCY] = concurrency

            yield point
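Here viewvalues gives an iterable view over the per-label KPI entries of the data point, so the concurrency read from the stats reader is stamped onto every label in place. A minimal sketch of the same pattern on plain dicts, assuming the python-future helper (from future.utils import viewvalues); the field names below are simplified stand-ins, not the real DataPoint/KPISet keys:

from future.utils import viewvalues

# hypothetical, simplified data point: one dict per label under "current"
point = {"ts": 1000, "current": {"": {"concurrency": 0}, "login": {"concurrency": 0}}}
concurrency = 25  # would normally come from the stats reader

for label_data in viewvalues(point["current"]):
    label_data["concurrency"] = concurrency  # mutate each label entry in place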
Example #2
    def _get_windowed_points(self, tstmp, value):
        self.agg_buffer[tstmp] = value

        # materialize the keys so entries can be deleted while scanning (safe on Python 3)
        for tstmp_old in list(self.agg_buffer.keys()):
            if tstmp_old <= tstmp - self.window:
                del self.agg_buffer[tstmp_old]
                continue
            # keys are inserted in ascending order, so the first fresh timestamp ends the scan
            break

        return viewvalues(self.agg_buffer)
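The buffer acts as a sliding window keyed by timestamp: each call stores the new point, evicts everything older than the configured window, and returns a view over the surviving values. A minimal sketch of that idea, assuming an OrderedDict filled in ascending timestamp order (the class name below is an illustrative stand-in, not the original reader):

from collections import OrderedDict
from future.utils import viewvalues

class WindowBuffer(object):  # illustrative stand-in
    def __init__(self, window):
        self.window = window
        self.agg_buffer = OrderedDict()  # timestamps arrive in ascending order

    def get_windowed_points(self, tstmp, value):
        self.agg_buffer[tstmp] = value
        for tstmp_old in list(self.agg_buffer.keys()):
            if tstmp_old <= tstmp - self.window:
                del self.agg_buffer[tstmp_old]  # evict points that fell out of the window
                continue
            break  # ordered keys: the first fresh timestamp ends the scan
        return viewvalues(self.agg_buffer)

buf = WindowBuffer(window=3)
for ts in range(10):
    points = buf.get_windowed_points(ts, {"ts": ts})
print(len(points))  # 3: only the last `window` timestamps remain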
Example #3
    def test_unpack(self):
        ITERATIONS = 200
        SIZE_LIMIT = 10
        mon_buffer = MonitoringBuffer(SIZE_LIMIT, ROOT_LOGGER)
        for i in range(ITERATIONS):
            mon = [{"ts": i, "source": "local", "cpu": 1}]
            mon_buffer.record_data(mon)
        # downsampling merges neighbouring samples, but each stored item keeps the
        # number of raw samples it covers in 'interval', so the sum never changes
        unpacked = sum(item['interval'] for item in viewvalues(mon_buffer.data['local']))
        self.assertEqual(unpacked, ITERATIONS)
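The assertion relies on downsampling being merge-only: every stored item remembers how many raw samples it represents, so the 'interval' fields always add up to the number of recorded samples. A toy illustration of that invariant (just the bookkeeping idea, not the real MonitoringBuffer):

def merge(left, right):
    # a merged item carries the count of raw samples it represents
    return {"ts": left["ts"], "interval": left["interval"] + right["interval"]}

items = [{"ts": i, "interval": 1} for i in range(200)]
while len(items) > 10:  # downsample towards the size limit by merging pairs
    items = [merge(items[j], items[j + 1]) if j + 1 < len(items) else items[j]
             for j in range(0, len(items), 2)]
print(sum(item["interval"] for item in items))  # still 200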
Example #4
    def test_downsample_theorem(self):
        # Theorem: the average interval size in the monitoring buffer is always
        # less than or equal to ITERATIONS / BUFFER_LIMIT (here 5000 / 100 = 50),
        # checked with a 20% slack
        mon_buffer = MonitoringBuffer(100, ROOT_LOGGER)
        for i in range(5000):
            mon = [{"ts": i, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100, "other": 0}]
            mon_buffer.record_data(mon)
            for source, buffer in iteritems(mon_buffer.data):
                self.assertLessEqual(len(buffer), 100)
                sizes = [item['interval'] for item in viewvalues(buffer)]
                avg_size = float(sum(sizes)) / len(sizes)
                expected_size = 5000 / 100
                self.assertLessEqual(avg_size, expected_size * 1.20)
Example #5
    @classmethod
    def traverse(cls, obj, visitor):
        """
        Recursively traverse nested dicts and lists, calling the visitor on every dict

        :type obj: list or dict or object
        :type visitor: callable
        """
        if isinstance(obj, dict):
            visitor(obj)
            for val in viewvalues(obj):
                cls.traverse(val, visitor)
        elif isinstance(obj, list):
            for val in obj:
                cls.traverse(val, visitor)
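Using viewvalues instead of .values() keeps the recursion copy-free on Python 2 while behaving the same on Python 3. A small self-contained usage sketch; Walker is a hypothetical holder class for the classmethod above, and the nested dict is arbitrary test data:

from future.utils import viewvalues

class Walker(object):  # hypothetical holder for the traverse() classmethod
    @classmethod
    def traverse(cls, obj, visitor):
        if isinstance(obj, dict):
            visitor(obj)
            for val in viewvalues(obj):
                cls.traverse(val, visitor)
        elif isinstance(obj, list):
            for val in obj:
                cls.traverse(val, visitor)

found = []
nested = {"a": {"b": [{"c": 1}, {"d": 2}]}, "e": 3}
Walker.traverse(nested, lambda dct: found.extend(dct.keys()))  # visitor sees every nested dict
print(sorted(found))  # ['a', 'b', 'c', 'd', 'e']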