def test_harmonic(self):
    """Feeding a sine-shaped CPU series must never grow the buffer past its size limit."""
    iterations = 50
    size_limit = 10
    buf = MonitoringBuffer(size_limit, ROOT_LOGGER)
    for step in range(iterations):
        # one synthetic datapoint per tick, CPU following half a sine period
        cpu_load = math.sin(self.to_rad(float(step) / iterations * 180))
        buf.record_data([{"ts": step, "source": "local", "cpu": cpu_load}])
    self.assertLessEqual(len(buf.data['local']), size_limit)
def test_unpack(self):
    """Downsampling must be lossless in count: summed 'interval' sizes equal raw records."""
    ITERATIONS = 200
    SIZE_LIMIT = 10
    buf = MonitoringBuffer(SIZE_LIMIT, ROOT_LOGGER)
    for ts in range(ITERATIONS):
        buf.record_data([{"ts": ts, "source": "local", "cpu": 1}])
    # every stored item covers 'interval' original datapoints; totals must match
    total = 0
    for item in viewvalues(buf.data['local']):
        total += item['interval']
    self.assertEqual(total, ITERATIONS)
def test_sources(self):
    """Each monitoring source gets its own buffer, and every buffer honors the size cap."""
    buf = MonitoringBuffer(10, ROOT_LOGGER)
    for ts in range(100):
        # two sources reporting at the same timestamp
        buf.record_data([
            {"ts": ts, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100},
            {"ts": ts, "source": "server-agent", "cpu": 10, "mem": 20},
        ])
    for _, per_source in iteritems(buf.data):
        self.assertLessEqual(len(per_source), 10)
def test_downsample_theorem(self):
    """Theorem check: the mean 'interval' size per source stays within 20% of
    ITERATIONS / BUFFER_LIMIT after heavy downsampling."""
    buf = MonitoringBuffer(100, ROOT_LOGGER)
    for ts in range(5000):
        buf.record_data(
            [{"ts": ts, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100, "other": 0}])
    for _, per_source in iteritems(buf.data):
        self.assertLessEqual(len(per_source), 100)
        sizes = [entry['interval'] for entry in viewvalues(per_source)]
        mean_size = float(sum(sizes)) / len(sizes)
        # allow 20% slack over the theoretical average of 5000 / 100
        self.assertLessEqual(mean_size, (5000 / 100) * 1.20)