def testNetwork(self):
    # FIXME: cx_state to true, but needs sysstat installed
    config = """
init_config:

instances:
    - collect_connection_state: false
      excluded_interfaces:
        - lo
        - lo0
"""
    check, instances = get_check('network', config)
    check.check(instances[0])
    check.get_metrics()

    metric_names = [m[0] for m in check.aggregator.metrics]

    # The byte counters must be reported on every platform.
    for name in ('system.net.bytes_rcvd', 'system.net.bytes_sent'):
        assert name in metric_names

    # TCP segment/packet counters use platform-specific metric names.
    if Platform.is_linux():
        platform_specific = (
            'system.net.tcp.retrans_segs',
            'system.net.tcp.in_segs',
            'system.net.tcp.out_segs',
        )
    elif Platform.is_bsd():
        platform_specific = (
            'system.net.tcp.retrans_packs',
            'system.net.tcp.sent_packs',
            'system.net.tcp.rcv_packs',
        )
    else:
        platform_specific = ()
    for name in platform_specific:
        assert name in metric_names
def testNetwork(self):
    # FIXME: cx_state to true, but needs sysstat installed
    config = """
init_config:

instances:
    - collect_connection_state: false
      excluded_interfaces:
        - lo
        - lo0
"""
    check, instances = get_check("network", config)
    check.check(instances[0])
    check.get_metrics()

    # Collect into a set purely for membership testing below.
    collected = set(metric[0] for metric in check.aggregator.metrics)

    assert "system.net.bytes_rcvd" in collected
    assert "system.net.bytes_sent" in collected

    # Linux and BSD expose TCP counters under different metric names.
    if Platform.is_linux():
        assert "system.net.tcp.retrans_segs" in collected
        assert "system.net.tcp.in_segs" in collected
        assert "system.net.tcp.out_segs" in collected
    elif Platform.is_bsd():
        assert "system.net.tcp.retrans_packs" in collected
        assert "system.net.tcp.sent_packs" in collected
        assert "system.net.tcp.rcv_packs" in collected
def test_checks(self):
    """Count crash dumps: baseline, then after adding dump directories."""
    config = """
init_config:
    crash_dir: %s

instances:
    - name: crash_stats
""" % self.crash_dir
    check, instances = get_check('crash', config)

    def run_check():
        # One check cycle, returning the resulting metrics.
        check.check(instances[0])
        return check.get_metrics()

    # Baseline check: no crash dumps present yet.
    result = run_check()
    self.assertEqual(result[0].value, 0)
    self.assertEqual(result[0].value_meta['latest'], '')

    # Add a crash and re-check.
    os.mkdir(os.path.join(self.crash_dir, '201504141011'))
    result = run_check()
    self.assertEqual(result[0].value, 1)
    self.assertEqual(result[0].value_meta['latest'], '2015-04-14 10:11:00')

    # Add a second crash and re-check; the newer dump becomes 'latest'.
    os.mkdir(os.path.join(self.crash_dir, '201505222303'))
    result = run_check()
    self.assertEqual(result[0].value, 2)
    self.assertEqual(result[0].value_meta['latest'], '2015-05-22 23:03:00')
def testNetwork(self):
    """With a default instance config, the byte counters must be reported."""
    config = """
init_config:

instances:
    -
"""
    check, instances = get_check('network', config)
    check.check(instances[0])
    check.get_metrics()

    reported = set(metric[0] for metric in check.aggregator.metrics)
    assert 'system.net.bytes_rcvd' in reported
    assert 'system.net.bytes_sent' in reported
def testNetwork(self):
    """Connection-state collection on: in/out byte metrics must appear."""
    config = """
init_config:

instances:
    - collect_connection_state: true
      excluded_interfaces:
        - lo
        - lo0
"""
    check, instances = get_check('network', config)
    check.check(instances[0])
    check.get_metrics()

    names = [metric[0] for metric in check.aggregator.metrics]
    for wanted in ('net_bytes_in', 'net_bytes_out'):
        assert wanted in names
def test_checks(self):
    """Stuff ~10K messages into random postfix queues, then verify the
    check reports per-queue counts matching what was inserted.

    Skipped unconditionally: reading the postfix spool requires root.
    """
    raise SkipTest('Requires root access to postfix')

    self.config = self.stripHeredoc("""init_config:

instances:
    - directory: %s
      queues:
          - bounce
          - maildrop
          - incoming
          - active
          - deferred
""" % (self.queue_root))

    # Stuff 10K msgs in random queues.
    # range() instead of Python-2-only xrange(): the method already uses
    # Python-3 print() calls, so xrange would crash outright on py3.
    for _ in range(1, 10000):
        shuffle(self.queues)
        rand_queue = sample(self.queues, 1)[0]
        queue_file = binascii.b2a_hex(os.urandom(7))
        # Create an empty queue file and close the handle immediately;
        # the original bare open() leaked a file object per message.
        with open(os.path.join(self.queue_root, rand_queue, queue_file), 'w'):
            pass
        # keep track of what we put in
        self.in_count[rand_queue][0] += 1

    check, instances = get_check('postfix', self.config)
    check.check(instances[0])
    out_count = check.get_metrics()

    # output what went in... per queue
    # .items() instead of the Python-2-only .iteritems().
    print()
    for queue, count in self.in_count.items():
        print('Test messages put into', queue, '= ', count[0])

    # output postfix.py dd-agent plugin counts... per queue
    # `entry` avoids shadowing the builtin `tuple`.
    print()
    for entry in out_count:
        queue = entry[3]['dimensions'][0].split(':')[1]
        self.assertEqual(int(entry[2]), int(self.in_count[queue][0]))
        print('Test messages counted by dd-agent for', queue, '= ', entry[2])
def testNetwork(self):
    """Byte counters and TCP segment counters must all be reported."""
    config = """
init_config:

instances:
    - collect_connection_state: true
      excluded_interfaces:
        - lo
        - lo0
"""
    check, instances = get_check('network', config)
    check.check(instances[0])
    check.get_metrics()

    seen = frozenset(m[0] for m in check.aggregator.metrics)
    expected = (
        'system.net.bytes_rcvd',
        'system.net.bytes_sent',
        'system.net.tcp.retrans_segs',
        'system.net.tcp.in_segs',
        'system.net.tcp.out_segs',
    )
    for name in expected:
        assert name in seen
def testChecks(self):
    """End-to-end cacti check against RRDs restored from XML fixtures."""
    check, instances = get_check('cacti', CONFIG)
    rrd_dir = os.path.join(self.tmp_dir, 'rrds')

    # Restore the RRDs from the XML dumps
    if not self._copy_rrds(self.rrd_dir):
        return

    # Do a check to establish the last timestamps
    check.check(instances[0])
    check.get_metrics()

    # Bump the last timestamps back 20 minutes so we have some actual data
    twenty_min = 20 * 60
    for k, v in check.last_ts.items():
        check.last_ts[k] = v - twenty_min

    # Do a first check
    check.check(instances[0])
    results1 = check.get_metrics()

    # Check again and make sure no new metrics are picked up
    # But we will still have the payload stats
    check.check(instances[0])
    results2 = check.get_metrics()
    last_ts1 = check.last_ts[rrd_dir + '/localhost_hdd_free_10.rrd.AVERAGE']

    # Check once more to make sure last_ts ignores None vals when calculating
    # where to start from (the returned metrics are not needed here, so the
    # unused `results3` binding was dropped)
    check.check(instances[0])
    check.get_metrics()
    last_ts2 = check.last_ts[rrd_dir + '/localhost_hdd_free_10.rrd.AVERAGE']
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12)
    self.assertEqual(last_ts1, last_ts2)

    metrics = [r[0] for r in results2]

    # make sure diagnostic metrics are included
    assert 'cacti.metrics.count' in metrics
    assert 'cacti.rrd.count' in metrics
    assert 'cacti.hosts.count' in metrics

    metrics_count = [r for r in results2 if r[0] == 'cacti.metrics.count'][0][2]
    hosts_count = [r for r in results2 if r[0] == 'cacti.hosts.count'][0][2]
    rrd_count = [r for r in results2 if r[0] == 'cacti.rrd.count'][0][2]

    assert metrics_count == 0
    assert hosts_count == 1
    assert rrd_count == 3

    load1 = [m[2] for m in results1 if m[0] == 'system.load.1' and m[2]]
    # Make sure some load metrics were returned
    assert len(load1) > 0

    # Should not have any - not included in the whitelist
    current_users = [
        m[2] for m in results1 if m[0] == 'system.users.current' and m[2]
    ]
    self.assertEqual(len(current_users), 0)

    disk_used = [m for m in results1 if m[0] == 'system.disk.used' and m[2]]
    assert len(disk_used) > 0

    # Make sure no None values are picked up
    none_metrics = [m[2] for m in results1 if m[2] is None]
    self.assertEqual(len(none_metrics), 0)
def testChecks(self):
    """Exercise the cacti check against RRDs restored from XML fixtures."""
    check, instances = get_check('cacti', CONFIG)
    rrd_dir = os.path.join(self.tmp_dir, 'rrds')
    hdd_key = rrd_dir + '/localhost_hdd_free_10.rrd.AVERAGE'

    # Restore the RRDs from the XML dumps; nothing to test otherwise.
    if not self._copy_rrds(self.rrd_dir):
        return

    # Establish the last timestamps.
    check.check(instances[0])
    check.get_metrics()

    # Rewind the last timestamps by 20 minutes so some actual data exists.
    rewind = 20 * 60
    for key in list(check.last_ts):
        check.last_ts[key] = check.last_ts[key] - rewind

    # First real check.
    check.check(instances[0])
    first = check.get_metrics()

    # Second check: no new data points, but payload stats remain.
    check.check(instances[0])
    second = check.get_metrics()
    ts_before = check.last_ts[hdd_key]

    # One more check: last_ts must ignore None values when deciding where
    # to start from, so the stored timestamp must not move.
    check.check(instances[0])
    self.assertEqual(ts_before, check.last_ts[hdd_key])

    names = [row[0] for row in second]

    # Diagnostic metrics must always be emitted.
    assert 'cacti.metrics.count' in names
    assert 'cacti.rrd.count' in names
    assert 'cacti.hosts.count' in names

    def value_of(metric_name):
        # First sample value for the named metric in the second run.
        return [row for row in second if row[0] == metric_name][0][2]

    assert value_of('cacti.metrics.count') == 0
    assert value_of('cacti.hosts.count') == 1
    assert value_of('cacti.rrd.count') == 3

    # Some load metrics must have been returned.
    loads = [row[2] for row in first if row[0] == 'system.load.1' and row[2]]
    assert len(loads) > 0

    # Not included in the whitelist, so nothing should show up.
    users = [
        row[2] for row in first
        if row[0] == 'system.users.current' and row[2]
    ]
    self.assertEqual(len(users), 0)

    used = [row for row in first if row[0] == 'system.disk.used' and row[2]]
    assert len(used) > 0

    # No None values may leak through.
    nones = [row[2] for row in first if row[2] is None]
    self.assertEqual(len(nones), 0)