def gather_data(registry):
    """Gathers the metrics"""

    # Get the host name of the machine
    host = socket.gethostname()

    # Create our collectors
    trig_metric = Gauge("trigonometry_example",
                        "Various trigonometry examples.",
                        {'host': host})

    # register the metric collectors
    registry.register(trig_metric)

    # Start gathering metrics every second
    counter = 0
    while True:
        time.sleep(1)

        sine = math.sin(math.radians(counter % 360))
        cosine = math.cos(math.radians(counter % 360))

        trig_metric.set({'type': "sine"}, sine)
        trig_metric.set({'type': "cosine"}, cosine)

        counter += 1
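# The gather_data-style collectors in these examples take a `registry` argument and
# loop forever, so they are meant to run on a background thread while an HTTP
# handler serves that registry. A minimal wiring sketch is shown here, assuming the
# exporter handler and import paths used by the upstream prometheus-python examples
# (PrometheusMetricHandler, prometheus.registry.Registry); adjust them to your
# actual exporter setup.
import functools
import threading
from http.server import HTTPServer

from prometheus.exporter import PrometheusMetricHandler  # assumed import path
from prometheus.registry import Registry

PORT_NUMBER = 4444  # hypothetical port for the /metrics endpoint


def serve_forever(gather):
    """Run a collector like gather_data above in the background and serve its registry."""
    registry = Registry()

    # The collector loops forever, so push it onto a daemon thread
    thread = threading.Thread(target=gather, args=(registry,), daemon=True)
    thread.start()

    # Hand the registry to the request handler and serve until interrupted
    handler = functools.partial(PrometheusMetricHandler, registry)
    HTTPServer(("", PORT_NUMBER), handler).serve_forever()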
def test_gauge(self):
    # Add some metrics
    data = (
        ({'data': 1}, 100),
        ({'data': "2"}, 200),
        ({'data': 3}, 300),
        ({'data': 1}, 400),
    )
    g = Gauge("test_gauge", "Test Gauge.", {'test': "test_gauge"})
    self.registry.register(g)

    for i in data:
        g.set(i[0], i[1])

    headers = {'accept': 'text/plain; version=0.0.4'}
    url = urllib.parse.urljoin(TEST_URL, TEST_METRICS_PATH[1:])
    r = requests.get(url, headers=headers)

    valid_data = """# HELP test_gauge Test Gauge.
# TYPE test_gauge gauge
test_gauge{data="1",test="test_gauge"} 400
test_gauge{data="2",test="test_gauge"} 200
test_gauge{data="3",test="test_gauge"} 300
"""

    self.assertEqual("text/plain; version=0.0.4; charset=utf-8",
                     r.headers['content-type'])
    self.assertEqual(200, r.status_code)
    self.assertEqual(valid_data, r.text)
def gather_data(registry):
    """Gathers the metrics"""

    # Get the host name of the machine
    host = socket.gethostname()

    # Create our collectors
    ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.",
                       {'host': host})
    cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.",
                       {'host': host})

    # register the metric collectors
    registry.register(ram_metric)
    registry.register(cpu_metric)

    # Start gathering metrics every second
    while True:
        time.sleep(1)

        # Add ram metrics
        ram = psutil.virtual_memory()
        swap = psutil.swap_memory()

        ram_metric.set({'type': "virtual"}, ram.used)
        ram_metric.set({'type': "virtual", 'status': "cached"}, ram.cached)
        ram_metric.set({'type': "swap"}, swap.used)

        # Add cpu metrics
        for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
            cpu_metric.set({'core': c}, p)
def gather_data(registry):
    """Gathers the metrics"""
    host = socket.gethostname()

    asterisk_total_active_channels_metric = Gauge(
        "asterisk_active_channels", "Total current active channels",
        {'host': host})
    asterisk_total_active_calls_metric = Gauge(
        "asterisk_active_calls", "Total current active calls", {'host': host})
    asterisk_total_calls_processed_metric = Gauge(
        "asterisk_calls_processed", "Total current calls processed",
        {'host': host})

    registry.register(asterisk_total_active_calls_metric)
    registry.register(asterisk_total_active_channels_metric)
    registry.register(asterisk_total_calls_processed_metric)

    while True:
        time.sleep(1)

        command_active_channels = "asterisk -rx 'core show channels' | grep 'active channels' | awk '{print $1}'"
        command_active_calls = "asterisk -rx 'core show channels' | grep 'active calls' | awk '{print $1}'"
        command_calls_processed = "asterisk -rx 'core show channels' | grep 'calls processed' | awk '{print $1}'"

        # os.popen returns the raw command output, so strip the trailing
        # newline and convert to a number before setting the gauge
        active_channels = int(os.popen(command_active_channels).read().strip())
        asterisk_total_active_channels_metric.set({'type': "active channels"},
                                                  active_channels)

        active_calls = int(os.popen(command_active_calls).read().strip())
        asterisk_total_active_calls_metric.set({'type': "active calls"},
                                               active_calls)

        calls_processed = int(os.popen(command_calls_processed).read().strip())
        asterisk_total_calls_processed_metric.set({'type': "calls processed"},
                                                  calls_processed)
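# The collector above shells out through os.popen with grep/awk pipelines. As an
# alternative sketch (not part of the original example), the same counters can be
# pulled with subprocess and parsed in Python; this assumes the `asterisk` binary
# is on PATH and that `core show channels` ends with the usual
# "N active channels / N active calls / N calls processed" summary lines.
import re
import subprocess


def read_channel_stats():
    """Return (active channels, active calls, calls processed) as integers."""
    out = subprocess.run(
        ["asterisk", "-rx", "core show channels"],
        capture_output=True, text=True, check=True,
    ).stdout

    # Match singular and plural forms ("1 active call", "2 active calls")
    channels = int(re.search(r"(\d+) active channel", out).group(1))
    calls = int(re.search(r"(\d+) active call", out).group(1))
    processed = int(re.search(r"(\d+) calls? processed", out).group(1))
    return channels, calls, processed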
def gather_data(registry):
    """Gathers the metrics"""

    # Get the host name of the machine
    host = socket.gethostname()

    # Create our collectors
    mysql_seconds_behind_master = Gauge("mysql_slave_seconds_behind_master",
                                        "MySQL slave seconds behind master",
                                        {'host': host})
    mysql_io_running = Gauge("mysql_slave_io_running",
                             "MySQL slave IO Running", {'host': host})
    mysql_sql_running = Gauge("mysql_slave_sql_running",
                              "MySQL slave SQL Running", {'host': host})

    # register the metric collectors
    registry.register(mysql_seconds_behind_master)
    registry.register(mysql_io_running)
    registry.register(mysql_sql_running)

    # Connect to mysql (port, user and password are module-level settings)
    con = mdb.connect(host=host, port=port, user=user, passwd=password)
    cur = con.cursor(mdb.cursors.DictCursor)

    # Start gathering metrics every second
    while True:
        time.sleep(1)

        # Get replication information
        cur.execute('show slave status')
        slave_status = cur.fetchone()

        seconds_behind_master = slave_status["Seconds_Behind_Master"]
        slave_sql_running = 1 if slave_status["Slave_SQL_Running"] == "Yes" else 0
        slave_io_running = 1 if slave_status["Slave_IO_Running"] == "Yes" else 0
        # con.close()

        # Add metrics
        mysql_seconds_behind_master.set({}, seconds_behind_master)
        mysql_io_running.set({}, slave_io_running)
        mysql_sql_running.set({}, slave_sql_running)
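# Note for the MySQL example above: when replication is stopped, SHOW SLAVE STATUS
# reports Seconds_Behind_Master as NULL, which reaches Python as None and cannot be
# set on the gauge. One option is a small guard like the sketch below; the -1
# sentinel is an assumption, not part of the original example.
def seconds_behind_or_sentinel(slave_status, sentinel=-1):
    """Return Seconds_Behind_Master, or a sentinel when replication is not running."""
    value = slave_status.get("Seconds_Behind_Master")
    return sentinel if value is None else value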
def cisco_sf300_gather_data(registry):
    """Gathers the metrics"""

    # Address of the switch being scraped
    host = '192.168.1.1'

    metric_cpu_5_seconds = Gauge("switch_cisco_sf300_cpu_5_seconds",
                                 "switch cisco sf300 cpu usage 5 seconds",
                                 {'host': host})
    metric_cpu_1_minutes = Gauge("switch_cisco_sf300_cpu_1_minutes",
                                 "switch cisco sf300 cpu usage 1 minute",
                                 {'host': host})
    metric_cpu_5_minutes = Gauge("switch_cisco_sf300_cpu_5_minutes",
                                 "switch cisco sf300 cpu usage 5 minutes",
                                 {'host': host})

    registry.register(metric_cpu_5_seconds)
    registry.register(metric_cpu_1_minutes)
    registry.register(metric_cpu_5_minutes)

    while True:
        time.sleep(1)

        # switch_cisco_sf300 is the netmiko device definition (module-level)
        net_connect_device = ConnectHandler(**switch_cisco_sf300)
        net_connect_device.enable()

        command_show_cpu = 'show cpu utilization'
        result_run_command_cpu = net_connect_device.send_command(
            command_show_cpu, expect_string=r'#')

        [cpu_5_seconds, cpu_1_minutes,
         cpu_5_minutes] = re.findall(r"\d+", result_run_command_cpu)

        metric_cpu_5_seconds.set({}, int(cpu_5_seconds))
        metric_cpu_1_minutes.set({}, int(cpu_1_minutes))
        metric_cpu_5_minutes.set({}, int(cpu_5_minutes))

        net_connect_device.disconnect()
def draytek_gather_data(registry):
    """Gathers the metrics"""

    # Address of the router being scraped
    host = '192.168.1.1'

    metric_memory_usage = Gauge("draytek_vigor_3900_memory_usage",
                                "Draytek Vigor 3900 Memory Usage",
                                {'host': host})
    metric_cpu_usage = Gauge("draytek_vigor_3900_metric_cpu_usage",
                             "Draytek Vigor 3900 CPU Usage", {'host': host})
    metric_memory_size = Gauge("draytek_vigor_3900_metric_memory_size",
                               "Draytek Vigor 3900 Memory Size",
                               {'host': host})
    metric_model = Gauge("draytek_vigor_3900_metric_model",
                         "Draytek Vigor 3900 Model", {'host': host})
    metric_hardware_version = Gauge(
        "draytek_vigor_3900_metric_hardware_version",
        "Draytek Vigor 3900 Hardware Version", {'host': host})
    metric_firmware_version = Gauge(
        "draytek_vigor_3900_metric_firmware_version",
        "Draytek Vigor 3900 Firmware Version", {'host': host})
    metric_build_date_time = Gauge("draytek_vigor_3900_metric_build_date_time",
                                   "Draytek Vigor 3900 Build Date Time",
                                   {'host': host})
    metric_revision = Gauge("draytek_vigor_3900_metric_revision",
                            "Draytek Vigor 3900 Revision", {'host': host})
    metric_system_up_time = Gauge("draytek_vigor_3900_metric_system_up_time",
                                  "Draytek Vigor 3900 System Up Time",
                                  {'host': host})
    metric_current_system_time = Gauge(
        "draytek_vigor_3900_metric_current_system_time",
        "Draytek Vigor 3900 Current System Time", {'host': host})
    metric_eeprom_version = Gauge("draytek_vigor_3900_metric_eeprom_version",
                                  "Draytek Vigor 3900 EEPROM Version",
                                  {'host': host})
    metric_bootloader_version = Gauge(
        "draytek_vigor_3900_metric_bootloader_version",
        "Draytek Vigor 3900 BootLoader Version", {'host': host})
    metric_memory_used = Gauge("draytek_vigor_3900_metric_memory_used",
                               "Draytek Vigor 3900 Memory Used",
                               {'host': host})
    metric_memory_free = Gauge("draytek_vigor_3900_metric_memory_free",
                               "Draytek Vigor 3900 Memory Free",
                               {'host': host})
    metric_memory_shards = Gauge("draytek_vigor_3900_metric_memory_shards",
                                 "Draytek Vigor 3900 Memory Shards",
                                 {'host': host})
    metric_memory_buffer = Gauge("draytek_vigor_3900_metric_memory_buffer",
                                 "Draytek Vigor 3900 Memory Buffer",
                                 {'host': host})
    metric_memory_cached = Gauge("draytek_vigor_3900_metric_memory_cached",
                                 "Draytek Vigor 3900 Memory Cached",
                                 {'host': host})
    metric_load_average_1 = Gauge("draytek_vigor_3900_metric_load_average_1",
                                  "Draytek Vigor 3900 Load Average 1 Minute",
                                  {'host': host})
    metric_load_average_5 = Gauge("draytek_vigor_3900_metric_load_average_5",
                                  "Draytek Vigor 3900 Load Average 5 Minutes",
                                  {'host': host})
    metric_load_average_15 = Gauge(
        "draytek_vigor_3900_metric_load_average_15",
        "Draytek Vigor 3900 Load Average 15 Minutes", {'host': host})
    metric_command_process_top_1 = Gauge(
        "draytek_vigor_3900_metric_command_process_top_1",
        "Draytek Vigor 3900 command process top 1", {'host': host})
    metric_cpu_process_top_1 = Gauge(
        "draytek_vigor_3900_metric_cpu_process_top_1",
        "Draytek Vigor 3900 cpu process top 1", {'host': host})
    metric_memory_process_top_1 = Gauge(
        "draytek_vigor_3900_metric_memory_process_top_1",
        "Draytek Vigor 3900 memory process top 1", {'host': host})
    metric_command_process_top_2 = Gauge(
        "draytek_vigor_3900_metric_command_process_top_2",
        "Draytek Vigor 3900 command process top 2", {'host': host})
    metric_cpu_process_top_2 = Gauge(
        "draytek_vigor_3900_metric_cpu_process_top_2",
        "Draytek Vigor 3900 cpu process top 2", {'host': host})
    metric_memory_process_top_2 = Gauge(
        "draytek_vigor_3900_metric_memory_process_top_2",
        "Draytek Vigor 3900 memory process top 2", {'host': host})

    registry.register(metric_memory_usage)
    registry.register(metric_cpu_usage)
    registry.register(metric_memory_size)
    registry.register(metric_model)
    registry.register(metric_hardware_version)
    registry.register(metric_firmware_version)
    registry.register(metric_build_date_time)
    registry.register(metric_revision)
    registry.register(metric_system_up_time)
    registry.register(metric_current_system_time)
    registry.register(metric_eeprom_version)
    registry.register(metric_bootloader_version)
    registry.register(metric_memory_used)
    registry.register(metric_memory_free)
    registry.register(metric_memory_shards)
    registry.register(metric_memory_buffer)
    registry.register(metric_memory_cached)
    registry.register(metric_load_average_1)
    registry.register(metric_load_average_5)
    registry.register(metric_load_average_15)
    registry.register(metric_command_process_top_1)
    registry.register(metric_cpu_process_top_1)
    registry.register(metric_memory_process_top_1)
    registry.register(metric_command_process_top_2)
    registry.register(metric_cpu_process_top_2)
    registry.register(metric_memory_process_top_2)

    while True:
        time.sleep(1)

        # vigor_draytek_3900 is the netmiko device definition (module-level)
        mode_enable = 'enable'
        net_connect_device = ConnectHandler(**vigor_draytek_3900)
        net_connect_device.enable()

        command_status_system = 'status system'
        command_status_process = 'status process'

        result_run_command_process = ''
        result_run_command = net_connect_device.send_command(
            mode_enable, expect_string=r'Entering enable mode...')
        result_run_command += net_connect_device.send_command(
            command_status_system, expect_string=r'#')
        result_run_command_process += net_connect_device.send_command(
            command_status_process, expect_string=r'#')

        [
            Model, Hardware_Version, Firmware_Version, Build_Date_Time,
            Revision, System_up_Time, CPU_usage, Memory_Size, Memory_Usage,
            Current_System_Time, EEPROM_Version, Bootloader_Version
        ] = re.findall(r"\d.+", result_run_command)

        process_array = re.findall(r"\d.+", result_run_command_process)

        [
            memory_used, memory_free, memory_shards, memory_buffer,
            memory_cached
        ] = re.findall(r'\d+', process_array[0])

        [load_average1, load_average5,
         load_average15] = re.findall(r'[0-9.]*[0-9]+', process_array[1])

        [
            PID_TOP1, USER_TOP1, STATUS_TOP1, RSS_TOP1, PPID_TOP1, CPU_TOP1,
            MEM_TOP1, COMMAND_TOP1
        ] = re.findall(r'\S+', process_array[2])
        [
            PID_TOP2, USER_TOP2, STATUS_TOP2, RSS_TOP2, PPID_TOP2, CPU_TOP2,
            MEM_TOP2, COMMAND_TOP2
        ] = re.findall(r'\S+', process_array[3])
        [
            PID_TOP3, USER_TOP3, STATUS_TOP3, RSS_TOP3, PPID_TOP3, CPU_TOP3,
            MEM_TOP3, COMMAND_TOP3
        ] = re.findall(r'\S+', process_array[4])
        [
            PID_TOP4, USER_TOP4, STATUS_TOP4, RSS_TOP4, PPID_TOP4, CPU_TOP4,
            MEM_TOP4, COMMAND_TOP4
        ] = re.findall(r'\S+', process_array[5])
        [
            PID_TOP5, USER_TOP5, STATUS_TOP5, RSS_TOP5, PPID_TOP5, CPU_TOP5,
            MEM_TOP5, COMMAND_TOP5
        ] = re.findall(r'\S+', process_array[6])
        [
            PID_TOP6, USER_TOP6, STATUS_TOP6, RSS_TOP6, PPID_TOP6, CPU_TOP6,
            MEM_TOP6, COMMAND_TOP6
        ] = re.findall(r'\S+', process_array[7])
        [
            PID_TOP7, USER_TOP7, STATUS_TOP7, RSS_TOP7, PPID_TOP7, CPU_TOP7,
            MEM_TOP7, COMMAND_TOP7
        ] = re.findall(r'\S+', process_array[8])
        [
            PID_TOP8, USER_TOP8, STATUS_TOP8, RSS_TOP8, PPID_TOP8, CPU_TOP8,
            MEM_TOP8, COMMAND_TOP8
        ] = re.findall(r'\S+', process_array[9])
        [
            PID_TOP9, USER_TOP9, STATUS_TOP9, RSS_TOP9, PPID_TOP9, CPU_TOP9,
            MEM_TOP9, COMMAND_TOP9
        ] = re.findall(r'\S+', process_array[10])
        [
            PID_TOP10, USER_TOP10, STATUS_TOP10, RSS_TOP10, PPID_TOP10,
            CPU_TOP10, MEM_TOP10, COMMAND_TOP10
        ] = re.findall(r'\S+', process_array[11])

        # Drop the trailing character (the '%' sign) from the usage values
        Memory_Usage = Memory_Usage[:len(Memory_Usage) - 1]
        CPU_usage = CPU_usage[:len(CPU_usage) - 1]

        metric_model.set({}, Model)
        metric_hardware_version.set({}, Hardware_Version)
        metric_firmware_version.set({}, Firmware_Version)
        metric_build_date_time.set({}, Build_Date_Time)
        metric_revision.set({}, Revision)
        metric_system_up_time.set({}, System_up_Time)
        metric_memory_usage.set({}, Memory_Usage)
        metric_cpu_usage.set({}, CPU_usage)
        metric_memory_size.set({}, Memory_Size)
        metric_current_system_time.set({}, Current_System_Time)
        metric_eeprom_version.set({}, EEPROM_Version)
        metric_bootloader_version.set({}, Bootloader_Version)
        metric_memory_used.set({}, memory_used)
        metric_memory_free.set({}, memory_free)
        metric_memory_shards.set({}, memory_shards)
        metric_memory_buffer.set({}, memory_buffer)
        metric_memory_cached.set({}, memory_cached)
        metric_load_average_1.set({}, load_average1)
        metric_load_average_5.set({}, load_average5)
        metric_load_average_15.set({}, load_average15)
        metric_command_process_top_1.set({}, COMMAND_TOP1)
        metric_cpu_process_top_1.set({}, CPU_TOP1)
        metric_memory_process_top_1.set({}, MEM_TOP1)
        metric_command_process_top_2.set({}, COMMAND_TOP2)
        metric_cpu_process_top_2.set({}, CPU_TOP2)
        metric_memory_process_top_2.set({}, MEM_TOP2)

        net_connect_device.disconnect()
def gather_data(registry):
    """Gathers the metrics"""
    host = socket.gethostname()

    asterisk_total_active_channels_metric = Gauge(
        "asterisk_total_active_channels_metric",
        "Total current active channels", {'host': host})
    asterisk_total_active_calls_metric = Gauge(
        "asterisk_total_active_calls_metric", "Total current active calls",
        {'host': host})
    asterisk_total_calls_processed_metric = Gauge(
        "asterisk_total_calls_processed_metric",
        "Total current calls processed", {'host': host})
    asterisk_system_uptime_seconds_metric = Gauge(
        "asterisk_system_uptime_seconds_metric", "system uptime",
        {'host': host})
    asterisk_last_reload_seconds_metric = Gauge(
        "asterisk_last_reload_seconds_metric", "last reload", {'host': host})
    asterisk_total_sip_peers_metric = Gauge(
        "asterisk_total_sip_peers_metric", "sip peers", {'host': host})
    asterisk_total_monitored_online_metric = Gauge(
        "asterisk_total_monitored_online_metric", "monitored online",
        {'host': host})
    asterisk_total_monitored_offline_metric = Gauge(
        "asterisk_total_monitored_offline_metric", "monitored offline",
        {'host': host})
    asterisk_total_unmonitored_online_metric = Gauge(
        "asterisk_total_unmonitored_online_metric", "unmonitored online",
        {'host': host})
    asterisk_total_unmonitored_offline_metric = Gauge(
        "asterisk_total_unmonitored_offline_metric", "unmonitored offline",
        {'host': host})
    asterisk_total_threads_listed_metric = Gauge(
        "asterisk_total_threads_metric", "total threads listed",
        {'host': host})
    asterisk_total_sip_status_unknown_metric = Gauge(
        "asterisk_total_sip_status_unknown_metric",
        "total sip status unknown", {'host': host})
    asterisk_total_sip_status_qualified_metric = Gauge(
        "asterisk_total_sip_status_qualified_metric",
        "total sip status qualified", {'host': host})

    registry.register(asterisk_total_active_calls_metric)
    registry.register(asterisk_total_active_channels_metric)
    registry.register(asterisk_total_calls_processed_metric)
    registry.register(asterisk_system_uptime_seconds_metric)
    registry.register(asterisk_last_reload_seconds_metric)
    registry.register(asterisk_total_sip_peers_metric)
    registry.register(asterisk_total_monitored_online_metric)
    registry.register(asterisk_total_monitored_offline_metric)
    registry.register(asterisk_total_unmonitored_online_metric)
    registry.register(asterisk_total_unmonitored_offline_metric)
    registry.register(asterisk_total_threads_listed_metric)
    registry.register(asterisk_total_sip_status_unknown_metric)
    registry.register(asterisk_total_sip_status_qualified_metric)

    while True:
        time.sleep(1)

        command_core_show_channels = [
            "/usr/sbin/asterisk -rx 'core show channels' | awk '{print $1}'"
        ]
        command_core_show_uptime = [
            "/usr/sbin/asterisk -rx 'core show uptime seconds' | awk '{print $3}'"
        ]
        command_sip_show_peers = "/usr/sbin/asterisk -rx 'sip show peers' | grep 'sip peers' | grep 'Monitored' | grep 'Unmonitored'"
        command_core_show_threads = "/usr/sbin/asterisk -rx 'core show threads' | tail -1 | cut -d' ' -f1"
        command_sip_show_peers_status_unknown = "/usr/sbin/asterisk -rx 'sip show peers' | grep -P '^\\d{3,}.*UNKNOWN\\s' | wc -l"
        command_sip_show_peers_status_qualified = "/usr/sbin/asterisk -rx 'sip show peers' | grep -P '^\\d{3,}.*OK\\s\\(\\d+' | wc -l"

        for core_show_channels in command_core_show_channels:
            array_core_show_channels = os.popen(core_show_channels).readlines()
            active_channels = array_core_show_channels[1].rstrip()
            active_calls = array_core_show_channels[2].rstrip()
            calls_processed = array_core_show_channels[3].rstrip()

            asterisk_total_active_channels_metric.set(
                {'type': "active channels"}, active_channels)
            asterisk_total_active_calls_metric.set({'type': "active calls"},
                                                   active_calls)
            asterisk_total_calls_processed_metric.set(
                {'type': "calls processed"}, calls_processed)

        for core_show_uptime in command_core_show_uptime:
            array_core_show_uptime = os.popen(core_show_uptime).readlines()
            system_uptime = array_core_show_uptime[0].rstrip()
            last_reload = array_core_show_uptime[1].rstrip()

            asterisk_system_uptime_seconds_metric.set(
                {'type': "system uptime seconds"}, system_uptime)
            asterisk_last_reload_seconds_metric.set(
                {'type': "last reload seconds"}, last_reload)

        sip_show_peers = os.popen(command_sip_show_peers).read()
        [sip_peers, monitored_online, monitored_offline, unmonitored_online,
         unmonitored_offline] = re.findall(r"\d+", sip_show_peers)

        asterisk_total_sip_peers_metric.set({'type': "total sip peers"},
                                            sip_peers)
        asterisk_total_monitored_online_metric.set(
            {'type': "total monitored online"}, monitored_online)
        asterisk_total_monitored_offline_metric.set(
            {'type': "total monitored offline"}, monitored_offline)
        asterisk_total_unmonitored_online_metric.set(
            {'type': "total unmonitored online"}, unmonitored_online)
        asterisk_total_unmonitored_offline_metric.set(
            {'type': "total unmonitored offline"}, unmonitored_offline)

        core_show_threads = os.popen(command_core_show_threads).read()
        asterisk_total_threads_listed_metric.set(
            {'type': "total threads listed"}, core_show_threads)

        sip_show_peers_status_unknown = os.popen(
            command_sip_show_peers_status_unknown).read()
        asterisk_total_sip_status_unknown_metric.set(
            {'type': "total sip status unknown"},
            sip_show_peers_status_unknown)

        sip_show_peers_status_qualified = os.popen(
            command_sip_show_peers_status_qualified).read()
        asterisk_total_sip_status_qualified_metric.set(
            {'type': "total sip status qualified"},
            sip_show_peers_status_qualified)
class TestGauge(unittest.TestCase):

    def setUp(self):
        self.data = {
            'name': "hdd_disk_used",
            'help_text': "Disk space used",
            'const_labels': {
                "server": "1.db.production.my-app"
            },
        }
        self.g = Gauge(**self.data)

    def test_set(self):
        data = (
            {'labels': {'max': "500G", 'dev': "sda"},
             'values': range(0, 500, 50)},
            {'labels': {'max': "1T", 'dev': "sdb"},
             'values': range(0, 1000, 100)},
            {'labels': {'max': "10T", 'dev': "sdc"},
             'values': range(0, 10000, 1000)},
        )

        for i in data:
            for j in i['values']:
                self.g.set(i['labels'], j)

        self.assertEqual(len(data), len(self.g.values))

    def test_get(self):
        data = (
            {'labels': {'max': "500G", 'dev': "sda"},
             'values': range(0, 500, 50)},
            {'labels': {'max': "1T", 'dev': "sdb"},
             'values': range(0, 1000, 100)},
            {'labels': {'max': "10T", 'dev': "sdc"},
             'values': range(0, 10000, 1000)},
        )

        for i in data:
            for j in i['values']:
                self.g.set(i['labels'], j)
                self.assertEqual(j, self.g.get(i['labels']))

        for i in data:
            self.assertEqual(max(i['values']), self.g.get(i['labels']))

    def test_set_get_without_labels(self):
        data = {'labels': {}, 'values': range(100)}

        for i in data['values']:
            self.g.set(data['labels'], i)

        self.assertEqual(1, len(self.g.values))
        self.assertEqual(max(data['values']), self.g.get(data['labels']))

    def test_inc(self):
        iterations = 100
        labels = {'max': "10T", 'dev': "sdc"}

        for i in range(iterations):
            self.g.inc(labels)
            self.assertEqual(i + 1, self.g.get(labels))

        self.assertEqual(iterations, self.g.get(labels))

    def test_dec(self):
        iterations = 100
        labels = {'max': "10T", 'dev': "sdc"}
        self.g.set(labels, iterations)

        for i in range(iterations):
            self.g.dec(labels)
            self.assertEqual(iterations - (i + 1), self.g.get(labels))

        self.assertEqual(0, self.g.get(labels))

    def test_add(self):
        iterations = 100
        labels = {'max': "10T", 'dev': "sdc"}

        for i in range(iterations):
            self.g.add(labels, i)

        self.assertEqual(sum(range(iterations)), self.g.get(labels))

    def test_add_negative(self):
        iterations = 100
        labels = {'max': "10T", 'dev': "sdc"}

        for i in range(iterations):
            self.g.add(labels, -i)

        self.assertEqual(sum(map(lambda x: -x, range(iterations))),
                         self.g.get(labels))

    def test_sub(self):
        iterations = 100
        labels = {'max': "10T", 'dev': "sdc"}

        for i in range(iterations):
            self.g.sub(labels, i)

        self.assertEqual(sum(map(lambda x: -x, range(iterations))),
                         self.g.get(labels))

    def test_sub_positive(self):
        iterations = 100
        labels = {'max': "10T", 'dev': "sdc"}

        for i in range(iterations):
            self.g.sub(labels, -i)

        self.assertEqual(sum(range(iterations)), self.g.get(labels))
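# For reference, a minimal usage sketch of the Gauge behaviour exercised by the
# tests above (set/get plus inc/dec/add/sub on one label set). It uses only calls
# that appear in the tests; the metric and label values are just for illustration.
g = Gauge("hdd_disk_used", "Disk space used", {'server': "1.db.production.my-app"})
labels = {'max': "500G", 'dev': "sda"}

g.set(labels, 100)   # a gauge keeps the last value written
g.inc(labels)        # 101
g.add(labels, 9)     # 110
g.sub(labels, 10)    # 100
g.dec(labels)        # 99
assert g.get(labels) == 99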
def test_all(self):
    counter_data = (
        ({'c_sample': '1'}, 100),
        ({'c_sample': '2'}, 200),
        ({'c_sample': '3'}, 300),
        ({'c_sample': '1', 'c_subsample': 'b'}, 400),
    )

    gauge_data = (
        ({'g_sample': '1'}, 500),
        ({'g_sample': '2'}, 600),
        ({'g_sample': '3'}, 700),
        ({'g_sample': '1', 'g_subsample': 'b'}, 800),
    )

    summary_data = (
        ({'s_sample': '1'}, range(1000, 2000, 4)),
        ({'s_sample': '2'}, range(2000, 3000, 20)),
        ({'s_sample': '3'}, range(3000, 4000, 13)),
        ({'s_sample': '1', 's_subsample': 'b'}, range(4000, 5000, 47)),
    )

    counter = Counter("counter_test", "A counter.", {'type': "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {'type': "gauge"})
    summary = Summary("summary_test", "A summary.", {'type': "summary"})

    # Register the collectors on the registry served by the test server
    self.registry.register(counter)
    self.registry.register(gauge)
    self.registry.register(summary)

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]

    valid_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_count{s_sample="1",type="summary"} 250
summary_test_count{s_sample="2",type="summary"} 50
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
"""

    headers = {'accept': 'text/plain; version=0.0.4'}
    url = urllib.parse.urljoin(TEST_URL, TEST_METRICS_PATH[1:])
    r = requests.get(url, headers=headers)

    self.assertEqual("text/plain; version=0.0.4; charset=utf-8",
                     r.headers['content-type'])
    self.assertEqual(200, r.status_code)
    self.assertEqual(valid_data, r.text)
def gather_data(registry):
    """Gathers the metrics"""

    # Address of the switch being scraped and its two stack members
    host = '192.168.1.1'
    stacks_1 = 'stacks 1'
    stacks_2 = 'stacks 2'

    metric_total_disk_stacks1 = Gauge(
        "cisco_switch_3650_total_disk_stacks1_bytes",
        "Cisco switch 3650 total disk stacks 1", {
            'host': host,
            'stacks': stacks_1
        })
    metric_free_disk_stacks1 = Gauge(
        "cisco_switch_3650_free_disk_stacks1_bytes",
        "Cisco switch 3650 free disk stacks 1", {
            'host': host,
            'stacks': stacks_1
        })
    metric_used_disk_stacks1 = Gauge(
        "cisco_switch_3650_used_disk_stacks1_bytes",
        "Cisco switch 3650 used disk of stacks 1", {
            'host': host,
            'stacks': stacks_1
        })
    metric_total_disk_stacks2 = Gauge(
        "cisco_switch_3650_total_disk_stacks2_bytes",
        "Cisco switch 3650 total disk stacks 2", {
            'host': host,
            'stacks': stacks_2
        })
    metric_free_disk_stacks2 = Gauge(
        "cisco_switch_3650_free_disk_stacks2_bytes",
        "Cisco switch 3650 free disk stacks 2", {
            'host': host,
            'stacks': stacks_2
        })
    metric_used_disk_stacks2 = Gauge(
        "cisco_switch_3650_used_disk_stacks2_bytes",
        "Cisco switch 3650 used disk of stacks 2", {
            'host': host,
            'stacks': stacks_2
        })

    registry.register(metric_total_disk_stacks1)
    registry.register(metric_free_disk_stacks1)
    registry.register(metric_used_disk_stacks1)
    registry.register(metric_total_disk_stacks2)
    registry.register(metric_free_disk_stacks2)
    registry.register(metric_used_disk_stacks2)

    # cisco_ios is the netmiko device definition (module-level)
    net_connect = ConnectHandler(**cisco_ios)

    while True:
        time.sleep(1)

        command_show_infor_disk_stacks1 = 'dir flash-1:/ | include bytes | include total | include free'
        command_show_infor_disk_stacks2 = 'dir flash-2:/ | include bytes | include total | include free'

        result_run_command_show_infor_disk_stacks1 = net_connect.send_command(
            command_show_infor_disk_stacks1)
        result_run_command_show_infor_disk_stacks2 = net_connect.send_command(
            command_show_infor_disk_stacks2)

        [value_total_disk_stacks1, value_free_disk_stacks1
         ] = re.findall(r"\d+", result_run_command_show_infor_disk_stacks1)
        [value_total_disk_stacks2, value_free_disk_stacks2
         ] = re.findall(r"\d+", result_run_command_show_infor_disk_stacks2)

        value_used_disk_stacks1 = int(value_total_disk_stacks1) - int(
            value_free_disk_stacks1)
        value_used_disk_stacks2 = int(value_total_disk_stacks2) - int(
            value_free_disk_stacks2)

        metric_free_disk_stacks1.set({}, value_free_disk_stacks1)
        metric_used_disk_stacks1.set({}, value_used_disk_stacks1)
        metric_total_disk_stacks1.set({}, value_total_disk_stacks1)
        metric_free_disk_stacks2.set({}, value_free_disk_stacks2)
        metric_used_disk_stacks2.set({}, value_used_disk_stacks2)
        metric_total_disk_stacks2.set({}, value_total_disk_stacks2)