class testPluginHelper(unittest.TestCase):
    """Exercises PluginHelper threshold handling through a fake '-F' metric."""

    def setUp(self):
        # Preserve argv and stdout so every test may mangle them freely.
        self.argv_store = sys.argv
        from pynag.Plugins import PluginHelper
        self.my_plugin = PluginHelper()
        self.my_plugin.parser.add_option('-F', dest='fakedata',
                                         help='fake data to test thresholds')
        sys.stdout = StringIO()

    def tearDown(self):
        # Restore what setUp() replaced.
        sys.argv = self.argv_store
        sys.stdout = original_stdout

    def run_expect(self, case, value, expected_exit):
        """Run the plugin with threshold args `case` and fake value `value`,
        asserting the plugin exits via SystemExit with `expected_exit`."""
        sys.argv = [sys.argv[0]] + case.split() + ('-F %s' % value).split()
        self.my_plugin.parse_arguments()
        self.my_plugin.add_status(pynag.Plugins.ok)
        self.my_plugin.add_summary(self.my_plugin.options.fakedata)
        self.my_plugin.add_metric('fakedata', self.my_plugin.options.fakedata)
        try:
            self.my_plugin.check_all_metrics()
            self.my_plugin.exit()
        except SystemExit as e:  # was Python-2-only "except SystemExit, e"
            self.assertEqual(type(e), type(SystemExit()))
            self.assertEqual(e.code, expected_exit)
        except Exception as e:
            self.fail('unexpected exception: %s' % e)
def check_metric(self): """Check if the metric value is within the threshold range, and exits with status code, message and perfdata. """ # Get values metric_values = self._get_metric_values() unit = self._AZURE_METRICS_UNIT_SYMBOLS.get( self._metric_properties['unit']) if unit is None: unit = '' # Test if value to display if metric_values is None: message = 'No value available for metric {}'.format(self['metric']) if self['dimension'] is not None: message += ' and dimension {}'.format(self['dimension']) self.nagios_exit(Plugins.UNKNOWN, message) # PluginHelper of pynag import # https://pynag.readthedocs.io/en/latest/pynag.Plugins.html?highlight=check_threshold#pynag.Plugins.PluginHelper p = PluginHelper() # For each value, declare metric with according thresholds for metric_idx in metric_values: p.add_metric(label=metric_idx, value=metric_values[metric_idx], uom=unit, warn=self['warning'], crit=self['critical']) # Test all metrics according to there thresholds p.check_all_metrics() # Add global summary for output p.add_summary(self._metric_properties['name']['localizedValue']) # Exit and display plugin output p.exit()
now = time.time()
# Label metrics "<ifDescr>-<counter name>" so each interface/counter pair is unique.
labelstr = str(m.ifDescr[interfaceKey]) + '-' + valuesKey
# Memcache keys holding the previous counter sample and its timestamp.
counthash = str(hash('checkinterfaces' + helper.options.host + labelstr))
timehash = str(hash('now' + helper.options.host + labelstr))
oldcounter = mc.get(counthash)
oldtime = mc.get(timehash)
if oldcounter is None:
    oldcounter = 0
if oldtime is None:
    # First run: pretend the last sample was 30s ago, avoiding a zero divisor.
    oldtime = now - 30
# Fetch the SNMP counter by attribute name instead of eval()-ing the
# generated string "m.<valuesKey>[<interfaceKey>]" -- same result, no
# arbitrary code execution.
counter = getattr(m, valuesKey)[interfaceKey]
mc.set(counthash, counter)
mc.set(timehash, now)
# Bits per second since the previous sample (octet counter * 8).
mbps = ((counter - oldcounter) * 8) / (now - oldtime)
helper.add_metric(label=labelstr, value=mbps)

# By default assume everything is ok. Any thresholds specified with
# --threshold can overwrite this status:
helper.status(ok)
# Check all metrics against built-in or --threshold-supplied thresholds.
helper.check_all_metrics()
# Print out plugin information and exit nagios-style
helper.exit()
# Here comes the specific check logic try: start_time = time.time() result = socket.gethostbyname( hostname ) # result will contain the ip address resolved end_time = time.time() # If no address was specified with -a, then we return # OK if hostname resolved to anything at all if address is None or address == result: my_plugin.status(ok) my_plugin.add_summary("%s resolves to %s" % (hostname, result)) else: my_plugin.status(critical) my_plugin.add_summary("%s resolves to %s but should resolve to %s" % (hostname,result,address)) # Add run_time metric, so we can also alert if lookup takes to long run_time = end_time - start_time my_plugin.add_metric('run_time', run_time) except gaierror: # If any exceptions happened in the code above, lets return a critical status my_plugin.status(critical) my_plugin.add_summary('Could not resolve host "%s"' % hostname ) # when check_all_metrics() is run, any metrics we have added with add_metric() will be processed against # Thresholds (like --threshold). This part will allow our plugin users to alert on lookup_time my_plugin.check_all_metrics() # Print status output and exit my_plugin.exit()
    # (Inside the per-row earthquake loop.)
    # The feed uses decimal commas; normalise them to dots.
    scale = scale.replace(',', '.')
    quality = quality.replace(',', '.')
    latitude = latitude.replace(',', '.')
    longitude = longitude.replace(',', '.')
    distance = distance.replace(',', '.')
    # manipulate location, well.. at least remove spaces
    location = location.replace(' ', '_')
    datetimestr = str_date + " " + str_time.split(',', 1)[0]
    timestamp = time.mktime(parse(datetimestr).timetuple())
    timestamp = int(timestamp)
    timesince = now - timestamp
    # Skip quakes that happened more than one hour ago (the original comment
    # said "less than one hour"; the condition actually keeps only the last hour).
    if timesince > 60 * 60:
        continue
    # Rows marked 'ATHUGI' are counted as major earthquakes.
    if row.find('ATHUGI') > 0:
        major_earthquakes += 1
    recent_earthquakes += 1
    helper.add_long_output(
        "%s %s: scale=%s depth=%s quality=%s %s %s" %
        (str_date, str_time, scale, depth, quality, distance, location))

helper.add_summary('%s major earthquakes. %s total earthquakes' % (major_earthquakes, recent_earthquakes))
helper.add_metric('major earthquakes', value=major_earthquakes, crit='1..inf')
helper.add_metric('recent earthquakes', value=recent_earthquakes, warn='3..inf')
helper.check_all_metrics()
helper.exit()
m = Manager(helper.options.host, helper.options.community, int(helper.options.version))
formatstring = helper.options.value + ': %s'
# Fetch the requested SNMP value by attribute name rather than eval() on a
# generated "m.<value>" string -- same result, no code-injection risk.
content = getattr(m, helper.options.value)
helper.add_summary(formatstring % content)

# No default thresholds: they must be supplied at runtime with --threshold.
helper.add_metric(label=helper.options.value, value=content)

# By default assume everything is ok. Any thresholds specified with
# --threshold can overwrite this status:
helper.status(ok)

# Check all metrics against built-in or --threshold-supplied thresholds.
helper.check_all_metrics()

# Print out plugin information and exit nagios-style
helper.exit()
xy = (x,y) rgb = image.getpixel(xy) colormap[rgb] += 1 pixels_per_triangle = 251 # How many pixels are in an triangle grey = (110,110,110) green = (0,255,0) yellow = (255,255,0) orange = (255,140,0) red = (255,0,0) grey = colormap.get(grey) green = colormap.get(green) yellow = colormap.get(yellow) orange = colormap.get(orange) red = colormap.get(red) p.status(ok) # Typical metrics: 'grey'=1553;;;; 'green'=16732;;;; 'yellow'=237;;;; 'orange'=232;;;; 'red'=251;;;; p.add_metric('grey', grey) p.add_metric('green', green) p.add_metric('yellow', yellow, warn="240..inf") p.add_metric('orange', orange,crit="240..inf") p.add_metric('red', red,crit="260..inf") p.check_all_metrics() p.exit()
# check_stuff.py -- treat every positional command-line argument as a
# performance-data string and evaluate it against any thresholds given.
from pynag.Plugins import PluginHelper

my_plugin = PluginHelper()
my_plugin.parse_arguments()

# Each leftover (non-option) argument is parsed as a perfdata metric.
for perfdata_arg in my_plugin.arguments:
    my_plugin.add_metric(perfdatastring=perfdata_arg)

my_plugin.check_all_metrics()
my_plugin.exit()
class PluginHelper(unittest.TestCase):
    """Threshold-range tests for pynag's PluginHelper.

    NOTE(review): this test class shadows the pynag.Plugins.PluginHelper
    name it imports in setUp(); consider renaming the class.
    """

    def setUp(self):
        # Preserve argv and stdout so every test may mangle them freely.
        self.argv_store = sys.argv
        from pynag.Plugins import PluginHelper
        self.my_plugin = PluginHelper()
        self.my_plugin.parser.add_option('-F', dest='fakedata',
                                         help='fake data to test thresholds')
        sys.stdout = StringIO()

    def tearDown(self):
        sys.argv = self.argv_store
        sys.stdout = original_stdout

    def run_expect(self, case, value, expected_exit):
        """Run the plugin with threshold args `case` and fake value `value`,
        asserting it exits via SystemExit with code `expected_exit`."""
        sys.argv = [sys.argv[0]] + case.split() + ('-F %s' % value).split()
        self.my_plugin.parse_arguments()
        self.my_plugin.add_status(pynag.Plugins.ok)
        self.my_plugin.add_summary(self.my_plugin.options.fakedata)
        self.my_plugin.add_metric('fakedata', self.my_plugin.options.fakedata)
        try:
            self.my_plugin.check_all_metrics()
            self.my_plugin.exit()
        except SystemExit as e:
            # assertEquals is deprecated; use assertEqual.
            self.assertEqual(type(e), type(SystemExit()))
            self.assertEqual(e.code, expected_exit)
        except Exception as e:
            self.fail('unexpected exception: %s' % e)
        else:
            self.fail('SystemExit exception expected')
        finally:
            signal.alarm(0)  # cancel the watchdog armed by set_timeout()

    # Critical if "stuff" is over 20, else warn if over 10
    # (will be critical if "stuff" is less than 0)
    def test_number_1(self):
        case = '--th=metric=fakedata,ok=0..10,warn=10..20'
        self.run_expect(case, -23, 2)

    def test_number_2(self):
        case = '--th=metric=fakedata,ok=0..10,warn=10..20'
        self.run_expect(case, 3, 0)

    def test_number_3(self):
        case = '--th=metric=fakedata,ok=0..10,warn=10..20'
        self.run_expect(case, 13, 1)

    def test_number_4(self):
        case = '--th=metric=fakedata,ok=0..10,warn=10..20'
        self.run_expect(case, 23, 2)

    # Same as above. Negative "stuff" is OK
    def test_number_5(self):
        case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
        self.run_expect(case, '-23', 0)

    def test_number_6(self):
        case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
        self.run_expect(case, '3', 0)

    def test_number_7(self):
        case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
        self.run_expect(case, '13', 1)

    def test_number_8(self):
        case = '--th=metric=fakedata,ok=inf..10,warn=10..20'
        self.run_expect(case, '23', 2)

    # Critical if "stuff" is over 20, else warn if "stuff" is below 10
    # (will be critical if "stuff" is less than 0)
    def test_number_9(self):
        case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
        self.run_expect(case, '-23', 0)

    def test_number_10(self):
        case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
        self.run_expect(case, '3', 1)

    def test_number_11(self):
        case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
        self.run_expect(case, '13', 0)

    def test_number_12(self):
        case = '--th=metric=fakedata,warn=0..10,crit=20..inf'
        self.run_expect(case, '23', 2)

    # Critical if "stuff" is less than 1
    def test_number_13(self):
        case = '--th=metric=fakedata,ok=1..inf'
        self.run_expect(case, '-23', 2)

    def test_number_14(self):
        case = '--th=metric=fakedata,ok=1..inf'
        self.run_expect(case, '0', 2)

    def test_number_15(self):
        case = '--th=metric=fakedata,ok=1..inf'
        self.run_expect(case, '13', 0)

    def test_number_16(self):
        case = '--th=metric=fakedata,ok=1..inf'
        self.run_expect(case, '23', 0)

    # 1-9 is warning, negative or above 10 is critical
    def test_number_17(self):
        case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
        self.run_expect(case, '-23', 2)

    def test_number_18(self):
        case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
        self.run_expect(case, '0', 0)

    def test_number_19(self):
        case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
        self.run_expect(case, '7', 1)

    def test_number_20(self):
        case = '--th=metric=fakedata,warn=1..9,crit=^0..10'
        self.run_expect(case, '23', 2)

    # The only noncritical range is 5:6
    def test_number_21(self):
        case = '--th=metric=fakedata,ok=5..6'
        self.run_expect(case, '-23', 2)

    def test_number_22(self):
        case = '--th=metric=fakedata,ok=5..6'
        self.run_expect(case, '0', 2)

    def test_number_23(self):
        case = '--th=metric=fakedata,ok=5..6'
        self.run_expect(case, '2', 2)

    def test_number_24(self):
        case = '--th=metric=fakedata,ok=5..6'
        self.run_expect(case, '5', 0)

    def test_number_25(self):
        case = '--th=metric=fakedata,ok=5..6'
        self.run_expect(case, '6', 0)

    def test_number_26(self):
        case = '--th=metric=fakedata,ok=5..6'
        self.run_expect(case, '7', 2)

    # Critical if "stuff" is 10 to 20
    def test_number_27(self):
        case = '--th=metric=fakedata,ok=^10..20'
        self.run_expect(case, '-23', 0)

    def test_number_28(self):
        case = '--th=metric=fakedata,ok=^10..20'
        self.run_expect(case, '0', 0)

    def test_number_29(self):
        case = '--th=metric=fakedata,ok=^10..20'
        self.run_expect(case, '2', 0)

    def test_number_30(self):
        case = '--th=metric=fakedata,ok=^10..20'
        self.run_expect(case, '10', 2)

    def test_number_31(self):
        case = '--th=metric=fakedata,ok=^10..20'
        self.run_expect(case, '15', 2)

    def test_number_32(self):
        case = '--th=metric=fakedata,ok=^10..20'
        self.run_expect(case, '20', 2)

    def test_number_33(self):
        case = '--th=metric=fakedata,ok=^10..20'
        self.run_expect(case, '23', 0)

    # Cmdline thresholds pass but we insert a "hardcoded" metric with thresholds
    # which will also be evaluated
    def test_number_34(self):
        # Extra case with hardcoded thresholds
        self.my_plugin.add_metric('fakedata2', value='15', warn='0..10', crit='10..inf')
        case = '--th=metric=fakedata,ok=0..10,warn=10..20'
        self.run_expect(case, 3, 2)

    def test_number_35(self):
        # Extra case with hardcoded thresholds
        self.my_plugin.add_metric('fakedata2', value='9', warn='0..10', crit='10..inf')
        case = '--th=metric=fakedata,ok=0..10,warn=10..20'
        self.run_expect(case, 3, 1)

    def test_number_36(self):
        # Extra case with hardcoded thresholds
        self.my_plugin.add_metric('fakedata2', value='-4', warn='0..10', crit='10..inf')
        case = '--th=metric=fakedata,ok=0..10,warn=10..20'
        self.run_expect(case, 3, 0)

    def testTimeout(self):
        """set_timeout(1) must raise SystemExit(unknown) via SIGALRM."""
        try:
            self.my_plugin.set_timeout(1)
            time.sleep(1)
            self.assertTrue(False, "Code should have timed out by now")
        except SystemExit as e:
            self.assertEqual(type(e), type(SystemExit()))
            self.assertEqual(e.code, pynag.Plugins.unknown)
            self.assertTrue(True, "Timeout occured in plugin, just like expected.")
auth = (plugin.options.user, plugin.options.password)

# Build the metric URL.
api = 'http://{}:{}/api/overview'.format(plugin.options.hostname, plugin.options.port)
payload = {
    'msg_rates_age': '3600',
    'msg_rates_incr': '10',
    'columns': 'message_stats.deliver_get_details.avg_rate',
}

# No need to specify a timeout: pynag has --timeout option for the whole plugin.
r = requests.get(api, params=payload, auth=auth)
if plugin.options.show_debug:
    show_response()

if r.status_code == 401:
    plugin.add_summary("Login failed")
    plugin.exit()

try:
    deliver_rate = r.json()["message_stats"]["deliver_get_details"]["avg_rate"]
except ValueError:
    # Body was not valid JSON at all.
    plugin.add_summary("Can't decode server's response")
    plugin.exit()
except KeyError:
    # Valid JSON but without the expected structure (e.g. no traffic yet),
    # which previously crashed the plugin with a traceback.
    plugin.add_summary("Server response misses expected message_stats fields")
    plugin.exit()

plugin.add_metric('deliver_rate', deliver_rate)
plugin.add_summary('message.deliver.avg_rate: {}'.format(deliver_rate))
plugin.check_all_metrics()
plugin.exit()
# NOTE(review): x iterates over `height` here -- confirm axis order is intended.
for x in range(0, height):
    xy = (x, y)
    rgb = image.getpixel(xy)
    colormap[rgb] += 1

pixels_per_triangle = 251  # How many pixels are in a triangle

# Reference colors used by the status image.
grey = (110, 110, 110)
green = (0, 255, 0)
yellow = (255, 255, 0)
orange = (255, 140, 0)
red = (255, 0, 0)

# Look up the pixel count for each color; default to 0 so a color absent
# from the image yields a zero metric instead of None.
grey = colormap.get(grey, 0)
green = colormap.get(green, 0)
yellow = colormap.get(yellow, 0)
orange = colormap.get(orange, 0)
red = colormap.get(red, 0)

p.status(ok)
# Typical metrics: 'grey'=1553;;;; 'green'=16732;;;; 'yellow'=237;;;; 'orange'=232;;;; 'red'=251;;;;
p.add_metric('grey', grey)
p.add_metric('green', green)
p.add_metric('yellow', yellow, warn="240..inf")
p.add_metric('orange', orange, crit="240..inf")
p.add_metric('red', red, crit="260..inf")
p.check_all_metrics()
p.exit()
remote_timestamp += datetime.timedelta(hours=remote_time_hours_offset, minutes=remote_time_minutes_offset)
try:
    # Windows will return the local time (not UTC), so we need to use the
    # local time to compare. Force this if '-l' or '--localtime' is set in
    # the commandline.
    if windows or use_local:
        local_timestamp = datetime.datetime.now()
        time_type = 'Remote (Local)'
    else:
        # usually we need the UTC time
        local_timestamp = datetime.datetime.utcnow()
        time_type = 'Remote (UTC)'

    # Calculate the offset between local and remote time, in seconds.
    # (o_tzoff is presumably a timezone offset in minutes -- TODO confirm.)
    offset = time.mktime(local_timestamp.timetuple()) - time.mktime(remote_timestamp.timetuple()) + 60 * o_tzoff
    helper.add_metric(label='offset', value=offset, uom='s')
    helper.check_all_metrics()
except IndexError:
    helper.exit(summary='remote device does not return a time value', exit_code=unknown, perfdata='')

# Print out plugin information and exit nagios-style
helper.add_summary(
    '%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime(
        '%H:%M:%S') + '. Offset = %d s' % offset)
helper.add_long_output(
    '%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime(
        '%Y.%m.%d %H:%M:%S'))
helper.exit()
p = PluginHelper()
# Python 2 only: string.letters / xrange.
chars = string.letters + string.digits
randomstring = ''.join([random.choice(chars) for i in xrange(4)])  # avoid cache
# NOTE(review): randomstring is never appended to the URL, so it does not
# actually bust any cache -- confirm intent.
default_url = 'http://landspitali.is'
p.parser.add_option('--url', dest='url', default=default_url)
p.parse_arguments()
# NOTE(review): check_all_metrics() runs before any metric has been added
# in this fragment; the metrics added below are never checked here.
p.check_all_metrics()
p.show_legacy = True
html = requests.get(p.options.url).content
soup = BeautifulSoup(html)
# The activity figures live in one container div with per-figure children.
activitylist = soup.find('div', {'class': 'activityNumbers activityNumbersNew'})
activities = activitylist.findAll('div', recursive=False)
p.add_metric('metrics_found', value=len(activities), warn='0..1')
p.add_summary('%s metrics found on landspitali website' % (len(activities)))
for i in activities:
    metric_name = i.get('class')
    metric_value = i.find('div', {'class': "todaysCount"}).text
    heading = i.find('div', {'class': 'heading'})
    text = i.find('div', {'class': 'todaysText'})
    # If string dag... is found, this is a counter for the whole day
    if 'dag...' in heading.text:
        uom = 'c'
    else:
        uom = ''
    p.add_metric(metric_name, metric_value, uom=uom)
    p.add_long_output("%s: %s %s %s" % (metric_name, heading.text, metric_value, text.text))
json = simplejson.loads(html)
total_bars = len(json)
open_bars = 0

now = datetime.datetime.now()
current_day = now.weekday()
current_hour = now.hour

for i in json:
    fields = i['fields']
    start = fields.get('happy_hour_start')
    end = fields.get('happy_hour_end')
    days = fields.get('happy_hour_days')

    # format the data a little bit
    start = int(start)
    end = int(end)
    # was: days.split(',') + map(lambda x: int(x), days) -- the
    # comprehension is the idiomatic, equivalent form.
    days = [int(d) for d in days.split(',')]

    # A bar has an ongoing happy hour when today is listed and the current
    # hour falls inside [start, end).
    if current_day in days and start <= current_hour < end:
        open_bars += 1

p.add_metric('total bars', value=total_bars)
p.add_metric('ongoing happy hours', value=open_bars)
p.status(ok)
p.add_summary('%s out of %s bars have an ongoing happy hour' % (open_bars, total_bars))
p.check_all_metrics()
p.exit()
formatstring = helper.options.value + ': %s'
# Fetch the requested SNMP value by attribute name rather than eval() on a
# generated "m.<value>" string -- same result, no code-injection risk.
content = getattr(m, helper.options.value)
helper.add_summary(formatstring % content)

# No default thresholds: they must be supplied at runtime with --threshold.
helper.add_metric(label=helper.options.value, value=content)

# By default assume everything is ok. Any thresholds specified with
# --threshold can overwrite this status:
helper.status(ok)

# Check all metrics against built-in or --threshold-supplied thresholds.
helper.check_all_metrics()

# Print out plugin information and exit nagios-style
helper.exit()
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown

# Python 2 only: re-expose sys.setdefaultencoding to force utf-8 defaults.
reload(sys)
sys.setdefaultencoding('utf-8')

helper = PluginHelper()
helper.parse_arguments()
now = time.time()

url = 'http://www.einkamal.is'
html = requests.get(url).content
soup = BeautifulSoup(html)
# Site statistics live inside the "welcomemsg" block on the front page.
tables = soup.find('div', {'class': 'welcomemsg'})
p = tables.findAll('p')

li = soup.find('li', {'class': 'accounts'})
active_accounts = li.find('b').text
active_accounts = active_accounts.replace('.', '')  # strip thousands separators

li = soup.find('li', {'class': 'active'})
logged_in = li.find('b').text
logged_in = logged_in.replace('.', '')

helper.add_metric('active users', active_accounts)
helper.add_metric('logged in users', logged_in)
helper.status(ok)
helper.add_summary("%s logged in users. %s active accounts" % (logged_in, active_accounts))
# NOTE(review): check_all_metrics() is never called, so --threshold options
# on these metrics would be ignored -- confirm whether that is intended.
helper.exit()
# Python 2 only: re-expose sys.setdefaultencoding to force utf-8 defaults.
reload(sys)
sys.setdefaultencoding('utf-8')

helper = PluginHelper()
helper.parse_arguments()
now = time.time()

url = 'http://www.einkamal.is'
html = requests.get(url).content
soup = BeautifulSoup(html)
# Site statistics live inside the "welcomemsg" block on the front page.
tables = soup.find('div', {'class':'welcomemsg'})
p = tables.findAll('p')

li = soup.find('li', {'class':'accounts'})
active_accounts = li.find('b').text
active_accounts = active_accounts.replace('.','')  # strip thousands separators

li = soup.find('li', {'class':'active'})
logged_in = li.find('b').text
logged_in = logged_in.replace('.','')

helper.add_metric('active users', active_accounts)
helper.add_metric('logged in users', logged_in)
helper.status(ok)
helper.add_summary("%s logged in users. %s active accounts" % (logged_in,active_accounts))
# NOTE(review): check_all_metrics() is never called, so --threshold options
# on these metrics would be ignored -- confirm whether that is intended.
helper.exit()
# Monitor the number of aircraft reported over the North Atlantic by
# flightradar24.
import requests
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown
import simplejson as json

p = PluginHelper()
p.parser.add_option(
    '--url',
    dest='url',
    default='http://db8.flightradar24.com/zones/northatlantic_all.js?callback=pd_callback&_=1373991753137')
p.parse_arguments()

# The feed is JSONP; strip the "pd_callback( ... );" wrapper to obtain
# plain JSON before decoding.
html = requests.get(p.options.url).content
html = html.replace('pd_callback(', '').replace(");", '')
json_data = json.loads(html)
flights = json_data.values()

# Zero or one plane in the air is suspicious enough to warn on.
p.add_metric('total_airplanes', len(flights), warn="0..1")
p.add_summary('%s airplanes are currently in the air above iceland' % (len(flights)))
p.check_all_metrics()
p.exit()
    # (Inside the per-row earthquake loop: pull remaining columns.)
    distance = columns[7].text.strip()
    direction = columns[8].text
    location = columns[9].text
    # The feed uses decimal commas; normalise them to dots.
    depth = depth.replace(',','.')
    scale = scale.replace(',','.')
    quality = quality.replace(',','.')
    latitude = latitude.replace(',','.')
    longitude = longitude.replace(',','.')
    distance = distance.replace(',','.')
    # manipulate location, well.. at least remove spaces
    location = location.replace(' ','_')
    datetimestr = str_date + " " + str_time.split(',',1)[0]
    timestamp = time.mktime( parse(datetimestr).timetuple() )
    timestamp = int(timestamp)
    timesince = now-timestamp
    # Skip quakes that happened more than one hour ago (the original comment
    # said "less than one hour"; the condition actually keeps only the last hour).
    if timesince > 60*60:
        continue
    # Rows marked 'ATHUGI' are counted as major earthquakes.
    if row.find('ATHUGI') > 0:
        major_earthquakes += 1
    recent_earthquakes += 1
    helper.add_long_output("%s %s: scale=%s depth=%s quality=%s %s %s" % (str_date, str_time, scale, depth, quality, distance, location))

helper.add_summary('%s major earthquakes. %s total earthquakes' % (major_earthquakes, recent_earthquakes))
helper.add_metric('major earthquakes', value=major_earthquakes, crit='1..inf')
helper.add_metric('recent earthquakes', value=recent_earthquakes, warn='3..inf')
helper.check_all_metrics()
helper.exit()
def main():
    """Check free memory; -w/-c accept a percentage ("20%") or a raw figure."""
    helper = PluginHelper()
    helper.parser.add_option('-w', help='warning free (X% or XM)', dest='warning')
    helper.parser.add_option('-c', help='critical free (X% or XM)', dest='critical')
    helper.parse_arguments()
    warn = helper.options.warning
    crit = helper.options.critical
    memory = getMemory()
    if helper.options.warning is not None:
        warn = helper.options.warning
        if re.match('.*%$', warn):
            # "NN%" -> absolute amount of total memory.
            warn = str(memory['total'] * int(re.search('\d*', warn).group(0)) / 100)
    else:
        warn = '0'
    # NOTE(review): the advertised "XM" form is never converted, so int(warn)
    # below raises on input like "512M" -- confirm expected input format.
    if helper.options.critical is not None:
        crit = helper.options.critical
        if re.match('.*%$', crit):
            crit = str(memory['total'] * int(re.search('\d*', crit).group(0)) / 100)
    else:
        crit = '0'
    helper.status(ok)
    status = "OK"
    # Escalate in order so CRITICAL wins over WARNING when both match.
    if memory['totalfree'] <= int(warn):
        helper.status(warning)
        status = "WARNING"
    if memory['totalfree'] <= int(crit):
        helper.status(critical)
        status = "CRITICAL"
    # Summary shows free memory as a percentage, with and without buffers/cache.
    helper.add_summary(status + ': Memory free: %(totalfree)s %% (%(free)s %% including buffers/cached)' % {'totalfree': (round((float(memory['totalfree']) / float(memory['total']) * 100), 1 )), 'free': (round((float(memory['free']) / float(memory['total']) * 100), 1 ))})
    helper.add_metric(label='total',value=memory['total'])
    helper.add_metric(label='free',value=memory['free'])
    # Inverted ranges ("N..0"): alert when free memory drops below N.
    helper.add_metric(label='totalfree',value=memory['totalfree'], warn=warn+'..0', crit=crit+'..0')
    helper.add_metric(label='used',value=memory['used'])
    helper.add_metric(label='buffers',value=memory['buffers'])
    helper.add_metric(label='cached',value=memory['cached'])
    helper.add_metric(label='swapcached',value=memory['swapcached'])
    helper.check_all_metrics()
    helper.exit()
# Built on boilerplate from pynag:
# https://github.com/pynag/pynag/wiki/Writing-Plugins-with-pynag.Plugins.PluginHelper
# Example usage:
# python pynag2.py -s WARNING --th metric=some-metrics,ok=0..5,warning=5..10,critical=10..inf

# Modules
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown

helper = PluginHelper()

# Arguments
helper.parser.add_option("-s", help="Exit State", dest="state", default='OK')
helper.parse_arguments()

# Map the -s argument onto a pynag status; unrecognized strings fall through
# to threshold-based calculation.
if helper.options.state == "OK":
    helper.status(ok)
elif helper.options.state == "WARNING":
    helper.status(warning)
elif helper.options.state == "CRITICAL":
    helper.status(critical)
elif helper.options.state == "UNKNOWN":
    helper.status(unknown)
else:
    # The bare `print` statement was Python-2-only; print() behaves
    # identically on both 2 and 3 with a single argument.
    print("No state specified, calculating from input metrics.")

helper.add_metric(label='some-metrics', value=5)
helper.add_summary("Some status message.")
helper.check_all_metrics()
helper.exit()
except:
    # Bare except: any failure reading the sensor file is reported as UNKNOWN.
    # NOTE(review): consider narrowing this to IOError/OSError.
    helper.exit(summary="not able to read data - sensor not available", exit_code=unknown, perfdata='')


if __name__ == "__main__":
    if component == "temp1":
        # read the data
        sensors_file = "/sys/class/thermal/thermal_zone0/temp"
        temperature = read_temperature(sensors_file)
        # Show the summary and add the metric and afterwards check the metric
        helper.add_summary("CPU temperature: %s C" % temperature)
        helper.add_metric(label='temp', value=temperature)
    elif "Core" in component:
        # "Core N" maps to hwmon input temp(N+2)_input.
        # NOTE(review): the hwmon1 path is hardware-specific -- confirm.
        core = int(component.split(" ")[1])
        sensors_file = (
            "/sys/devices/platform/coretemp.0/hwmon/hwmon1/temp%s_input"
            % (core + 2))
        temperature = read_temperature(sensors_file)
        # Show the summary and add the metric and afterwards check the metric
        helper.add_summary("Core %s temperature: %s C" % (core, temperature))
        helper.add_metric(label='temp', value=temperature)
    helper.check_all_metrics()
    helper.exit()
        continue

    # Get the text content out of the <td> cells
    textdata = textdata.text
    numberdata = numberdata.text
    # clear some formatting (strip thousands separators)
    numberdata = numberdata.replace('.', '').replace(',', '')
    # Add the keyfigure data to longoutput
    output = "%-30s %s" % (textdata, numberdata)
    p.add_long_output(output)
    # Now lets find those keyfigures, the content of textdata is dynamic so
    # some guesswork is required
    if 'Mannfj' in textdata:
        p.add_metric(label="mannfjoldi", value=numberdata)
    elif "Hagv" in textdata:
        p.add_metric(label="hagvoxtur", value=numberdata)
    elif "VLF" in textdata:
        p.add_metric("verg landsframleidsla", value=numberdata, uom="Mkr")
    elif "VNV" in textdata:
        p.add_metric(label="VNV", value=numberdata)
    elif "Launav" in textdata:
        p.add_metric(label="launavisitala", value=numberdata)
    elif "Bygg.v" in textdata:
        p.add_metric(label="byggingavisitala", value=numberdata)
    elif "sit. framl" in textdata:
        p.add_metric(label="visitala framleidsluverds", value=numberdata)
    elif "Fiskafli" in textdata:
        p.add_metric(label="fiskafli", value=numberdata, uom="tonn")
    # NOTE(review): the chunk is truncated here -- the body of this final
    # branch lies outside the visible region.
    elif "ruskipti" in textdata:
except Exception, e: helper.exit(summary="Could not read MIB file.", long_output=str(e), exit_code=unknown, perfdata='') m=Manager(helper.options.host,helper.options.community,int(helper.options.version)) values={'systemCurrent':'','systemUsedCapacity':'','psBatteryVoltage':'','psBatteryCurrent':'','psInputLineAVoltage':''} formatstring='systemUsedCapacity: %s' content=m.systemUsedCapacity helper.add_summary(formatstring % content) for key in values: commandstring="m."+key values[key]=eval(commandstring) # print key,values[key] helper.add_metric(label=key,value=values[key]) #formatstring=helper.options.value+': %s' #commandstring="m."+helper.options.value #content=eval(commandstring) content='foo' #helper.add_summary(formatstring % content) # Read metrics from /proc/loadavg and add them as performance metrics #load1,load5,load15,processes,last_proc_id = content.split() #running,total = processes.split('/') # If we so desire we can set default thresholds by adding warn attribute here
# Build the metric URL.
api = 'http://{}:{}/api/overview'.format(plugin.options.hostname, plugin.options.port)
payload = {
    'msg_rates_age': '3600',
    'msg_rates_incr': '10',
    'columns': 'message_stats.deliver_get_details.avg_rate',
}

# No need to specify a timeout: pynag has --timeout option for the whole plugin.
r = requests.get(api, params=payload, auth=auth)
if plugin.options.show_debug:
    show_response()

if r.status_code == 401:
    plugin.add_summary("Login failed")
    plugin.exit()

try:
    deliver_rate = r.json()["message_stats"]["deliver_get_details"]["avg_rate"]
except ValueError:
    # Body was not valid JSON at all.
    plugin.add_summary("Can't decode server's response")
    plugin.exit()
except KeyError:
    # Valid JSON but without the expected structure (e.g. no traffic yet),
    # which previously crashed the plugin with a traceback.
    plugin.add_summary("Server response misses expected message_stats fields")
    plugin.exit()

plugin.add_metric('deliver_rate', deliver_rate)
plugin.add_summary('message.deliver.avg_rate: {}'.format(deliver_rate))
plugin.check_all_metrics()
plugin.exit()
#!/usr/bin/env python
import requests
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown
import simplejson as json

p = PluginHelper()
p.parser.add_option('--url', dest='url', default='http://apis.is/bus/realtime')
p.parse_arguments()

html = requests.get(p.options.url).content
# Decode into a new name: the original rebound `json`, shadowing the
# simplejson module alias and making it unusable afterwards.
data = json.loads(html)
buses_running = len(data['results'])
p.add_metric('buses running', buses_running)

soup = BeautifulSoup(html)
warning_divs = soup.findAll('div', {'class': 'warning'})

p.add_summary('%s buses are currently running' % (buses_running))
# Any warning banner in the response bumps the status to WARNING and is
# echoed in the long output.
for i in warning_divs:
    p.status(warning)
    p.add_long_output(i.text)

p.check_all_metrics()
p.exit()
device=device, destination=helper.options.destination, source=helper.options.source or None, #ttl=helper.options.ttl, timeout=helper.options.probe_timeout, size=helper.options.size, count=helper.options.count, debug=helper.options.show_debug) if helper.options.show_debug: print(result) if 'error' in result: helper.status(critical) helper.add_summary('%s: unable to ping' % helper.options.destination) helper.add_long_output(result['error']) elif 'success' in result: success = result['success'] helper.status(ok) helper.add_summary('%s: rta %.1fms, pl %d%%' % (helper.options.destination, success['rtt_avg'], success['packet_loss'])) helper.add_metric('pl', success['packet_loss'], uom='%') helper.add_metric('rta', success['rtt_avg'], uom='ms') else: helper.status(unknown) helper.add_summary('Unrecognized result from ping function') helper.add_long_output(str(result)) helper.check_all_metrics() helper.exit()
except Exception as e:
    # Reading /proc/loadavg failed: exit UNKNOWN with no perfdata.
    helper.exit(summary="Could not read /proc/loadavg", long_output=str(e), exit_code=unknown, perfdata='')

# We have read the contents of loadavg file. Lets put it in the summary of our plugin output:
helper.add_summary("Load: %s" % content)

# Read metrics from /proc/loadavg and add them as performance metrics
# (five whitespace-separated fields; the fourth is "running/total").
load1,load5,load15,processes,last_proc_id = content.split()
running,total = processes.split('/')

# If we so desire we can set default thresholds by adding warn attribute here
# However we decide that there are no thresholds by default and they have to be
# applied on runtime with the --threshold option
helper.add_metric(label='load1',value=load1)
helper.add_metric(label='load5',value=load5)
helper.add_metric(label='load15',value=load15)
helper.add_metric(label='running_processes',value=running)
helper.add_metric(label='total_processes',value=total)

# By default assume everything is ok. Any thresholds specified with --threshold can overwrite this status:
helper.status(ok)

# Here all metrics will be checked against thresholds that are either
# built-in or added via --threshold from the command-line
helper.check_all_metrics()

# Print out plugin information and exit nagios-style
helper.exit()
#!/usr/bin/env python
import requests
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper,ok,warning,critical,unknown
import simplejson as json

p = PluginHelper()
p.parser.add_option('--url', dest='url', default='http://api.gulur.is/buses/?geo=true&latitude=64.1251991&longitude=-21.8108419&accuracy=20&range=restOfDay&radius=750000')
p.parse_arguments()

html = requests.get(p.options.url).content
json_data = json.loads(html)
buses_running = len(json_data)

p.add_metric('buses running', buses_running)
p.add_summary('%s buses are currently running' % (buses_running))

# Debug dump of the first record. The bare `print` statement was
# Python-2-only; print() with one argument behaves identically on 2 and 3.
print(json.dumps(json_data[0], indent=4))

p.check_all_metrics()
p.exit()
#!/usr/bin/python
"""Test helper plugin: feeds arbitrary fake data through PluginHelper so
that --threshold range handling can be exercised end to end."""
import os.path
import sys

# Make the pynag checkout containing this script importable, in preference
# to any system-wide installation.
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path[0] = parent_dir

# Standard init
from pynag.Plugins import PluginHelper, ok

plugin = PluginHelper()

# -F supplies the fake value that will be checked against any thresholds.
plugin.parser.add_option('-F', dest='fakedata',
                         help='fake data to test thresholds')
plugin.parse_arguments()

plugin.add_status(ok)  # start from OK; threshold checks may escalate this
plugin.add_summary(plugin.options.fakedata)
plugin.add_metric('fakedata', plugin.options.fakedata)

plugin.check_all_metrics()
plugin.exit()
# NOTE(review): this chunk appears to run inside a per-interface loop; the
# names m (SNMP proxy), valuesKey, interfaceKey, mc (memcache client) and
# helper are defined outside this view.
now = time.time()

# Build an expression such as "m.<valuesKey>[<interfaceKey>]" that reads the
# current counter value off the SNMP proxy object.
commandstr = 'm.' + valuesKey + "[%s]" % interfaceKey
labelstr = str(m.ifDescr[interfaceKey]) + '-' + valuesKey

# Cache keys are hashes of host + metric label so successive plugin runs can
# compute a rate from the previously stored counter/timestamp pair.
counthash = str(
    hash('checkinterfaces' + helper.options.host + labelstr))
timehash = str(hash('now' + helper.options.host + labelstr))
oldcounter = mc.get(str(counthash))
oldtime = mc.get(str(timehash))
# First run (or expired cache entry): fall back to zero / 30 seconds ago.
if oldcounter is None:
    oldcounter = 0
if oldtime is None:
    oldtime = now - 30

# HACK: eval() on a string built from valuesKey/interfaceKey -- only safe if
# those values are trusted; getattr()/indexing would be safer.
counter = eval(commandstr)
mc.set(counthash, counter)
mc.set(timehash, now)

# Bits per second since the last sample (counter delta * 8 / elapsed time).
# NOTE(review): despite the name "mbps" this is plain bits/s -- the
# megabit scaling is commented out below; confirm intended unit.
mbps = ((counter - oldcounter) * (8)) / (now - oldtime)
#mbps=((counter-oldcounter)*(8/1000000))/(now-oldtime)
helper.add_metric(label=labelstr, value=mbps)
# print " %s: %i" % (labelstr, counter)

# By default assume everything is ok. Any thresholds specified with --threshold can overwrite this status:
helper.status(ok)

# Here all metrics will be checked against thresholds that are either
# built-in or added via --threshold from the command-line
helper.check_all_metrics()

# Print out plugin information and exit nagios-style
helper.exit()
results['ResponseTime']="{0:.4f}".format(time.time() - start) # Catch any Errors except urllib2.HTTPError, e: my_plugin.exit(summary="Cannot retrieve URL: HTTP Error Code %s" % e.code, long_output=str(e), exit_code=unknown) except urllib2.URLError, e: my_plugin.exit(summary="Cannot retrieve URL: Perhaps a bad protocol (ssl not supported)?" , long_output=str(e), exit_code=unknown) except Exception, e: my_plugin.exit(summary="Something horrible happened:", long_output=str(e), exit_code=unknown, perfdata='') # Lets Parse the data: my_plugin.add_summary( "%s seconds response time" % results['ResponseTime']) # and add metrics: my_plugin.add_metric( label='Total Accesses', value=results['Total Accesses'], uom='c', ) my_plugin.add_metric( label='Total kBytes', value=results['Total kBytes'], uom='kb', ) my_plugin.add_metric( label='CPULoad', value=float(results['CPULoad'])*100, uom='%', ) my_plugin.add_metric( label='Uptime', value=results['Uptime'], uom='c', ) my_plugin.add_metric( label='ReqPerSec', value=results['ReqPerSec'], ) my_plugin.add_metric( label='BytesPerSec', value=results['BytesPerSec'], uom='b', ) my_plugin.add_metric( label='BytesPerReq', value=results['BytesPerReq'], uom='b', ) my_plugin.add_metric( label='BusyWorkers', value=results['BusyWorkers'], ) my_plugin.add_metric( label='IdleWorkers', value=results['IdleWorkers'], ) my_plugin.add_metric( label='ResponseTime', value=results['ResponseTime'], uom='s',warn=my_plugin.options.warning, crit=my_plugin.options.critical ) my_plugin.add_metric( label='Open slots', value=results['OpenSlots'] ) # By default assume everything is ok. Any thresholds specified with --threshold can overwrite this status: my_plugin.status(ok) # Here all metrics will be checked against thresholds that are either
# NOTE(review): chunk begins inside a dict literal; the opening brace and any
# earlier keys are outside this view. Keys look like SNMP attribute names on
# the proxy object `m` -- confirm against the full script.
    'systemCurrent': '',
    'systemUsedCapacity': '',
    'psBatteryVoltage': '',
    'psBatteryCurrent': '',
    'psInputLineAVoltage': ''
}

formatstring = 'systemUsedCapacity: %s'
content = m.systemUsedCapacity
helper.add_summary(formatstring % content)

# Fetch each attribute off `m` by name and expose it as a perfdata metric.
# HACK: eval("m.<key>") -- getattr(m, key) would be safer and clearer.
for key in values:
    commandstring = "m." + key
    values[key] = eval(commandstring)
    # print key,values[key]
    helper.add_metric(label=key, value=values[key])

# Leftover experimentation kept (commented out) from an earlier revision:
#formatstring=helper.options.value+': %s'
#commandstring="m."+helper.options.value
#content=eval(commandstring)
content = 'foo'
#helper.add_summary(formatstring % content)

# Read metrics from /proc/loadavg and add them as performance metrics
#load1,load5,load15,processes,last_proc_id = content.split()
#running,total = processes.split('/')

# If we so desire we can set default thresholds by adding warn attribute here
# However we decide that there are no thresholds by default and they have to be
# applied on runtime with the --threshold option
#helper.add_metric(label=helper.options.value,value=content)
# NOTE(review): this chunk runs inside a per-sensor loop; x, inlet_value,
# inlet_unit, inlet_state, the threshold arrays, real_value(), get_data(),
# helper, typ and id are all defined outside this view.
inlet_warning_lower = real_value(inlet_warning_lowers[x], inlet_digit)
inlet_critical_lower = real_value(inlet_critical_lowers[x], inlet_digit)

if inlet_state == "belowLowerCritical" or inlet_state == "aboveUpperCritical":
    # we don't want to use the thresholds. we rely on the state value of the device
    # BUG FIX: the original format string "%s is %s" had two placeholders but
    # three arguments, which raises "not all arguments converted during
    # string formatting" at runtime; aligned with the warning branch below.
    helper.add_summary("%s %s is %s" % (inlet_value, inlet_unit, inlet_state))
    helper.status(critical)
if inlet_state == "belowLowerWarning" or inlet_state == "aboveUpperWarning":
    helper.add_summary("%s %s is %s" % (inlet_value, inlet_unit, inlet_state))
    helper.status(warning)

# we always want to see the values in the long output and in the perf data
helper.add_summary("%s %s" % (inlet_value, inlet_unit))
helper.add_long_output("%s %s: %s" % (inlet_value, inlet_unit, inlet_state))
# Perfdata entry with "lower:upper" warn/crit range strings.
helper.add_metric("Sensor " + str(x), inlet_value,
                  inlet_warning_lower + ":" + inlet_warning_upper,
                  inlet_critical_lower + ":" + inlet_critical_upper,
                  "", "", inlet_unit)

######
# here we check the outlets
######
if typ.lower() == "outlet":
    # here we need the id
    base_oid_outlet_name = '.1.3.6.1.4.1.13742.6.3.5.3.1.3.1'  # Name
    base_oid_outlet_state = '.1.3.6.1.4.1.13742.6.5.4.3.1.3.1'  # Value
    oid_outlet_name = base_oid_outlet_name + "." + id  # here we add the id, to get the name
    oid_outlet_state = base_oid_outlet_state + "." + id + ".14"  # here we add the id, to get the state
    # we just want to receive the status of one sensor
    outlet_name = get_data(host, version, community, oid_outlet_name)
#Claculate UTC-time from local-time if remote_time_utc_dir == '+': remote_timestamp -= datetime.timedelta(hours=remote_time_hours_offset, minutes=remote_time_minutes_offset) elif remote_time_utc_dir == '-': remote_timestamp += datetime.timedelta(hours=remote_time_hours_offset, minutes=remote_time_minutes_offset) try: # Windows will return the local time (not UTC), so we need to use the local time to compare # Force this this if '-l' or '--localtime' is set in commandline if windows or use_local : local_timestamp = datetime.datetime.now() time_type = 'Remote (Local)' else: # usually the we need the UTC time local_timestamp = datetime.datetime.utcnow() time_type = 'Remote (UTC)' #Calculate the offset between local and remote time offset = time.mktime(local_timestamp.timetuple()) - time.mktime(remote_timestamp.timetuple()) + 60 * o_tzoff helper.add_metric(label = 'offset', value = offset, uom = 's') helper.check_all_metrics() except IndexError: helper.exit(summary = 'remote device does not return a time value', exit_code = unknown, perfdata = '') #Print out plugin information and exit nagios-style helper.add_summary('%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime('%H:%M:%S') + '. Offset = %d s' % offset) helper.add_long_output('%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime('%Y.%m.%d %H:%M:%S')) helper.exit()
# NOTE(review): the top of this script (shebang and `import requests`) lies
# outside this view; `requests` is used below.
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown
import simplejson as json

p = PluginHelper()
p.parser.add_option(
    "--url",
    dest="url",
    default="http://db8.flightradar24.com/zones/northatlantic_all.js?callback=pd_callback&_=1373991753137",
)
p.parse_arguments()

# The endpoint returns JSONP ("pd_callback(...);"); strip the callback
# wrapper so the remainder parses as plain JSON.
html = requests.get(p.options.url).content
html = html.replace("pd_callback(", "")
html = html.replace(");", "")
json_data = json.loads(html)
flights = json_data.values()
# Debug loop left in place but disabled.
for i in flights:
    # print i
    # print "..."
    pass

# Warn when zero or one airplane is reported (range "0..1").
p.add_metric("total_airplanes", len(flights), warn="0..1")
p.add_summary("%s airplanes are currently in the air above iceland" % (len(flights)))

p.check_all_metrics()
p.exit()
# NOTE(review): `html` and the PluginHelper instance `p` are created outside
# this view.
soup = BeautifulSoup(html)

# vedur.is marks avalanche risk areas with <div class="lev1"> (lowest risk)
# through <div class="lev5"> (highest risk).
lev1 = soup.findAll('div', {'class': 'lev1'})
lev2 = soup.findAll('div', {'class': 'lev2'})
lev3 = soup.findAll('div', {'class': 'lev3'})
lev4 = soup.findAll('div', {'class': 'lev4'})
lev5 = soup.findAll('div', {'class': 'lev5'})
all_levels = (lev1, lev2, lev3, lev4, lev5)

# First a little sanity check, if any of the above divs are not found
# It means the layout of the site has changed so we exit with unknown
for level in all_levels:
    if not level:
        p.add_summary(
            "Could not find a <div class=lev...> .. Layout of vedur.is must have changed"
        )
        p.status(unknown)
        p.exit()

# The "- 1" suggests each list contains one non-area (header) element --
# confirm against the page markup. Higher risk levels warn/crit as soon as
# at least one area is flagged ("1..inf").
p.add_metric("lev1", len(lev1) - 1)
p.add_metric("lev2", len(lev2) - 1, warn="1..inf")
p.add_metric("lev3", len(lev3) - 1, warn="1..inf")
p.add_metric("lev4", len(lev4) - 1, crit="1..inf")
p.add_metric("lev5", len(lev5) - 1, crit="1..inf")

total_areas = sum(map(lambda x: len(x), all_levels))
p.add_summary("Avalance statistics successfully gathered for %s areas" % total_areas)
p.status(ok)
p.check_all_metrics()
p.exit()
# NOTE(review): chunk starts inside a "list the available check types"
# branch whose `if` header is outside this view; names, descriptions, oids,
# units, host, version, community, status, verify_host and get_data are all
# defined outside this view. Python 2 print statements.
for w,v in zip(names, descriptions):
    print w + ' = ' + v
helper.status(unknown)
helper.exit(summary='This is just a list and not a check!')

# verify that a hostname is set
verify_host(host, helper)

# open session after validated host
sess = netsnmp.Session(Version=version, DestHost=host, Community=community)

# verify, that status(/type) parameter is not empty
if (status == None) or (status not in names):
    helper.status(unknown)
    helper.exit(summary='Argument -t is missing or false!')

# snmp gets for all oids in type-list
ind = names.index(status)
value = get_data(sess, oids[ind],helper)

# Uptime (index 0) is reported directly as a human-readable duration and
# bypasses threshold checking.
if names.index(status) == 0:
    value = str(datetime.timedelta(seconds=int(value)))
    helper.exit(summary='Uptime = %s'%value)

# metric compares
helper.add_metric(label='type', value = value, uom =' '+units[ind]+' ')
helper.check_all_metrics()

# programm end
helper.exit()
#!/usr/bin/python
"""Threshold-testing plugin: pushes an arbitrary fake value through pynag's
PluginHelper so --threshold ranges can be verified end to end."""
import os.path
import sys

# Prefer the pynag checkout one directory above this script over any
# system-wide installation.
repo_root = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path[0] = repo_root

# Standard init
from pynag.Plugins import PluginHelper, ok

helper = PluginHelper()

# The -F option carries the fake value that thresholds are checked against.
helper.parser.add_option('-F', dest='fakedata',
                         help='fake data to test thresholds')
helper.parse_arguments()

helper.add_status(ok)  # baseline OK; check_all_metrics may escalate it
helper.add_summary(helper.options.fakedata)
helper.add_metric('fakedata', helper.options.fakedata)
helper.check_all_metrics()
helper.exit()
helper.status(ok) # shows the list of possible types if the flag is set if flag_list == True: for w,v in zip(names, descriptions): print w + ' = ' + v helper.status(unknown) helper.exit(summary='This is just a list and not a check!') # verify that a hostname is set verify_host(host, helper) # open session after validated host sess = netsnmp.Session(Version=version, DestHost=host, Community=community) # verify, that status(/type) parameter is not empty if (status == None) or (status not in names): helper.status(unknown) helper.exit(summary='Argument -t is missing or false!') # snmp gets for all oids in type-list ind = names.index(status) value = get_data(sess, basicoid+oid[ind],helper) # metric compares helper.add_metric(label='type', value = value, uom =' '+descriptions[ind]+' ') helper.check_all_metrics() # programm end helper.exit()
# NOTE(review): chunk starts inside a loop over fixed-width report lines `i`;
# the loop header and the *_winds/*_temperatures list initialisations are
# outside this view, so the original loop-body boundary cannot be recovered
# here. Column offsets assume the feed's fixed-width layout -- TODO confirm.
humidity = i[81:85].strip().strip('%')
current_traffic = i[86:90].strip()
total_traffic = i[90:].strip()
# Empty fields are skipped so the aggregation below only sees real samples.
if max_wind:
    max_winds.append(int(max_wind))
if average_wind:
    average_winds.append(int(average_wind))
if road_temperature:
    road_temperatures.append(int(road_temperature))
if air_temperature:
    air_temperatures.append(int(air_temperature))
if humidity:
    humidities.append(int(humidity))
if current_traffic:
    current_traffics.append(int(current_traffic))
if total_traffic:
    total_traffics.append(int(total_traffic))

# Aggregate across all stations. NOTE(review): the uom strings 'celcius'
# (sic) and 'cars' are kept verbatim -- changing them would alter perfdata.
p.add_metric('Average Wind Speed', value=np.mean(average_winds), uom='m_per_s')
p.add_metric('Max Gust measured', value=max(max_winds), uom='m_per_s')
p.add_metric('Air temperature', value=np.mean(air_temperatures), uom='celcius')
p.add_metric('Road temperature', value=np.mean(road_temperatures), uom='celcius')
p.add_metric('traffic today', value=sum(total_traffics), uom='c')
p.add_metric('current traffic', value=sum(current_traffics), uom='cars')
p.add_summary('Got metrics from %s weather stations' % (len(average_winds)))
# NOTE(review): no check_all_metrics() before exit, so --threshold ranges
# would not be evaluated -- confirm intent.
p.status(ok)
p.exit()
# <div class="lev5"> <!-- Very high risk --> soup = BeautifulSoup(html) lev1 = soup.findAll('div', {'class':'lev1'}) lev2 = soup.findAll('div', {'class':'lev2'}) lev3 = soup.findAll('div', {'class':'lev3'}) lev4 = soup.findAll('div', {'class':'lev4'}) lev5 = soup.findAll('div', {'class':'lev5'}) all_levels = (lev1,lev2,lev3,lev4,lev5) # First a little sanity check, if any of the above divs are not found # It means the layout of the site has changed so we exit with unknown for level in all_levels: if not level: p.add_summary("Could not find a <div class=lev...> .. Layout of vedur.is must have changed") p.status(unknown) p.exit() p.add_metric("lev1", len(lev1)-1) p.add_metric("lev2", len(lev2)-1, warn="1..inf") p.add_metric("lev3", len(lev3)-1, warn="1..inf") p.add_metric("lev4", len(lev4)-1, crit="1..inf") p.add_metric("lev5", len(lev5)-1, crit="1..inf") total_areas = sum(map(lambda x: len(x), all_levels)) p.add_summary("Avalance statistics successfully gathered for %s areas" % total_areas) p.status(ok) p.check_all_metrics() p.exit()
# NOTE(review): `p` (PluginHelper) and the string/random/requests imports are
# defined outside this view. Python 2 (string.letters, xrange).
chars = string.letters + string.digits
# Random 4-char token, presumably intended as a cache-buster -- note it is
# never actually appended to the URL below; confirm.
randomstring = ''.join([random.choice(chars) for i in xrange(4)])  # avoid cache
default_url = 'http://landspitali.is'
p.parser.add_option('--url', dest='url', default=default_url)
p.parse_arguments()
# NOTE(review): check_all_metrics() is invoked here, before any metric has
# been added -- looks misplaced (usually called just before exit); confirm.
p.check_all_metrics()
p.show_legacy = True

html = requests.get(p.options.url).content
soup = BeautifulSoup(html)
activitylist = soup.find('div', {'class': 'activityNumbers activityNumbersNew'})
activities = activitylist.findAll('div', recursive=False)

p.add_metric('metrics_found', value=len(activities), warn='0..1')
p.add_summary('%s metrics found on landspitali website' % (len(activities)))

for i in activities:
    metric_name = i.get('class')
    metric_value = i.find('div', {'class': "todaysCount"}).text
    heading = i.find('div', {'class': 'heading'})
    text = i.find('div', {'class': 'todaysText'})
    # If string dag... is found, this is a counter for the whole day
    if 'dag...' in heading.text:
        uom = 'c'
    else:
        uom = ''
    p.add_metric(metric_name, metric_value, uom=uom)
    # NOTE(review): chunk ends mid-statement; the format arguments for this
    # call continue outside this view.
    p.add_long_output("%s: %s %s %s" %
# NOTE(review): chunk starts with a stray p.exit() that presumably closes an
# error branch opened outside this view; `html` and `p` are defined there.
    p.exit()

soup = BeautifulSoup(html)
serverOpen = soup.findAll('serveropen')
onlinePlayers = soup.findAll('onlineplayers')

# Sanity check: both XML elements must be present in the EVE API response.
if not serverOpen or not onlinePlayers:
    p.status(unknown)
    p.add_summary("Failed to get all metrics from EVE API")
    p.add_long_output("HTTP request returned:")
    p.add_long_output(html)
    p.exit()

server_status = serverOpen[0].text
num_players = onlinePlayers[0].text
p.add_summary('Server open: %s' % (server_status))
if server_status != 'True':
    p.status(critical)
p.add_metric(label='online players', value=num_players)
# NOTE(review): presumably PluginHelper keeps the worst status seen, so this
# ok does not override a critical set above -- confirm against pynag docs.
p.status(ok)
p.check_all_metrics()
p.exit()
# NOTE(review): this chunk starts with an `except` clause; the matching
# `try:` (reading /proc/loadavg into `content`) lies outside this view.
# Python 2 `except X, e` syntax.
except Exception, e:
    helper.exit(summary="Could not read /proc/loadavg", long_output=str(e), exit_code=unknown, perfdata='')

# We have read the contents of loadavg file. Lets put it in the summary of our plugin output:
helper.add_summary("Load: %s" % content)

# Read metrics from /proc/loadavg and add them as performance metrics.
# /proc/loadavg format: "load1 load5 load15 running/total last_pid"
load1,load5,load15,processes,last_proc_id = content.split()
running,total = processes.split('/')

# If we so desire we can set default thresholds by adding warn attribute here
# However we decide that there are no thresholds by default and they have to be
# applied on runtime with the --threshold option
helper.add_metric(label='load1',value=load1)
helper.add_metric(label='load5',value=load5)
helper.add_metric(label='load15',value=load15)
helper.add_metric(label='running_processes',value=running)
helper.add_metric(label='total_processes',value=total)

# By default assume everything is ok. Any thresholds specified with --threshold can overwrite this status:
helper.status(ok)

# Here all metrics will be checked against thresholds that are either
# built-in or added via --threshold from the command-line
helper.check_all_metrics()

# Print out plugin information and exit nagios-style
helper.exit()
# NOTE(review): chunk starts inside a loop over fixed-width report lines `i`;
# the loop header and the *_winds/*_temperatures list initialisations are
# outside this view, so the original loop-body boundary cannot be recovered
# here. Column offsets assume the feed's fixed-width layout -- TODO confirm.
current_traffic = i[86:90].strip()
total_traffic = i[90:].strip()
# Empty fields are skipped so the aggregation below only sees real samples.
if max_wind:
    max_winds.append( int(max_wind) )
if average_wind:
    average_winds.append( int(average_wind) )
if road_temperature:
    road_temperatures.append( int(road_temperature))
if air_temperature:
    air_temperatures.append( int(air_temperature))
if humidity:
    humidities.append( int(humidity) )
if current_traffic:
    current_traffics.append( int(current_traffic))
if total_traffic:
    total_traffics.append( int(total_traffic) )

# Aggregate across all stations. NOTE(review): the uom strings 'celcius'
# (sic) and 'cars' are kept verbatim -- changing them would alter perfdata.
p.add_metric('Average Wind Speed', value=np.mean(average_winds),uom='m_per_s')
p.add_metric('Max Gust measured', value=max(max_winds),uom='m_per_s')
p.add_metric('Air temperature', value=np.mean(air_temperatures), uom='celcius')
p.add_metric('Road temperature', value=np.mean(road_temperatures), uom='celcius')
p.add_metric('traffic today', value=sum(total_traffics), uom='c')
p.add_metric('current traffic', value=sum(current_traffics), uom='cars')
p.add_summary('Got metrics from %s weather stations' % ( len(average_winds) ))
# NOTE(review): no check_all_metrics() before exit, so --threshold ranges
# would not be evaluated -- confirm intent.
p.status(ok)
p.exit()
# shows the list of possible types if the flag is set if flag_list == True: for w, v in zip(names, descriptions): print w + ' = ' + v helper.status(unknown) helper.exit(summary='This is just a list and not a check!') # verify that a hostname is set verify_host(host, helper) # open session after validated host sess = netsnmp.Session(Version=version, DestHost=host, Community=community) # verify, that status(/type) parameter is not empty if (status == None) or (status not in names): helper.status(unknown) helper.exit(summary='Argument -t is missing or false!') # snmp gets for all oids in type-list ind = names.index(status) value = get_data(sess, basicoid + oid[ind], helper) # metric compares helper.add_metric(label='type', value=value, uom=' ' + descriptions[ind] + ' ') helper.check_all_metrics() # programm end helper.exit()
# NOTE(review): `html` (the API payload), simplejson, datetime and `p` are
# defined/imported outside this view.
json = simplejson.loads(html)  # NOTE: shadows any imported `json` module
total_bars = len(json)
open_bars = 0
now = datetime.datetime.now()
current_day = now.weekday()
current_hour = now.hour

for i in json:
    fields = i['fields']
    start = fields.get('happy_hour_start')
    end = fields.get('happy_hour_end')
    days = fields.get('happy_hour_days')
    # format the data a little bit
    start = int(start)
    end = int(end)
    days = days.split(',')
    days = map(lambda x: int(x), days)
    # Count the bar as "open" when today is a happy-hour day and the current
    # hour falls inside [start, end).
    if current_day in days and start <= current_hour < end:
        open_bars += 1

p.add_metric('total bars', value=total_bars)
p.add_metric('ongoing happy hours', value=open_bars)
p.status(ok)
p.add_summary('%s out of %s bars have an ongoing happy hour' % (open_bars, total_bars))
p.check_all_metrics()
p.exit()