def main(): p = PluginHelper() # Warn on inactive level = 2 service_status = get_service_status(sys.argv[1]) if loaded(service_status)[0] is False: p.exit(3, "%s - %s" % (service_status['name'], loaded(service_status)[1]), "\n" + service_status['unparsed']) active = service_status['headers']['Active'][0] if active.startswith("inactive") or active.startswith('failed'): p.add_status(level) elif active.startswith("active"): p.add_status(0) else: p.add_status(3) p.add_summary("%s - %s" % ( service_status['name'], active)) p.add_long_output("\n" + service_status['unparsed']) p.exit()
class testPluginHelper(unittest.TestCase):
    """Unit tests for pynag PluginHelper threshold/exit behaviour.

    NOTE: Python 2 syntax (``except X, e``). Relies on a module-level
    ``original_stdout`` saved before stdout is redirected in setUp().
    """

    def setUp(self):
        # Keep the original argv so run_expect() can rewrite it per test.
        self.argv_store = sys.argv
        from pynag.Plugins import PluginHelper
        self.my_plugin = PluginHelper()
        self.my_plugin.parser.add_option('-F', dest='fakedata', help='fake data to test thresholds')
        # Swallow plugin output during the test run.
        sys.stdout = StringIO()

    def tearDown(self):
        # Restore the process-global state mutated in setUp().
        sys.argv = self.argv_store
        sys.stdout = original_stdout

    def run_expect(self, case, value, expected_exit):
        """Run the plugin with options ``case`` and fake data ``value``,
        asserting it exits with status code ``expected_exit``."""
        sys.argv = [sys.argv[0]] + case.split() + ('-F %s' % value).split()
        self.my_plugin.parse_arguments()
        self.my_plugin.add_status(pynag.Plugins.ok)
        self.my_plugin.add_summary(self.my_plugin.options.fakedata)
        self.my_plugin.add_metric('fakedata', self.my_plugin.options.fakedata)
        try:
            self.my_plugin.check_all_metrics()
            self.my_plugin.exit()
        except SystemExit, e:
            # exit() raises SystemExit; its code carries the nagios status.
            self.assertEquals(type(e), type(SystemExit()))
            self.assertEquals(e.code, expected_exit)
        except Exception, e:
            self.fail('unexpected exception: %s' % e)
def main():
    """Check free memory against optional warning/critical thresholds.

    ``-w``/``-c`` accept either an absolute amount or a percentage
    ("X%"), which is converted into an absolute value relative to total
    memory. Emits a summary plus perfdata metrics and exits nagios-style.
    """
    helper = PluginHelper()
    helper.parser.add_option('-w', help='warning free (X% or XM)', dest='warning')
    helper.parser.add_option('-c', help='critical free (X% or XM)', dest='critical')
    helper.parse_arguments()

    memory = getMemory()

    def _absolute(threshold):
        # Missing threshold -> '0' (never triggers). "X%" -> X percent of
        # total memory. Anything else is treated as an absolute amount.
        if threshold is None:
            return '0'
        if re.match(r'.*%$', threshold):
            # \d+ (not \d*) so a malformed value cannot match the empty string
            return str(memory['total'] * int(re.search(r'\d+', threshold).group(0)) / 100)
        return threshold

    warn = _absolute(helper.options.warning)
    crit = _absolute(helper.options.critical)

    helper.status(ok)
    status = "OK"
    # Free memory at or below a threshold means trouble; critical wins.
    if memory['totalfree'] <= int(warn):
        helper.status(warning)
        status = "WARNING"
    if memory['totalfree'] <= int(crit):
        helper.status(critical)
        status = "CRITICAL"
    helper.add_summary(status + ': Memory free: %(totalfree)s %% (%(free)s %% including buffers/cached)' % {'totalfree': (round((float(memory['totalfree']) / float(memory['total']) * 100), 1 )), 'free': (round((float(memory['free']) / float(memory['total']) * 100), 1 ))})
    helper.add_metric(label='total',value=memory['total'])
    helper.add_metric(label='free',value=memory['free'])
    # Inverted range: alert when totalfree falls inside <threshold>..0
    helper.add_metric(label='totalfree',value=memory['totalfree'], warn=warn+'..0', crit=crit+'..0')
    helper.add_metric(label='used',value=memory['used'])
    helper.add_metric(label='buffers',value=memory['buffers'])
    helper.add_metric(label='cached',value=memory['cached'])
    helper.add_metric(label='swapcached',value=memory['swapcached'])
    helper.check_all_metrics()
    helper.exit()
def check_metric(self):
    """Evaluate the collected metric values against the configured
    thresholds and exit with nagios status code, message and perfdata.
    """
    values = self._get_metric_values()

    # Fall back to an empty unit symbol when the Azure unit is unmapped.
    unit = self._AZURE_METRICS_UNIT_SYMBOLS.get(
        self._metric_properties['unit']) or ''

    # No data at all -> UNKNOWN, naming the metric (and dimension if set).
    if values is None:
        message = 'No value available for metric {}'.format(self['metric'])
        if self['dimension'] is not None:
            message += ' and dimension {}'.format(self['dimension'])
        self.nagios_exit(Plugins.UNKNOWN, message)

    # Delegate threshold evaluation and output formatting to pynag:
    # https://pynag.readthedocs.io/en/latest/pynag.Plugins.html?highlight=check_threshold#pynag.Plugins.PluginHelper
    plugin = PluginHelper()
    for label, value in values.items():
        plugin.add_metric(label=label,
                          value=value,
                          uom=unit,
                          warn=self['warning'],
                          crit=self['critical'])
    plugin.check_all_metrics()
    # Human readable metric name as the global summary line
    plugin.add_summary(self._metric_properties['name']['localizedValue'])
    plugin.exit()
def main():
    """Report a service's 'Active' state nagios-style.

    Takes the service name as the first command-line argument and exits
    through PluginHelper: 0 when active, ``inactive_level`` when inactive
    or failed, 3 (UNKNOWN) otherwise or when the unit is not loaded.
    """
    helper = PluginHelper()
    inactive_level = 2  # Warn on inactive
    service_status = get_service_status(sys.argv[1])

    # Not loaded at all -> immediate UNKNOWN with the raw output attached.
    if loaded(service_status)[0] is False:
        helper.exit(3,
                    "%s - %s" % (service_status['name'], loaded(service_status)[1]),
                    "\n" + service_status['unparsed'])

    active = service_status['headers']['Active'][0]
    if active.startswith("active"):
        helper.add_status(0)
    elif active.startswith(("inactive", "failed")):
        helper.add_status(inactive_level)
    else:
        helper.add_status(3)

    helper.add_summary("%s - %s" % (service_status['name'], active))
    helper.add_long_output("\n" + service_status['unparsed'])
    helper.exit()
"RF Switch 1": "iso.3.6.1.4.1.20712.2.1.3.1.2.11", "RF Switch 2": "iso.3.6.1.4.1.20712.2.1.3.1.2.12" } status = {"0": "No Fault", "1": "Fault", "2": "N/A", "3": "Pos1", "4": "Pos2"} if __name__ == "__main__": # verify that a hostname is set verify_host(host, helper) # The default return value should be always OK helper.status(ok) sess = netsnmp.Session(Version=version, DestHost=host, Community=community) # here we check the status for name in sorted(oids): # get the snmp values value = get_data(sess, oids[name], helper) helper.add_summary("%s: %s" % (name, status[value])) # if the value is 1 / Fault the status is set to critical if value == "1": helper.status(critical) # Print out plugin information and exit nagios-style helper.exit()
#!/usr/bin/env python import requests from BeautifulSoup import BeautifulSoup from pynag.Plugins import PluginHelper,ok,warning,critical,unknown p = PluginHelper() p.parser.add_option('--url', dest='url', default='http://www.vedur.is') p.parse_arguments() html = requests.get(p.options.url).content soup = BeautifulSoup(html) warnings = soup.findAll('div', {'class':'warning'}) p.add_summary('%s warnings are being displayed on vedur.is' % len(warnings)) for i in warnings: p.status(warning) p.add_long_output( i.text ) p.status(ok) p.check_all_metrics() p.exit()
"2" : "Alarmed" } if __name__ == "__main__": # verify that a hostname is set verify_host(host, helper) # The default return value should be always OK helper.status(ok) sess = netsnmp.Session(Version=version, DestHost=host, Community=community) # get the values if unit == "1": value = get_data(sess, unit1_oid, helper) elif unit == "2": value = get_data(sess, unit2_oid, helper) else: helper.exit(summary="Wrong unit specified", exit_code=unknown, perfdata='') # add the summary helper.add_summary("Unit status is: %s" % (status[value])) if value == "2": helper.status(critical) # Print out plugin information and exit nagios-style helper.exit()
# NOTE(review): mid-file fragment — services/service/sess/helper are
# defined above this chunk.
if len(services) == 0:
    # if there are no running services, print the message
    print "no service running at host"
    # we don't want to return a icinga output, so we just end the script here
    quit()
else:
    #############
    # Here we check the service
    #############
    ## convert the service name to a oid
    service_oid = convert_in_oid(service)
    # get the data
    result = attempt_get_data(sess, service_oid)
    # Missing OID or NOSUCHOBJECT -> the service is not running
    if not result or result == "NOSUCHOBJECT":
        service_status = "NOT RUNNING"
        helper.status(critical)
    else:
        service_status = "RUNNING"
        helper.status(ok)
    helper.add_summary("Status of Service '" + service + "' is: " + service_status)
    # Print out plugin information and exit nagios-style
    helper.exit()
# Now lets find those keyfigures, the content of textdata is dynamic so # some guesswork is required if 'Mannfj' in textdata: p.add_metric(label="mannfjoldi", value=numberdata) elif "Hagv" in textdata: p.add_metric(label="hagvoxtur", value=numberdata) elif "VLF" in textdata: p.add_metric("verg landsframleidsla", value=numberdata, uom="Mkr") elif "VNV" in textdata: p.add_metric(label="VNV", value=numberdata) elif "Launav" in textdata: p.add_metric(label="launavisitala", value=numberdata) elif "Bygg.v" in textdata: p.add_metric(label="byggingavisitala", value=numberdata) elif "sit. framl" in textdata: p.add_metric(label="visitala framleidsluverds", value=numberdata) elif "Fiskafli" in textdata: p.add_metric(label="fiskafli", value=numberdata, uom="tonn") elif "ruskipti" in textdata: p.add_metric(label="voruskipti", value=numberdata, uom="Mkr") summary = "%s metrics collected from hagstofan" % (len(p._perfdata.metrics)) p.add_summary(summary) p.status(ok) p.check_all_metrics() p.exit()
# NOTE(review): fragment from inside a per-row loop over earthquake table
# rows (contains a bare `continue`); the loop header is above this chunk.
distance = columns[7].text.strip()
direction = columns[8].text
location = columns[9].text
# Icelandic locale uses ',' as decimal separator -> swap for float parsing
depth = depth.replace(',','.')
scale = scale.replace(',','.')
quality = quality.replace(',','.')
latitude = latitude.replace(',','.')
longitude = longitude.replace(',','.')
distance = distance.replace(',','.')
# manipulate location, well.. at least remove spaces
location = location.replace(' ','_')
datetimestr = str_date + " " + str_time.split(',',1)[0]
timestamp = time.mktime( parse(datetimestr).timetuple() )
timestamp = int(timestamp)
timesince = now-timestamp
# Skip rows older than one hour; only quakes less than one hour old count
if timesince > 60*60:  # Less than one hour since earthquake
    continue
# 'ATHUGI' marks quakes flagged for special attention -> count as major
if row.find('ATHUGI') > 0:
    major_earthquakes += 1
recent_earthquakes += 1
helper.add_long_output("%s %s: scale=%s depth=%s quality=%s %s %s" % (str_date, str_time, scale, depth, quality, distance, location))

# After the loop: summary, thresholded metrics and nagios-style exit
helper.add_summary('%s major earthquakes. %s total earthquakes' % (major_earthquakes, recent_earthquakes))
helper.add_metric('major earthquakes', value=major_earthquakes, crit='1..inf')
helper.add_metric('recent earthquakes', value=recent_earthquakes, warn='3..inf')
helper.check_all_metrics()
helper.exit()
# Built on boilerplate form pynag: # https://github.com/pynag/pynag/wiki/Writing-Plugins-with-pynag.Plugins.PluginHelper # Example usage: # python pynag2.py -s WARNING --th metric=some-metrics,ok=0..5,warning=5..10,critical=10..inf #Modules from pynag.Plugins import PluginHelper, ok, warning, critical, unknown helper = PluginHelper() # Arguments helper.parser.add_option("-s", help="Exit State", dest="state", default='OK') helper.parse_arguments() if helper.options.state == "OK": helper.status(ok) elif helper.options.state == "WARNING": helper.status(warning) elif helper.options.state == "CRITICAL": helper.status(critical) elif helper.options.state == "UNKNOWN": helper.status(unknown) else: print "No state specified, calculating from input metrics." helper.add_metric(label='some-metrics', value=5) helper.add_summary("Some status message.") helper.check_all_metrics() helper.exit()
"1" : "Fault", "2" : "N/A", "3" : "Pos1", "4" : "Pos2" } if __name__ == "__main__": # verify that a hostname is set verify_host(host, helper) # The default return value should be always OK helper.status(ok) sess = netsnmp.Session(Version=version, DestHost=host, Community=community) # here we check the status for name in sorted(oids): # get the snmp values value = get_data(sess, oids[name], helper) helper.add_summary("%s: %s" % (name, status[value])) # if the value is 1 / Fault the status is set to critical if value == "1": helper.status(critical) # Print out plugin information and exit nagios-style helper.exit()
mib='/usr/share/mibs/site/NetSure-SCU-Plus' # Here starts our plugin specific logic. Lets try to read /proc/loadavg # And if it fails, we exit immediately with UNKNOWN status try: load(mib) except Exception, e: helper.exit(summary="Could not read MIB file.", long_output=str(e), exit_code=unknown, perfdata='') m=Manager(helper.options.host,helper.options.community,int(helper.options.version)) values={'systemCurrent':'','systemUsedCapacity':'','psBatteryVoltage':'','psBatteryCurrent':'','psInputLineAVoltage':''} formatstring='systemUsedCapacity: %s' content=m.systemUsedCapacity helper.add_summary(formatstring % content) for key in values: commandstring="m."+key values[key]=eval(commandstring) # print key,values[key] helper.add_metric(label=key,value=values[key]) #formatstring=helper.options.value+': %s' #commandstring="m."+helper.options.value #content=eval(commandstring) content='foo' #helper.add_summary(formatstring % content)
# NOTE(review): mid-file fragment — the first two lines are the tail of a
# ZkClient method defined above; the final try block is truncated below.
        tn.write('{}\n'.format(word))
        return tn.read_all()

if __name__ == '__main__':
    plugin = PluginHelper()
    plugin.parser.add_option("-H","--hostname", help="Zookeeper's host", default='127.0.0.1')
    plugin.parser.add_option("-p","--port", help="Zookeeper's port", default='2181')
    plugin.parse_arguments()
    try:
        zk = ZkClient(plugin.options.hostname, plugin.options.port)
    except socket.error:
        plugin.status(critical)
        plugin.add_summary("Can't connect to {}:{}".format(plugin.options.hostname, plugin.options.port))
        plugin.exit()
    try:
        # A healthy Zookeeper answers 'imok' to the 'ruok' 4-letter command
        if zk.cmd('ruok') != 'imok':
            plugin.status(critical)
            plugin.add_summary("Command 'ruok' failed")
            plugin.exit()
    except socket.error, socket.timeout:
        # NOTE(review): BUG — in Python 2 this clause parses as
        # "except socket.error as socket.timeout", rebinding the
        # socket.timeout attribute instead of catching it. It should read
        # "except (socket.error, socket.timeout):". Flagged, not changed.
        plugin.status(critical)
        plugin.add_summary("Can't connect to {}:{}".format(plugin.options.hostname, plugin.options.port))
        plugin.exit()
    try:
        # 'isro' reports 'rw' when the node is writable (truncated below)
        if zk.cmd('isro') != 'rw':
            plugin.status(critical)
# NOTE(review): mid-file fragment — this except closes a try block inside
# read_temperature(); the bare except hides the real error (kept as-is).
    except:
        helper.exit(summary="not able to read data - sensor not available", exit_code=unknown, perfdata='')

if __name__ == "__main__":
    if component == "temp1":
        # read the data from the package-level thermal zone
        sensors_file = "/sys/class/thermal/thermal_zone0/temp"
        temperature = read_temperature(sensors_file)
        # Show the summary and add the metric and afterwards check the metric
        helper.add_summary("CPU temperature: %s C" % temperature)
        helper.add_metric(label='temp', value=temperature)
    elif "Core" in component:
        # Per-core sensor: "Core N" maps to coretemp temp(N+2)_input
        core = int(component.split(" ")[1])
        sensors_file = (
            "/sys/devices/platform/coretemp.0/hwmon/hwmon1/temp%s_input" % (core + 2))
        temperature = read_temperature(sensors_file)
        # Show the summary and add the metric and afterwards check the metric
        helper.add_summary("Core %s temperature: %s C" % (core, temperature))
        helper.add_metric(label='temp', value=temperature)
    helper.check_all_metrics()
# NOTE(review): mid-file fragment — chunk ends before the final exit().
p = PluginHelper()

# Random 4-char token, presumably appended to requests elsewhere
chars = string.letters + string.digits
randomstring= ''.join([random.choice(chars) for i in xrange(4)]) # avoid cache

default_url = 'http://landspitali.is'
p.parser.add_option('--url', dest='url', default=default_url)
p.parse_arguments()
# NOTE(review): check_all_metrics() runs here before any metric has been
# added — it probably belongs near the end of the script; confirm.
p.check_all_metrics()
p.show_legacy = True

html = requests.get(p.options.url).content
soup = BeautifulSoup(html)
# Hospital activity figures live inside this container div
activitylist = soup.find('div', {'class':'activityNumbers activityNumbersNew'})
activities = activitylist.findAll('div', recursive=False)
p.add_metric('metrics_found', value=len(activities), warn='0..1')
p.add_summary('%s metrics found on landspitali website' % (len(activities)))
for i in activities:
    metric_name = i.get('class')
    metric_value = i.find('div', {'class': "todaysCount"}).text
    heading = i.find('div', {'class': 'heading'})
    text = i.find('div', {'class': 'todaysText'})
    # If string dag... is found, this is a counter for the whole day
    if 'dag...' in heading.text:
        uom = 'c'
    else:
        uom = ''
    p.add_metric(metric_name, metric_value, uom=uom)
    p.add_long_output("%s: %s %s %s" % (metric_name, heading.text, metric_value, text.text))
# Get a list of northatlantic airplanes via flightradar
#
import requests
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown
import simplejson as json

p = PluginHelper()
p.parser.add_option(
    '--url',
    dest='url',
    default=
    'http://db8.flightradar24.com/zones/northatlantic_all.js?callback=pd_callback&_=1373991753137'
)
p.parse_arguments()

# The feed is JSONP: strip the pd_callback(...) wrapper to obtain plain JSON.
raw = requests.get(p.options.url).content
raw = raw.replace('pd_callback(', '').replace(");", '')
payload = json.loads(raw)

# One entry per airplane currently tracked in the zone.
planes = payload.values()
p.add_metric('total_airplanes', len(planes), warn="0..1")
p.add_summary('%s airplanes are currently in the air above iceland' %
              (len(planes)))
p.check_all_metrics()
p.exit()
# NOTE(review): mid-file fragment — the first call is the body of an
# if/elif dispatch on `query` whose earlier branches sit above this chunk.
    check_lsmdiskgrp()
elif query == 'lsarray':
    check_lsarray()
elif query == 'lsdrive':
    check_lsdrive()
elif query == 'lsvdisk':
    check_lsvdisk()
elif query == 'lsmgrp':
    check_lsmgrp()
elif query == 'lsenclosure':
    check_lsenclosure()
elif query == 'lsenclosurebattery':
    check_lsenclosurebattery()
elif query == 'lsenclosurecanister':
    check_lsenclosurecanister()
elif query == 'lsenclosurepsu':
    check_lsenclosurepsu()
elif query == 'lsrcrelationship':
    check_lsrcrelationship()
elif query == 'lsenclosureslot':
    check_lsenclosureslot()
else:
    # Unknown query -> UNKNOWN state, pointing the user at -L
    p.status(unknown)
    p.add_summary("unsupported query: %s. See -L for list of valid queries" % query)
    p.exit()

# Check metrics and exit
p.check_all_metrics()
p.exit()
# Force UTF-8 as the default encoding (Python 2 idiom; reload() restores
# sys.setdefaultencoding, which site.py normally deletes).
reload(sys)
sys.setdefaultencoding('utf-8')

helper = PluginHelper()
helper.parse_arguments()

now = time.time()
url = 'http://www.einkamal.is'
html = requests.get(url).content
soup = BeautifulSoup(html)

tables = soup.find('div', {'class':'welcomemsg'})
p = tables.findAll('p')
# Counts are rendered like "1.234" -> strip the thousands separators
li = soup.find('li',{'class':'accounts'})
active_accounts = li.find('b').text
active_accounts = active_accounts.replace('.','')
li = soup.find('li',{'class':'active'})
logged_in = li.find('b').text
logged_in = logged_in.replace('.','')

helper.add_metric('active users', active_accounts)
helper.add_metric('logged in users', logged_in)
helper.status(ok)
helper.add_summary("%s logged in users. %s active accounts" % (logged_in,active_accounts))
helper.exit()
#!/usr/bin/env python
# Nagios-style check: number of buses currently reported by apis.is.
import requests
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown
import simplejson as json

p = PluginHelper()
p.parser.add_option('--url', dest='url', default='http://apis.is/bus/realtime')
p.parse_arguments()

html = requests.get(p.options.url).content
# Fix: keep the parsed payload in its own name instead of rebinding the
# imported `json` module (the original did `json = json.loads(html)`).
data = json.loads(html)
buses_running = len(data['results'])
p.add_metric('buses running', buses_running)
soup = BeautifulSoup(html)
# Collect any warning banners embedded in the response body
warnings = soup.findAll('div', {'class': 'warning'})
p.add_summary('%s buses are currently running' % (buses_running))
for i in warnings:
    p.status(warning)
    p.add_long_output(i.text)
p.check_all_metrics()
p.exit()
#Claculate UTC-time from local-time if remote_time_utc_dir == '+': remote_timestamp -= datetime.timedelta(hours=remote_time_hours_offset, minutes=remote_time_minutes_offset) elif remote_time_utc_dir == '-': remote_timestamp += datetime.timedelta(hours=remote_time_hours_offset, minutes=remote_time_minutes_offset) try: # Windows will return the local time (not UTC), so we need to use the local time to compare # Force this this if '-l' or '--localtime' is set in commandline if windows or use_local : local_timestamp = datetime.datetime.now() time_type = 'Remote (Local)' else: # usually the we need the UTC time local_timestamp = datetime.datetime.utcnow() time_type = 'Remote (UTC)' #Calculate the offset between local and remote time offset = time.mktime(local_timestamp.timetuple()) - time.mktime(remote_timestamp.timetuple()) + 60 * o_tzoff helper.add_metric(label = 'offset', value = offset, uom = 's') helper.check_all_metrics() except IndexError: helper.exit(summary = 'remote device does not return a time value', exit_code = unknown, perfdata = '') #Print out plugin information and exit nagios-style helper.add_summary('%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime('%H:%M:%S') + '. Offset = %d s' % offset) helper.add_long_output('%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime('%Y.%m.%d %H:%M:%S')) helper.exit()
sess = netsnmp.Session(Version=version, DestHost=host, Community=community) # if no port is set, we will do a scan if port == "" or port is None: scan = True else: check_port(helper, port) check_typ(helper, typ) # The default return value should be always OK helper.status(ok) ############# # Check UDP ############# if typ == "udp": helper.add_summary(check_udp(helper, host, port, sess)) # ############ # Check TCP # ############ if typ == "tcp": helper.add_summary( check_tcp(helper, host, port, warning_param, critical_param, sess)) # Print out plugin information and exit nagios-style helper.exit()
# Optionally, let helper handle command-line arguments for us for example --threshold # Note: If your plugin needs any commandline arguments on its own (like --hostname) you should add them # before this step with helper.parser.add_option() helper.parse_arguments() # Here starts our plugin specific logic. Lets try to read /proc/loadavg # And if it fails, we exit immediately with UNKNOWN status try: content = open('/proc/loadavg').read() except Exception as e: helper.exit(summary="Could not read /proc/loadavg", long_output=str(e), exit_code=unknown, perfdata='') # We have read the contents of loadavg file. Lets put it in the summary of our plugin output: helper.add_summary("Load: %s" % content) # Read metrics from /proc/loadavg and add them as performance metrics load1,load5,load15,processes,last_proc_id = content.split() running,total = processes.split('/') # If we so desire we can set default thresholds by adding warn attribute here # However we decide that there are no thresholds by default and they have to be # applied on runtime with the --threshold option helper.add_metric(label='load1',value=load1) helper.add_metric(label='load5',value=load5) helper.add_metric(label='load15',value=load15) helper.add_metric(label='running_processes',value=running) helper.add_metric(label='total_processes',value=total)
# <div class="lev5"> <!-- Very high risk --> soup = BeautifulSoup(html) lev1 = soup.findAll('div', {'class': 'lev1'}) lev2 = soup.findAll('div', {'class': 'lev2'}) lev3 = soup.findAll('div', {'class': 'lev3'}) lev4 = soup.findAll('div', {'class': 'lev4'}) lev5 = soup.findAll('div', {'class': 'lev5'}) all_levels = (lev1, lev2, lev3, lev4, lev5) # First a little sanity check, if any of the above divs are not found # It means the layout of the site has changed so we exit with unknown for level in all_levels: if not level: p.add_summary( "Could not find a <div class=lev...> .. Layout of vedur.is must have changed" ) p.status(unknown) p.exit() p.add_metric("lev1", len(lev1) - 1) p.add_metric("lev2", len(lev2) - 1, warn="1..inf") p.add_metric("lev3", len(lev3) - 1, warn="1..inf") p.add_metric("lev4", len(lev4) - 1, crit="1..inf") p.add_metric("lev5", len(lev5) - 1, crit="1..inf") total_areas = sum(map(lambda x: len(x), all_levels)) p.add_summary("Avalance statistics successfully gathered for %s areas" % total_areas) p.status(ok) p.check_all_metrics()
#!/usr/bin/python
import os.path
import sys

# Put the checkout's parent directory first on the import path so the
# bundled pynag package wins over any system-wide install.
pynagbase = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path[0] = pynagbase

from pynag.Plugins import PluginHelper,ok

# Minimal plugin that feeds user-supplied fake data through pynag's
# threshold machinery (exercised with --threshold on the command line).
plugin = PluginHelper()
plugin.parser.add_option('-F', dest='fakedata', help='fake data to test thresholds')
plugin.parse_arguments()

plugin.add_status(ok)
plugin.add_summary(plugin.options.fakedata)
plugin.add_metric('fakedata', plugin.options.fakedata)
plugin.check_all_metrics()
plugin.exit()
# NOTE(review): mid-file fragment — chunk is truncated at the dangling
# final else:.
relpath = normalizeRelPath(helper.options.relpath)
url = "{0}://{1}:{2}/{3}".format(prefix, helper.options.host, helper.options.port, relpath)
# Resolve e.g. requests.get / requests.post from the --method option
method = getattr(requests, helper.options.method.lower())
response = method(url, auth=auth, cert=cert)
status_code = response.status_code
status_check = response.text.strip()
# status_code = 200
# status_check = "WARNING - sprocket count ok | count=12 | latency=1s"
if status_code > 300:
    helper.status(unknown)
    helper.add_summary(
        "Error making request; status code: {0}".format(status_code))
else:
    # The remote check output looks like "STATUS - message"; split on the
    # first dash and map STATUS onto the nagios states below.
    status = status_check[:status_check.find("-")].strip()
    message = status_check[status_check.find("-") + 1:].strip()
    helper.add_summary(message)
    if status.startswith("OK"):
        helper.status(ok)
    elif status.startswith("WARNING"):
        helper.status(warning)
    elif status.startswith("CRITICAL"):
        helper.status(critical)
    else:
# Split each parameter into a dict results = dict(re.split(':\s*', line) for line in content.split('\n')) results['OpenSlots']= results['Scoreboard'].count('.') results['ResponseTime']="{0:.4f}".format(time.time() - start) # Catch any Errors except urllib2.HTTPError, e: my_plugin.exit(summary="Cannot retrieve URL: HTTP Error Code %s" % e.code, long_output=str(e), exit_code=unknown) except urllib2.URLError, e: my_plugin.exit(summary="Cannot retrieve URL: Perhaps a bad protocol (ssl not supported)?" , long_output=str(e), exit_code=unknown) except Exception, e: my_plugin.exit(summary="Something horrible happened:", long_output=str(e), exit_code=unknown, perfdata='') # Lets Parse the data: my_plugin.add_summary( "%s seconds response time" % results['ResponseTime']) # and add metrics: my_plugin.add_metric( label='Total Accesses', value=results['Total Accesses'], uom='c', ) my_plugin.add_metric( label='Total kBytes', value=results['Total kBytes'], uom='kb', ) my_plugin.add_metric( label='CPULoad', value=float(results['CPULoad'])*100, uom='%', ) my_plugin.add_metric( label='Uptime', value=results['Uptime'], uom='c', ) my_plugin.add_metric( label='ReqPerSec', value=results['ReqPerSec'], ) my_plugin.add_metric( label='BytesPerSec', value=results['BytesPerSec'], uom='b', ) my_plugin.add_metric( label='BytesPerReq', value=results['BytesPerReq'], uom='b', ) my_plugin.add_metric( label='BusyWorkers', value=results['BusyWorkers'], ) my_plugin.add_metric( label='IdleWorkers', value=results['IdleWorkers'], ) my_plugin.add_metric( label='ResponseTime', value=results['ResponseTime'], uom='s',warn=my_plugin.options.warning, crit=my_plugin.options.critical ) my_plugin.add_metric( label='Open slots', value=results['OpenSlots'] ) # By default assume everything is ok. Any thresholds specified with --threshold can overwrite this status:
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown
import requests
import cStringIO

# Volcano status image published by the Icelandic Met Office
url = 'http://hraun.vedur.is/ja/eldgos/volcano_status.png'

p = PluginHelper()
p.parse_arguments()
p.show_legacy = True

tmp = requests.get(url)
image_file = cStringIO.StringIO(tmp.content)
# NOTE(review): Image (PIL) and defaultdict are not imported in this chunk —
# presumably imported elsewhere in the file.
image = Image.open(image_file)
p.add_summary("Volcano data last updated: %s." % (tmp.headers['last-modified']))

width = image.size[0]
height = image.size[1]

# Histogram of every RGB value in the image
colormap = defaultdict(int)
# NOTE(review): y iterates over width and x over height, yet getpixel takes
# (x, y) — coordinates look swapped; only safe on a square image. Confirm.
for y in range(0, width):
    for x in range(0, height):
        xy = (x, y)
        rgb = image.getpixel(xy)
        colormap[rgb] += 1

pixels_per_triangle = 251  # How many pixels are in an triangle
grey = (110, 110, 110)
green = (0, 255, 0)
oid_inlet_critical_upper = '.1.3.6.1.4.1.13742.6.3.3.4.1.23' # critical_upper_threhold (must be divided by the digit) oid_inlet_warning_lower = '.1.3.6.1.4.1.13742.6.3.3.4.1.22' oid_inlet_critical_lower = '.1.3.6.1.4.1.13742.6.3.3.4.1.21' # walk the data inlet_values = walk_data(host, version, community, oid_inlet_value) inlet_units = walk_data(host, version, community, oid_inlet_unit) inlet_digits = walk_data(host, version, community, oid_inlet_digits) inlet_states = walk_data(host, version, community, oid_inlet_state) inlet_warning_uppers = walk_data(host, version, community, oid_inlet_warning_upper) inlet_critical_uppers = walk_data(host, version, community, oid_inlet_critical_upper) inlet_critical_lowers = walk_data(host, version, community, oid_inlet_critical_lower) inlet_warning_lowers = walk_data(host, version, community, oid_inlet_warning_lower) # just print the summary, that the inlet sensors are checked helper.add_summary("Inlet") # all list must have the same length, if not something went wrong. that makes it easier and we need less loops # translate the data in human readable units with help of the dicts for x in range(len(inlet_values)): inlet_sensor = "" # sensors[int(inlet_sensors[x])] inlet_unit = units[int(inlet_units[x])] inlet_digit = inlet_digits[x] inlet_state = states[int(inlet_states[x])] inlet_value = real_value(inlet_values[x], inlet_digit) inlet_warning_upper = real_value(inlet_warning_uppers[x], inlet_digit) inlet_critical_upper = real_value(inlet_critical_uppers[x], inlet_digit) inlet_warning_lower = real_value(inlet_warning_lowers[x], inlet_digit) inlet_critical_lower = real_value(inlet_critical_lowers[x], inlet_digit) if inlet_state == "belowLowerCritical" or inlet_state == "aboveUpperCritical":
#!/usr/bin/env python import requests from BeautifulSoup import BeautifulSoup from pynag.Plugins import PluginHelper,ok,warning,critical,unknown import simplejson as json p = PluginHelper() p.parser.add_option('--url', dest='url', default='http://api.gulur.is/buses/?geo=true&latitude=64.1251991&longitude=-21.8108419&accuracy=20&range=restOfDay&radius=750000') p.parse_arguments() html = requests.get(p.options.url).content json_data = json.loads(html) buses_running = len(json_data) p.add_metric('buses running', buses_running) p.add_summary('%s buses are currently running' % (buses_running)) print json.dumps(json_data[0], indent=4) p.check_all_metrics() p.exit()
verify_host(host, helper) # verify that seclevel is correctly used, otherwise there will be an exception verify_seclevel(seclevel, helper) sess = netsnmp.Session(Version=version, DestHost=host, SecLevel=seclevel, SecName=secname, AuthProto=authproto, AuthPass=authpass, PrivProto=privproto, PrivPass=privpass, Community=community) # If the --scan option is set, we show all components and end the script if scan: scan_ilo() # Show always the product name and the serial number in the summary product_name = get_data(sess, oid_product_name, helper) serial_number = get_data(sess, oid_serial_numb, helper) helper.add_summary('%s - Serial number:%s' % (product_name, serial_number)) # Verify that there is an input for the amount of components if input_phy_drv == '' or input_phy_drv is None: helper.exit(summary="Amount of physical drives must be specified (--drives)", exit_code=unknown, perfdata='') if input_pwr_sply == '' or input_pwr_sply is None: helper.exit(summary="Amount of power supplies must be specified (--ps)", exit_code=unknown, perfdata='') if input_fan == '' or input_fan is None: helper.exit(summary="Amount of fans must be specified (--fan)", exit_code=unknown, perfdata='') # Check the global status check_global_status(storage_flag, 'Global storage', oid_storage) check_global_status(system_flag,'Global system',oid_system) check_global_status(power_supply_flag,'Global power supply',oid_glob_power_supply) check_global_status(temp_flag,'Overall thermal environment',oid_glob_temp) check_global_status(temp_sens_flag,'Temperature sensors',oid_glob_temp_sens)
# NOTE(review): fragment from inside a per-row loop over earthquake table
# rows (contains a bare `continue`); the loop header is above this chunk.
scale = scale.replace(',', '.')
quality = quality.replace(',', '.')
latitude = latitude.replace(',', '.')
longitude = longitude.replace(',', '.')
distance = distance.replace(',', '.')
# manipulate location, well.. at least remove spaces
location = location.replace(' ', '_')
datetimestr = str_date + " " + str_time.split(',', 1)[0]
timestamp = time.mktime(parse(datetimestr).timetuple())
timestamp = int(timestamp)
timesince = now - timestamp
# Skip rows older than one hour; only quakes less than one hour old count
if timesince > 60 * 60:  # Less than one hour since earthquake
    continue
# 'ATHUGI' marks quakes flagged for special attention -> count as major
if row.find('ATHUGI') > 0:
    major_earthquakes += 1
recent_earthquakes += 1
helper.add_long_output(
    "%s %s: scale=%s depth=%s quality=%s %s %s" %
    (str_date, str_time, scale, depth, quality, distance, location))

# After the loop: summary, thresholded metrics and nagios-style exit
helper.add_summary('%s major earthquakes. %s total earthquakes' %
                   (major_earthquakes, recent_earthquakes))
helper.add_metric('major earthquakes', value=major_earthquakes, crit='1..inf')
helper.add_metric('recent earthquakes', value=recent_earthquakes, warn='3..inf')
helper.check_all_metrics()
helper.exit()
p.add_long_output(output) # Now lets find those keyfigures, the content of textdata is dynamic so # some guesswork is required if 'Mannfj' in textdata: p.add_metric(label="mannfjoldi", value=numberdata) elif "Hagv" in textdata: p.add_metric(label="hagvoxtur", value=numberdata) elif "VLF" in textdata: p.add_metric("verg landsframleidsla", value=numberdata, uom="Mkr") elif "VNV" in textdata: p.add_metric(label="VNV", value=numberdata) elif "Launav" in textdata: p.add_metric(label="launavisitala", value=numberdata) elif "Bygg.v" in textdata: p.add_metric(label="byggingavisitala", value=numberdata) elif "sit. framl" in textdata: p.add_metric(label="visitala framleidsluverds", value=numberdata) elif "Fiskafli" in textdata: p.add_metric(label="fiskafli", value=numberdata, uom="tonn") elif "ruskipti" in textdata: p.add_metric(label="voruskipti", value=numberdata, uom="Mkr") summary = "%s metrics collected from hagstofan" % (len(p._perfdata.metrics)) p.add_summary(summary) p.status(ok) p.check_all_metrics() p.exit()
password = helper.options.password if helper.options.age: age = float(helper.options.age) else: age = None shutdown = helper.options.shutdown disconnect = helper.options.disconnect pwf = helper.options.pwf if pwf: with open(pwf, 'r') as myfile: password = myfile.read().replace('\n', '') # The default return value should be always OK helper.status(ok) helper.add_summary("%s" % host) if __name__ == "__main__": if age: helper.add_summary("job queue:") # query the data from the Jenkins JSON API url = "http://%s/queue/api/json?pretty=true:%s" % (host, port) request = urllib2.Request(url) base64string = base64.b64encode('%s:%s' % (user, password)) request.add_header("Authorization", "Basic %s" % base64string) response = urllib2.urlopen(request) data = json.loads(response.read()) # check every job in the build queue
helper.parser.error('-p argument is required') if helper.options.show_debug: logging.basicConfig() logging.getLogger().setLevel(logging.DEBUG) else: logging.disable(logging.ERROR) try: response = requests.get(url, auth=(username, password), verify=False, timeout=20) except requests.exceptions.Timeout as e: logging.debug(e, exc_info=1) helper.add_summary('Could not establish connection') helper.add_long_output(str(e)) helper.status(critical) except requests.exceptions.ConnectionError as e: logging.debug(e, exc_info=1) helper.add_summary('Connection error') helper.add_long_output('Connection error' + str(e)) helper.status(critical) except requests.exceptions.HTTPError as e: logging.debug(e, exc_info=1) helper.add_summary('HTTP error') helper.add_long_output(str(e)) helper.status(critical) except requests.exceptions.RequestException as e:
# The default return value should be always OK helper.status(ok) # verify that a hostname is set verify_host(host, helper) sess = netsnmp.Session(Version=version, DestHost=host, Community=community) # If the --scan option is set, we show all components and end the script if scan: scan_ilo() # Show always the product name and the serial number in the summary product_name = get_data(sess, oid_product_name, helper) serial_number = get_data(sess, oid_serial_numb, helper) helper.add_summary('%s - Serial number:%s' % (product_name, serial_number)) # Verify that there is an input for the amount of components if input_phy_drv == '' or input_phy_drv is None: helper.exit( summary="Amount of physical drives must be specified (--drives)", exit_code=unknown, perfdata='') if input_pwr_sply == '' or input_pwr_sply is None: helper.exit( summary="Amount of power supplies must be specified (--ps)", exit_code=unknown, perfdata='') if input_fan == '' or input_fan is None: helper.exit(summary="Amount of fans must be specified (--fan)", exit_code=unknown,
# <div class="lev4"> <!-- High risk --> # <div class="lev5"> <!-- Very high risk --> soup = BeautifulSoup(html) lev1 = soup.findAll('div', {'class':'lev1'}) lev2 = soup.findAll('div', {'class':'lev2'}) lev3 = soup.findAll('div', {'class':'lev3'}) lev4 = soup.findAll('div', {'class':'lev4'}) lev5 = soup.findAll('div', {'class':'lev5'}) all_levels = (lev1,lev2,lev3,lev4,lev5) # First a little sanity check, if any of the above divs are not found # It means the layout of the site has changed so we exit with unknown for level in all_levels: if not level: p.add_summary("Could not find a <div class=lev...> .. Layout of vedur.is must have changed") p.status(unknown) p.exit() p.add_metric("lev1", len(lev1)-1) p.add_metric("lev2", len(lev2)-1, warn="1..inf") p.add_metric("lev3", len(lev3)-1, warn="1..inf") p.add_metric("lev4", len(lev4)-1, crit="1..inf") p.add_metric("lev5", len(lev5)-1, crit="1..inf") total_areas = sum(map(lambda x: len(x), all_levels)) p.add_summary("Avalance statistics successfully gathered for %s areas" % total_areas) p.status(ok) p.check_all_metrics() p.exit()
if __name__ == '__main__': # verify that a hostname is set verify_host(host, helper) # The default return value should be always OK helper.status(ok) sess = netsnmp.Session(Version=version, DestHost=host, Community=community) user_assigned_name_data = get_data(sess, oid_user_assigned_name, helper) product_type_data = get_data(sess, oid_product_type, helper) service_tag_data = get_data(sess, oid_service_tag, helper) helper.add_summary( 'User assigned name: %s - Typ: %s - Service tag: %s' % (user_assigned_name_data, product_type_data, service_tag_data)) global_system_data = get_data(sess, oid_global_system, helper) system_lcd_data = get_data(sess, oid_system_lcd, helper) global_storage_data = get_data(sess, oid_global_storage, helper) system_power_data = get_data(sess, oid_system_power, helper) global_system_summary, global_system_long = state_summary( global_system_data, 'Global System', normal_state, helper) system_lcd_summary, system_lcd_long = state_summary( system_lcd_data, 'System LCD', normal_state, helper) global_storage_summary, global_storage_long = state_summary( global_storage_data, 'Global Storage', normal_state, helper) system_power_summary, system_power_long = state_summary( system_power_data, 'System Power', system_power_state, helper, 'on')
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown

p = PluginHelper()
default_url = 'https://api.eveonline.com/server/ServerStatus.xml.aspx/'
p.parser.add_option('--url', dest='url', default=default_url)
p.parse_arguments()
# Emit legacy-style plugin output for compatibility with older consumers.
p.show_legacy = True

# Fetch the server-status XML; any connection/HTTP failure becomes UNKNOWN.
try:
    html = requests.get(p.options.url).content
except Exception, e:
    p.status(unknown)
    p.add_summary("%s error encountered while trying to connect to EVE api: %s" % (type(e), e))
    p.exit()

# NOTE(review): search terms are lower-case -- presumably the feed uses
# camelCase tags (<serverOpen>) that BeautifulSoup lower-cases; confirm.
soup = BeautifulSoup(html)
serverOpen = soup.findAll('serveropen')
onlinePlayers = soup.findAll('onlineplayers')
# If either tag is missing the API answer is unusable, so dump the raw
# response into long output to aid debugging and bail out as UNKNOWN.
if not serverOpen or not onlinePlayers:
    p.status(unknown)
    p.add_summary("Failed to get all metrics from EVE API")
    p.add_long_output("HTTP request returned:")
    p.add_long_output(html)
    p.exit()
from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown
import simplejson as json

p = PluginHelper()
p.parser.add_option(
    "--url",
    dest="url",
    default="http://db8.flightradar24.com/zones/northatlantic_all.js?callback=pd_callback&_=1373991753137",
)
p.parse_arguments()

# The feed is JSONP; strip the "pd_callback(...)" wrapper to get plain JSON.
html = requests.get(p.options.url).content
html = html.replace("pd_callback(", "")
html = html.replace(");", "")
json_data = json.loads(html)
# One entry per tracked aircraft currently in the zone.
flights = json_data.values()

# Warn when suspiciously few aircraft (0 or 1) are reported.
p.add_metric("total_airplanes", len(flights), warn="0..1")
p.add_summary("%s airplanes are currently in the air above iceland" % (len(flights)))
p.check_all_metrics()
p.exit()
# Optionally, let helper handle command-line arguments for us for example --threshold # Note: If your plugin needs any commandline arguments on its own (like --hostname) you should add them # before this step with helper.parser.add_option() helper.parse_arguments() # Here starts our plugin specific logic. Lets try to read /proc/loadavg # And if it fails, we exit immediately with UNKNOWN status try: content = open('/proc/loadavg').read() except Exception, e: helper.exit(summary="Could not read /proc/loadavg", long_output=str(e), exit_code=unknown, perfdata='') # We have read the contents of loadavg file. Lets put it in the summary of our plugin output: helper.add_summary("Load: %s" % content) # Read metrics from /proc/loadavg and add them as performance metrics load1,load5,load15,processes,last_proc_id = content.split() running,total = processes.split('/') # If we so desire we can set default thresholds by adding warn attribute here # However we decide that there are no thresholds by default and they have to be # applied on runtime with the --threshold option helper.add_metric(label='load1',value=load1) helper.add_metric(label='load5',value=load5) helper.add_metric(label='load15',value=load15) helper.add_metric(label='running_processes',value=running) helper.add_metric(label='total_processes',value=total)
import string
import sys

# HACK: force a utf-8 default encoding so the scraped Icelandic text does
# not raise UnicodeDecodeError under Python 2; reload(sys) restores the
# setdefaultencoding attribute that site.py removes at startup.
reload(sys)
sys.setdefaultencoding('utf-8')

from BeautifulSoup import BeautifulSoup
from pynag.Plugins import PluginHelper, ok, warning, critical, unknown

p = PluginHelper()
default_url = 'http://www.isanicelandicvolcanoerupting.com'
p.parser.add_option('--url', dest='url', default=default_url)
p.parse_arguments()
# Emit legacy-style plugin output for compatibility with older consumers.
p.show_legacy = True

html = requests.get(p.options.url).content
soup = BeautifulSoup(html)
# The site answers the question in a single <h3> element.
answer = soup.find('h3').text
p.add_summary('Source says: "%s"' % answer)

# An eruption is WARNING, no eruption is OK, anything else (layout change
# or unexpected wording) is UNKNOWN.
if 'yes' in answer.lower():
    p.status(warning)
elif 'no' in answer.lower():
    p.status(ok)
else:
    p.status(unknown)

p.check_all_metrics()
p.exit()
current_traffic = i[86:90].strip() total_traffic = i[90:].strip() if max_wind: max_winds.append( int(max_wind) ) if average_wind: average_winds.append( int(average_wind) ) if road_temperature: road_temperatures.append( int(road_temperature)) if air_temperature: air_temperatures.append( int(air_temperature)) if humidity: humidities.append( int(humidity) ) if current_traffic: current_traffics.append( int(current_traffic)) if total_traffic: total_traffics.append( int(total_traffic) ) p.add_metric('Average Wind Speed', value=np.mean(average_winds),uom='m_per_s') p.add_metric('Max Gust measured', value=max(max_winds),uom='m_per_s') p.add_metric('Air temperature', value=np.mean(air_temperatures), uom='celcius') p.add_metric('Road temperature', value=np.mean(road_temperatures), uom='celcius') p.add_metric('traffic today', value=sum(total_traffics), uom='c') p.add_metric('current traffic', value=sum(current_traffics), uom='cars') p.add_summary('Got metrics from %s weather stations' % ( len(average_winds) )) p.status(ok) p.exit()
from pynag.Plugins import PluginHelper, ok, unknown p = PluginHelper() default_url = 'http://appyhour.herokuapp.com/iceland/' p.parser.add_option('--url', dest='url', default=default_url) p.parse_arguments() p.show_legacy = True try: html = requests.get(p.options.url).content except Exception, e: p.status(unknown) p.add_summary("%s error encountered while trying to connect to api hour api: %s" % (type(e), e)) p.exit() json = simplejson.loads(html) total_bars = len(json) open_bars = 0 now = datetime.datetime.now() current_day = now.weekday() current_hour = now.hour for i in json: fields = i['fields'] start = fields.get('happy_hour_start') end = fields.get('happy_hour_end') days = fields.get('happy_hour_days')
# And if it fails, we exit immediately with UNKNOWN status try: load(helper.options.mib) except Exception, e: helper.exit(summary="Could not read MIB file.", long_output=str(e), exit_code=unknown, perfdata='') m = Manager(helper.options.host, helper.options.community, int(helper.options.version)) formatstring = helper.options.value + ': %s' commandstring = "m." + helper.options.value content = eval(commandstring) helper.add_summary(formatstring % content) # Read metrics from /proc/loadavg and add them as performance metrics #load1,load5,load15,processes,last_proc_id = content.split() #running,total = processes.split('/') # If we so desire we can set default thresholds by adding warn attribute here # However we decide that there are no thresholds by default and they have to be # applied on runtime with the --threshold option helper.add_metric(label=helper.options.value, value=content) #helper.add_metric(label='load5',value=load5) #helper.add_metric(label='load15',value=load15) #helper.add_metric(label='running_processes',value=running) #helper.add_metric(label='total_processes',value=total) # By default assume everything is ok. Any thresholds specified with --threshold can overwrite this status:
humidity = i[81:85].strip().strip('%') current_traffic = i[86:90].strip() total_traffic = i[90:].strip() if max_wind: max_winds.append(int(max_wind)) if average_wind: average_winds.append(int(average_wind)) if road_temperature: road_temperatures.append(int(road_temperature)) if air_temperature: air_temperatures.append(int(air_temperature)) if humidity: humidities.append(int(humidity)) if current_traffic: current_traffics.append(int(current_traffic)) if total_traffic: total_traffics.append(int(total_traffic)) p.add_metric('Average Wind Speed', value=np.mean(average_winds), uom='m_per_s') p.add_metric('Max Gust measured', value=max(max_winds), uom='m_per_s') p.add_metric('Air temperature', value=np.mean(air_temperatures), uom='celcius') p.add_metric('Road temperature', value=np.mean(road_temperatures), uom='celcius') p.add_metric('traffic today', value=sum(total_traffics), uom='c') p.add_metric('current traffic', value=sum(current_traffics), uom='cars') p.add_summary('Got metrics from %s weather stations' % (len(average_winds))) p.status(ok) p.exit()
remote_timestamp += datetime.timedelta(hours=remote_time_hours_offset, minutes=remote_time_minutes_offset) try: # Windows will return the local time (not UTC), so we need to use the local time to compare # Force this this if '-l' or '--localtime' is set in commandline if windows or use_local: local_timestamp = datetime.datetime.now() time_type = 'Remote (Local)' else: # usually the we need the UTC time local_timestamp = datetime.datetime.utcnow() time_type = 'Remote (UTC)' # Calculate the offset between local and remote time offset = time.mktime(local_timestamp.timetuple()) - time.mktime(remote_timestamp.timetuple()) + 60 * o_tzoff helper.add_metric(label='offset', value=offset, uom='s') helper.check_all_metrics() except IndexError: helper.exit(summary='remote device does not return a time value', exit_code=unknown, perfdata='') # Print out plugin information and exit nagios-style helper.add_summary( '%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime( '%H:%M:%S') + '. Offset = %d s' % offset) helper.add_long_output( '%s: ' % (time_type) + datetime.datetime.fromtimestamp(time.mktime(remote_timestamp.timetuple())).strftime( '%Y.%m.%d %H:%M:%S')) helper.exit()
#!/usr/bin/python
import os.path
import sys

# Put the pynag checkout containing this script first on sys.path so it
# shadows any system-wide pynag installation.
pynagbase = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path[0] = pynagbase

from pynag.Plugins import PluginHelper, ok

# Minimal plugin used as a test fixture: it feeds the -F value through
# the normal summary/metric/threshold machinery.
plugin = PluginHelper()
plugin.parser.add_option('-F', dest='fakedata',
                         help='fake data to test thresholds')
plugin.parse_arguments()

plugin.add_status(ok)
plugin.add_summary(plugin.options.fakedata)
plugin.add_metric('fakedata', plugin.options.fakedata)
plugin.check_all_metrics()
plugin.exit()
# Open the SSH session to the network device and run the ping from there.
device = netmiko.ConnectHandler(**conf)
result = ping(
    device=device,
    destination=helper.options.destination,
    source=helper.options.source or None,
    #ttl=helper.options.ttl,
    timeout=helper.options.probe_timeout,
    size=helper.options.size,
    count=helper.options.count,
    debug=helper.options.show_debug)

if helper.options.show_debug:
    print(result)

# ping() returns a dict keyed by outcome: 'error' carries a message,
# 'success' carries rtt/packet-loss statistics.
if 'error' in result:
    helper.status(critical)
    helper.add_summary('%s: unable to ping' % helper.options.destination)
    helper.add_long_output(result['error'])
elif 'success' in result:
    success = result['success']
    helper.status(ok)
    helper.add_summary('%s: rta %.1fms, pl %d%%' % (helper.options.destination, success['rtt_avg'], success['packet_loss']))
    helper.add_metric('pl', success['packet_loss'], uom='%')
    helper.add_metric('rta', success['rtt_avg'], uom='ms')
else:
    # Neither key present: unexpected shape, report UNKNOWN with the raw dict.
    helper.status(unknown)
    helper.add_summary('Unrecognized result from ping function')
    helper.add_long_output(str(result))

helper.check_all_metrics()
chars = string.letters + string.digits randomstring = ''.join([random.choice(chars) for i in xrange(4)]) # avoid cache default_url = 'http://landspitali.is' p.parser.add_option('--url', dest='url', default=default_url) p.parse_arguments() p.check_all_metrics() p.show_legacy = True html = requests.get(p.options.url).content soup = BeautifulSoup(html) activitylist = soup.find('div', {'class': 'activityNumbers activityNumbersNew'}) activities = activitylist.findAll('div', recursive=False) p.add_metric('metrics_found', value=len(activities), warn='0..1') p.add_summary('%s metrics found on landspitali website' % (len(activities))) for i in activities: metric_name = i.get('class') metric_value = i.find('div', {'class': "todaysCount"}).text heading = i.find('div', {'class': 'heading'}) text = i.find('div', {'class': 'todaysText'}) # If string dag... is found, this is a counter for the whole day if 'dag...' in heading.text: uom = 'c' else: uom = '' p.add_metric(metric_name, metric_value, uom=uom) p.add_long_output("%s: %s %s %s" % (metric_name, heading.text, metric_value, text.text))
address = my_plugin.options.address
if hostname is None:
    my_plugin.parser.error('-H argument is required')

# Here comes the specific check logic
try:
    # Time the lookup so slow resolution can be alerted on via --threshold.
    start_time = time.time()
    result = socket.gethostbyname(hostname)  # result will contain the ip address resolved
    end_time = time.time()

    # If no address was specified with -a, then we return
    # OK if hostname resolved to anything at all
    if address is None or address == result:
        my_plugin.status(ok)
        my_plugin.add_summary("%s resolves to %s" % (hostname, result))
    else:
        my_plugin.status(critical)
        my_plugin.add_summary("%s resolves to %s but should resolve to %s" % (hostname, result, address))

    # Add run_time metric, so we can also alert if lookup takes to long
    run_time = end_time - start_time
    my_plugin.add_metric('run_time', run_time)
except gaierror:
    # gethostbyname raised a resolver error, so return a critical status
    my_plugin.status(critical)
    my_plugin.add_summary('Could not resolve host "%s"' % hostname)

# when check_all_metrics() is run, any metrics we have added with add_metric() will be processed against
# Thresholds (like --threshold). This part will allow our plugin users to alert on lookup_time
my_plugin.check_all_metrics()
import requests
import cStringIO

url = 'http://hraun.vedur.is/ja/eldgos/volcano_status.png'

p = PluginHelper()
p.parse_arguments()
p.show_legacy = True

# Download the status image and report when it was last updated.
tmp = requests.get(url)
image_file = cStringIO.StringIO(tmp.content)
image = Image.open(image_file)
p.add_summary("Volcano data last updated: %s." % (tmp.headers['last-modified']))

# Build a histogram of pixel colors across the whole image.
width = image.size[0]
height = image.size[1]
colormap = defaultdict(int)
# BUGFIX: getpixel() takes (x, y) with x < width and y < height. The
# original looped y over the width and x over the height, which raises
# IndexError (or samples wrong pixels) on non-square images.
for x in range(0, width):
    for y in range(0, height):
        xy = (x, y)
        rgb = image.getpixel(xy)
        colormap[rgb] += 1

pixels_per_triangle = 251  # How many pixels are in a triangle
auth = (plugin.options.user, plugin.options.password)

# Build the metric URL.
api = 'http://{}:{}/api/overview'.format(plugin.options.hostname, plugin.options.port)
payload = {
    'msg_rates_age': '3600',
    'msg_rates_incr': '10',
    'columns': 'message_stats.deliver_get_details.avg_rate',
}

# No need to specify a timeout: pynag has --timeout option for the whole plugin.
r = requests.get(api, params=payload, auth=auth)
if plugin.options.show_debug:
    show_response()

if r.status_code == 401:
    plugin.add_summary("Login failed")
    plugin.exit()

try:
    deliver_rate = r.json()["message_stats"]["deliver_get_details"]["avg_rate"]
except (ValueError, KeyError):
    # ValueError: body is not valid JSON. KeyError: the expected keys are
    # absent (e.g. no messages delivered yet) -- previously this crashed
    # with an unhandled traceback instead of a clean plugin result.
    plugin.add_summary("Can't decode server's response")
    plugin.exit()

plugin.add_metric('deliver_rate', deliver_rate)
plugin.add_summary('message.deliver.avg_rate: {}'.format(deliver_rate))
plugin.check_all_metrics()
plugin.exit()
http_status_counter[status] = ( http_status_counter.get(status, 0) + measurement['value']) else: helper.add_metric(label="%s.%s" % (key, measurement['statistic'].lower()), value=measurement['value']) helper.add_summary('{} is {}'.format(key, measurement['value'])) for status in http_status_counter: helper.add_metric(label='http{}'.format(status), value=http_status_counter[status]) helper.add_summary('{} is {}'.format(key, measurement['value'])) json_data, version, err = request_data(health_endpoint, **get_args) if json_data is None: if err is None: helper.status(unknown) helper.add_summary('no health data available') else: helper.status(critical) helper.add_summary('could not fetch health data: {}'.format(err)) else: # Only check health if there are no metrics specified in check if helper.options.metrics is None: status = json_data['status'] if status == 'UP': helper.status(ok) elif status in ('DOWN', 'OUT_OF_SERVICE'): helper.status(critical) else: helper.status(unknown) helper.add_summary('global status is {}'.format(status))
# The default return value is unknown helper.status(ok) sess = netsnmp.Session(Version=version, DestHost=host, Community=community) #query the data depending on the service try: oid = services[service] value = get_data(sess, oid, helper) except KeyError: helper.exit(summary="Wrong service specified", exit_code=unknown, perfdata='') # if one of the handover values drop to 0, show a warning. if service in ["HandoverConnectionsIn", "HandoverConnectionsOut"]: helper.add_summary("Currently %s %s" % (value, service)) if int(value) == 0: helper.status(warning) else: helper.status(ok) else: # add the summary helper.add_summary("%s status is: %s" % (service, value)) if value == "error": helper.status(critical) elif value == "stopping": helper.status(warning) elif value in ["initializing", "running", "backingoff"]: helper.status(ok)