def report_global_data_zabbix(self, resource: str) -> None:
    """Aggregate and report information for some speciality in resources.

    For the 'services' resource, counts the total number of services and
    the number of ingress-backed services and ships both as trapper items.
    Nothing is sent before the LLD discovery for *resource* was delivered.

    Args:
        resource: resource category name (e.g. 'services').
    """
    if resource not in self.discovery_sent:
        # Data for undiscovered items would be dropped by Zabbix anyway.
        # FIX: "not send yet" -> "not sent yet"; use lazy %-style args.
        self.logger.debug(
            'skipping report_global_data_zabbix for %s, discovery not sent yet!',
            resource)
        return

    data_to_send = []
    if resource == 'services':
        num_services = 0
        num_ingress_services = 0
        # self.data is shared with other threads; hold the lock while counting.
        with self.thread_lock:
            for resourced_obj in self.data[resource].objects.values():
                num_services += 1
                if resourced_obj.resource_data['is_ingress']:
                    num_ingress_services += 1
        data_to_send.append(
            ZabbixMetric(self.zabbix_host,
                         'check_kubernetes[get,services,num_services]',
                         str(num_services)))
        data_to_send.append(
            ZabbixMetric(
                self.zabbix_host,
                'check_kubernetes[get,services,num_ingress_services]',
                str(num_ingress_services)))
    self.send_data_to_zabbix(resource, None, data_to_send)
def __createMetrics(self):
    """Build the Zabbix metrics describing every configured miner."""
    self.__metrics = {}
    for name, data in self.miners.items():
        # Collect the responses of failed exchanges with this miner.
        failures = [
            item['Response'] for item in data['Exchange'] if item['Error']
        ]
        if failures:
            # Querying the miner failed: report it as down ...
            self.__metrics[name] = [ZabbixMetric(name, 'miner.status', 0)]
            # ... and record the first failure in the log.
            self.log.error(
                "error in request for miner {name} at "
                "{host}:{port} ({message})".format(
                    name=name,
                    host=data['Host'],
                    port=data['Port'],
                    message=failures[0],
                ),
            )
        else:
            # Healthy miner: translate its exchange data into metrics via
            # the parser registered for this miner type.
            self.__metrics[name] = [
                ZabbixMetric(name, key, value)
                for key, value in
                self.__supportedMiners[data['Miner']](data['Exchange'])
            ]
def get_container_zabbix_metrics(zabbix_host: str, name_space: str,
                                 pod_base_name: str, container_name: str,
                                 data: dict[str, str]) -> list[ZabbixMetric]:
    """Build the per-container trapper metrics for one pod container.

    One metric is produced per tracked field (ready, not_ready,
    restart_count, status), each read from *data* under the same key.
    """
    key_template = 'check_kubernetesd[get,containers,%s,%s,%s,%%s]' % (
        name_space, pod_base_name, container_name)
    return [
        ZabbixMetric(zabbix_host, key_template % field, data[field])
        for field in ('ready', 'not_ready', 'restart_count', 'status')
    ]
async def collect_item_vms(
    data, config_dict, zserver=None, hostId=None, port=None,
    key_vms_total=None, key_vms_running=None, key_vms_stop=None
):
    """Count total/running/stopped VMs and push the counters to Zabbix.

    Any argument left as None falls back to the corresponding entry of
    ``config_dict``.
    """
    zserver = zserver or config_dict['zabbix-zserver']
    hostId = hostId or config_dict['zabbix-hostid']
    port = port or config_dict['zabbix-port']
    key_vms_total = key_vms_total or config_dict['key-vms_total']
    key_vms_running = key_vms_running or config_dict['key-vms_running']
    key_vms_stop = key_vms_stop or config_dict['key-vms_stop']

    servers = data['servers']
    total_vms = str(len(servers))
    # ACTIVE counts as running; every other status counts as stopped.
    total_vms_running = sum(1 for vm in servers if vm['status'] == 'ACTIVE')
    total_vms_stop = len(servers) - total_vms_running

    packet_vms = [
        ZabbixMetric(hostId, key_vms_total, total_vms),
        ZabbixMetric(hostId, key_vms_running, total_vms_running),
        ZabbixMetric(hostId, key_vms_stop, total_vms_stop),
    ]
    return await send_metric(zserver=zserver, port=int(port),
                             packet_item=packet_vms)
def send_zabbix(self, log):
    """Forward a formatted alert (and an OK reset, if configured) to Zabbix."""
    if not self.conf['zabbix_enable']:
        return
    msg = self.verbose_format(log)
    impact = int(log['owasp']['impact'])
    allowed_range = range(1, 8)
    # Per-impact key (artlas_check_01 .. artlas_check_07) when advantage
    # keys are enabled and the impact is inside the supported range;
    # otherwise the plain key.
    if self.conf['zabbix_advantage_keys'] and impact in allowed_range:
        key = 'artlas_check_0{}'.format(impact)
    else:
        key = 'artlas_check'
    ZabbixSender(use_config=self.conf['agentd_config']).send(
        [ZabbixMetric(self.conf['server_name'], key, msg)])
    if self.conf['notifications']:
        # Clear the alert right away so Zabbix records a transient event.
        ZabbixSender(use_config=self.conf['agentd_config']).send(
            [ZabbixMetric(self.conf['server_name'], key, "OK")])
def filesystems_space(self):
    """Get filesystems occupancy info from FlashBlade.

    Performs LLD of current FlashBlade filesystems and gathers the
    occupancy counters in the same pass, so only a single query is issued
    to the array.  Yields the Zabbix LLD payload first, then the list of
    trapper metrics.
    """
    host = self.name
    fbinfo = self.fb.file_systems.list_file_systems()
    zbx_lld_data = {"data": []}  # Zabbix LLD data
    metrics = []                 # metrics for Zabbix trapper

    def _metric(fs_name, counter, value):
        # One occupancy metric for a single filesystem.
        return ZabbixMetric(
            host,
            "purestorage.fb.filesystems[" + fs_name + "," + counter + "]",
            value)

    for f in fbinfo.items:
        zbx_lld_data["data"].append({"{#FILESYSTEM}": f.name})
        if f.space.data_reduction is None:
            # The array reports no reduction figure for empty filesystems.
            f.space.data_reduction = 0
        metrics.append(_metric(f.name, "total", f.space.total_physical))
        metrics.append(_metric(f.name, "unique", f.space.unique))
        metrics.append(_metric(f.name, "virtual", f.space.virtual))
        metrics.append(_metric(f.name, "snapshots", f.space.snapshots))
        metrics.append(_metric(f.name, "data_reduction",
                               f.space.data_reduction))

    yield zbx_lld_data
    yield metrics
def main():
    """Read JSON frames from the Arduino and relay payload metrics to Zabbix."""
    print(sys.version)         # check python version
    print(serial.__version__)  # check pyserial version
    alreadySend = False
    arduino = serial.Serial('/dev/ttyUSB0', 57600, timeout=2)
    time.sleep(1)  # give the connection a second to settle
    while True:
        data = arduino.read(2000)
        if not data:
            continue
        print(data)  # raw frame, newlines and all
        print("size recu= " + str(len(data)))
        if not checkDataIntegrity(data):
            print("bad message!")
            continue
        print("check ok!")
        obj = json.loads(extractStruct(data))
        print(json.dumps(obj, indent=4))
        print(obj['payload']['vbatt'])
        # Send metrics to the Zabbix trapper; multiple metrics are batched
        # into a single call for efficiency.
        payload = obj['payload']
        packet = [
            ZabbixMetric(hostId, 'xbeeid', payload['xbeeid']),
            ZabbixMetric(hostId, 'freemem', payload['freemem']),
            ZabbixMetric(hostId, 'vbatt', payload['vbatt']),
        ]
        ZabbixResponse = ZabbixSender(zserver, port, use_config=None).send(packet)
        print(ZabbixResponse)
def send(monitor=None):
    """Send the passive-monitoring heartbeat (message + status) to Zabbix."""
    monitor = monitor if monitor else ZBX_PASSIVE_MONITOR
    if not ZBX_PASSIVE_SERVER or not ZBX_PASSIVE_PORT or not monitor:
        LOGGER.error('Settings insufficient to passive monitoring')
        return
    zabbix_server = ZabbixSender(zabbix_server=ZBX_PASSIVE_SERVER,
                                 zabbix_port=int(ZBX_PASSIVE_PORT),
                                 chunk_size=2)
    time_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    metrics = [
        # Heartbeat message carrying the current timestamp.
        ZabbixMetric(ZBX_PASSIVE_MONITOR, 'passive_message',
                     'Working - {0}'.format(time_now)),
        # Status code (0 - OK; 1 - Warning; 2 - Critical).
        ZabbixMetric(ZBX_PASSIVE_MONITOR, 'passive_check', 0),
    ]
    result = zabbix_server.send(metrics)
    try:
        if result.failed == 0:
            LOGGER.info('Passive monitoring sent with success.')
            LOGGER.debug(result)
        else:
            LOGGER.error('Fail to sent Passive monitoring.')
    except AttributeError:
        # send() may return an object that lacks a ``failed`` attribute.
        LOGGER.error('Fail to verify return of Passive monitoring.')
def test_create_messages(self):
    """_create_messages must return one message per input metric."""
    metrics = [
        ZabbixMetric('host1', 'key1', 1),
        ZabbixMetric('host2', 'key2', 2),
    ]
    result = ZabbixSender()._create_messages(metrics)
    self.assertIsInstance(result, list)
    self.assertEqual(len(result), 2)
def main(argv=None):
    # Poll every Daikin controller listed in myhosts.json and forward the
    # state of each configured indoor unit to Zabbix as trapper items.
    # NOTE(review): Python 2 code (print statements).
    if argv is None:
        argv = sys.argv
    fileHostCheck='./myhosts.json'
    if not os.path.exists(fileHostCheck):
        # No host inventory file: nothing to do.
        return -1
    else:
        with open(fileHostCheck, 'rb') as json_data:
            try:
                myHostDict = json.load(json_data)
            except Exception as e:
                # NOTE(review): on parse failure myHostDict stays undefined,
                # so the loop below would raise NameError — confirm intent.
                print "failed when loading current host json file or parsing the json" + traceback.format_exc()
    zabbixPacket=[]
    for host in list(myHostDict):
        # Skip controllers that have no configured indoor units.
        if len(myHostDict[host]['units'])>0 :
            myUser=myHostDict[host]['user']
            myPass=myHostDict[host]['password']
            myHostId=myHostDict[host]['hostId']
            myHostIp=host
            # One state request entry per configured unit id.
            reqArray=[]
            for unit in list(myHostDict[host]['units']):
                reqArray.append(DaikinStructReqPntState(type=0,id=int(unit)))
            # Authenticate against the controller; arg1 == 1 signals success
            # and arg2 carries the session/user id for follow-up requests.
            pLogin=DaikinApi()/DaikinHeader()/DaikinReqSetLogin(username=myUser, password=myPass)
            respLogin=DaikinHeader(sendP(myHostIp,pLogin))
            if int(respLogin.arg1)!=1:
                print "Login Failure",myHostIp
                continue
            myUserId=respLogin.arg2
            # Fetch the state of all requested units in a single request,
            # then log out again.
            pPntReq=DaikinApi()/DaikinHeader(id=myUserId)/DaikinReqGetPntState(reqIds=reqArray)
            respPnt=DaikinHeader(sendP(myHostIp,pPntReq))
            pLogout=DaikinApi()/DaikinHeader(id=myUserId)/DaikinReqSetLogout()
            DaikinHeader(sendP(myHostIp,pLogout))
            # One Zabbix metric per reported attribute of every unit.
            for pnt in respPnt.payload.pntStateArray:
                tempPacket=[
                    ZabbixMetric(myHostId,'daikin.pnt[enumDriveMode,{0}]'.format(pnt.id),pnt.enumDriveMode),
                    ZabbixMetric(myHostId,'daikin.pnt[tempAmbient,{0}]'.format(pnt.id),pnt.tempAmbient),
                    ZabbixMetric(myHostId,'daikin.pnt[tempSetPoint,{0}]'.format(pnt.id),pnt.tempSetPoint),
                    ZabbixMetric(myHostId,'daikin.pnt[enumVentMode,{0}]'.format(pnt.id),pnt.enumVentMode),
                    ZabbixMetric(myHostId,'daikin.pnt[enumVentVol,{0}]'.format(pnt.id),pnt.enumVentVol),
                    ZabbixMetric(myHostId,'daikin.pnt[pntState,{0}]'.format(pnt.id),pnt.pntState),
                    ZabbixMetric(myHostId,'daikin.pnt[errorString,{0}]'.format(pnt.id),pnt.errorString),
                    ZabbixMetric(myHostId,'daikin.pnt[iconMode,{0}]'.format(pnt.id),pnt.iconMode),
                    ZabbixMetric(myHostId,'daikin.pnt[iconAppend,{0}]'.format(pnt.id),pnt.iconAppend),
                    # Bit 3 of iconAppend is reported as a boolean filter-LED flag.
                    ZabbixMetric(myHostId,'daikin.pnt[filterLed,{0}]'.format(pnt.id), 1 if (pnt.iconAppend & 0x08) else 0 )
                ]
                zabbixPacket.extend(tempPacket)
    # Ship everything collected from all controllers in one send.
    # NOTE(review): Zabbix server address is hard-coded here.
    zbx=ZabbixSender('192.168.128.7')
    zbxResp=zbx.send(zabbixPacket)
    print zbxResp
async def collect_item_volumes(
    data, config_dict, zserver=None, hostId=None, port=None,
    key_volumes_total=None, key_volumes_available=None,
    key_volumes_in_use=None, key_volumes_other=None, key_volumes_error=None
):
    """Count volumes by status and push the counters to Zabbix.

    ``data`` may be either an already-decoded dict or a JSON string with a
    top-level ``volumes`` list.  Arguments left as None fall back to the
    corresponding ``config_dict`` entries.

    Note: volumes in an "other" state are counted but not reported, which
    mirrors the original behaviour (``key_volumes_other`` is resolved but
    unused in the packet).
    """
    zserver = zserver or config_dict['zabbix-zserver']
    hostId = hostId or config_dict['zabbix-hostid']
    port = port or config_dict['zabbix-port']
    key_volumes_total = key_volumes_total or config_dict['key-volumes_total']
    key_volumes_available = key_volumes_available or config_dict[
        'key-volumes_available'
    ]
    key_volumes_other = key_volumes_other or config_dict['key-volumes_other']
    key_volumes_in_use = key_volumes_in_use or config_dict['key-volumes_in_use']
    key_volumes_error = key_volumes_error or config_dict['key-volumes_error']

    # FIX: the original round-tripped ``data`` through json.dumps() and two
    # json.loads() calls, which only worked when ``data`` was a JSON string
    # and crashed (TypeError) for a dict.  Decode explicitly instead; the
    # debug print of the volume list is removed as a leftover.
    if isinstance(data, (str, bytes)):
        data = json.loads(data)

    total_volumes = str(len(data['volumes']))
    total_volumes_available = 0
    total_volumes_other = 0
    total_volumes_in_use = 0
    total_volumes_error = 0
    for volume in data['volumes']:
        status = volume['status']
        if status == 'available':
            total_volumes_available += 1
        elif status == 'in-use':
            total_volumes_in_use += 1
        elif status == 'error':
            total_volumes_error += 1
        else:
            total_volumes_other += 1

    packet_volumes = [
        ZabbixMetric(hostId, key_volumes_total, total_volumes),
        ZabbixMetric(hostId, key_volumes_available, total_volumes_available),
        ZabbixMetric(hostId, key_volumes_in_use, total_volumes_in_use),
        ZabbixMetric(hostId, key_volumes_error, total_volumes_error),
    ]
    return await send_metric(zserver=zserver, port=int(port),
                             packet_item=packet_volumes)
def check_projects(client):
    """Return Zabbix metrics with the Keystone project and user totals."""
    projects = client.keystone_api.projects.list()
    users = client.keystone_api.users.list()
    return [
        ZabbixMetric(hostId, key_projects_total, len(projects)),
        ZabbixMetric(hostId, key_users_total, len(users)),
    ]
def event_to_metrics(event, float_keys, string_keys):
    """Add an event to the outgoing Zabbix list."""
    state = event.data.get("new_state")
    if state is None or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE):
        return
    entity_id = state.entity_id
    if not entities_filter(entity_id):
        return

    floats = {}
    strings = {}
    # The state itself: prefer a numeric representation, fall back to text.
    try:
        floats[entity_id] = float(state.state)
    except ValueError:
        try:
            floats[entity_id] = float(state_helper.state_as_number(state))
        except ValueError:
            strings[entity_id] = state.state

    # Attributes: any value castable to a finite float becomes a float item,
    # everything else is stored as a string item.
    for key, value in state.attributes.items():
        attribute_id = f"{entity_id}/{key}"
        try:
            float_value = float(value)
        except (ValueError, TypeError):
            float_value = None
        if float_value is not None and math.isfinite(float_value):
            floats[attribute_id] = float_value
        else:
            strings[attribute_id] = str(value)

    metrics = []
    # When previously-unseen float keys appear, re-publish the discovery
    # document so Zabbix creates the corresponding items.
    known_before = len(float_keys)
    float_keys.update(floats)
    if len(float_keys) != known_before:
        discovery_doc = [{"{#KEY}": float_key} for float_key in float_keys]
        metrics.append(
            ZabbixMetric(
                publish_states_host,
                "homeassistant.floats_discovery",
                json.dumps(discovery_doc),
            )
        )
    for key, value in floats.items():
        metrics.append(
            ZabbixMetric(publish_states_host, f"homeassistant.float[{key}]", value)
        )

    string_keys.update(strings)
    return metrics
def gcold(hostname, items, packet, pid):
    """Append jstat -gcold counters for process *pid* to *packet*.

    *items* holds the 10 columns of ``jstat -gcold`` output, in order:
    MC, MU, CCSC, CCSU, OC, OU, YGC, FGC, FGCT, GCT.
    """
    # FIX: the original sliced items[:11] and unpacked into 10 names, which
    # raises ValueError whenever jstat emits 11+ columns; take exactly 10.
    names = ("MC", "MU", "CCSC", "CCSU", "OC", "OU", "YGC", "FGC", "FGCT", "GCT")
    for name, value in zip(names, items[:10]):
        packet.append(
            ZabbixMetric(hostname, "GCOLD." + name + "[" + str(pid) + "]", value))
def sendMetrica():
    """Push a small demo packet of trapper metrics to Zabbix."""
    packet = [
        ZabbixMetric('hostname1', 'test[cpu_usage]', 2),
        ZabbixMetric('hostname1', 'test[system_status]', "OK"),
        ZabbixMetric('hostname1', 'test[disk_io]', '0.1'),
        # A metric may also carry an explicit clock value.
        ZabbixMetric('hostname1', 'test[cpu_usage]', 20, 1411598020),
    ]
    return ZabbixSender(use_config=True).send(packet)
async def collect_item_ips(
    data, config_dict, id_token, zserver=None, hostId=None, port=None,
    network_name=None, key_ips_total=None, key_ips_used=None,
    key_ips_availabity=None
):
    """Report IPv4 usage of one physical network to Zabbix.

    Sums total/used/free IPv4 addresses across the subnets of each network
    whose ``provider:physical_network`` matches *network_name*, fetching
    the availability details from the OpenStack API with *id_token*.
    Arguments left as None fall back to ``config_dict`` entries.
    """
    zserver = zserver or config_dict['zabbix-zserver']
    hostId = hostId or config_dict['zabbix-hostid']
    port = port or config_dict['zabbix-port']
    network_name = network_name or config_dict['key-network_name']
    key_ips_total = key_ips_total or config_dict['key-ips_total']
    key_ips_used = key_ips_used or config_dict['key-ips_used']
    key_ips_availabity = key_ips_availabity or config_dict['key-ips_availabity']

    total_ips = 0
    total_ips_used = 0
    total_ips_availabity = 0
    for network in data['networks']:
        if network['provider:physical_network'] != network_name:
            continue
        url_api_detail_network = config_dict[
            'openstack_api-detail_ips_of_network'
        ] + network['id']
        detail_network = requests.get(
            url=url_api_detail_network,
            headers={"X-Auth-Token": id_token},
        ).json()['network_ip_availability']
        # FIX: the original round-tripped each entry through
        # json.dumps/json.loads (a no-op copy) and printed it; both were
        # debug leftovers and are removed.
        for ipv4 in detail_network['subnet_ip_availability']:
            if ipv4['ip_version'] == 4:
                total_ips += ipv4['total_ips']
                total_ips_used += ipv4['used_ips']
                total_ips_availabity += ipv4['total_ips'] - ipv4['used_ips']

    packet_ips = [
        ZabbixMetric(hostId, key_ips_total, total_ips),
        ZabbixMetric(hostId, key_ips_used, total_ips_used),
        ZabbixMetric(hostId, key_ips_availabity, total_ips_availabity),
    ]
    return await send_metric(zserver=zserver, port=int(port),
                             packet_item=packet_ips)
def gcmetacapacity(hostname, items, packet, pid):
    """Append jstat -gcmetacapacity counters for process *pid* to *packet*.

    *items* holds the 10 columns of ``jstat -gcmetacapacity`` output, in
    order: MCMN, MCMX, MC, CCSMN, CCSMX, CCSC, YGC, FGC, FGCT, GCT.
    """
    # FIX: the original sliced items[:11] and unpacked into 10 names, which
    # raises ValueError whenever jstat emits 11+ columns; take exactly 10.
    names = ("MCMN", "MCMX", "MC", "CCSMN", "CCSMX", "CCSC",
             "YGC", "FGC", "FGCT", "GCT")
    for name, value in zip(names, items[:10]):
        packet.append(
            ZabbixMetric(hostname,
                         "GCMETACAPACITY." + name + "[" + str(pid) + "]",
                         value))
def report_and_exit():
    """Send the replica-set check counters to Zabbix, then terminate."""
    packet = [
        ZabbixMetric(zbhost, 'rs.insert', inserted),
        ZabbixMetric(zbhost, 'rs.read', mread),
        ZabbixMetric(zbhost, 'rs.delete', deleted),
        ZabbixMetric(zbhost, 'rs.connect', connected),
        ZabbixMetric(zbhost, 'rs.warn', warn),
    ]
    result = ZabbixSender(zabbix_port=ZBPORT, zabbix_server=ZBHOST).send(packet)
    exit()
def test_sendMetricsToServer(self):
    """A two-metric packet must be fully processed in a single chunk."""
    cur_date_unix = int(now())
    packet = [
        ZabbixMetric('host2', 'key3', 'IDDQD'),
        ZabbixMetric('host1', 'key1', 33.1, cur_date_unix),
    ]
    response = ZabbixSender('127.0.0.1', 10051).send(packet)
    self.assertIsInstance(response, ZabbixResponse)
    self.assertEqual(response.total, 2)
    self.assertEqual(response.processed, 2)
    self.assertEqual(response.failed, 0)
    self.assertEqual(response.chunk, 1)
def getPacket_mongos(mongohost):
    """Build the Zabbix packet for one mongos instance.

    Connection gauges are reported as-is; opcounters are reported as deltas
    against the module-level counter lists, which are updated afterwards so
    the next poll yields the next delta.
    """
    global insert
    global query
    global update
    global delete
    global getmore
    global command
    host_key = mongohost.split('.')[0]
    # The trailing number in the short host name indexes the counter lists.
    host_index = int(host_key.split('-')[-1])
    res = run_command(mongohost, 'serverStatus')
    packet = []
    # Current connection gauges.
    for conn_field in ('current', 'available', 'totalCreated'):
        packet.append(
            ZabbixMetric(zbhost,
                         "mongos_connections_" + conn_field + "[" + host_key + "]",
                         int(res["connections"][conn_field])))
    # Opcounter deltas since the previous poll (insertion order preserved).
    previous_counters = {
        'insert': insert, 'query': query, 'update': update,
        'delete': delete, 'getmore': getmore, 'command': command,
    }
    for op_name, previous in previous_counters.items():
        packet.append(
            ZabbixMetric(zbhost, "mongos_" + op_name + "[" + host_key + "]",
                         int(res["opcounters"][op_name]) - previous[host_index]))
    packet.append(
        ZabbixMetric(zbhost, "mongos_ok[" + host_key + "]", int(res["ok"])))
    # Remember the absolute opcounter values for the next delta.
    insert[host_index] = int(res["opcounters"]["insert"])
    query[host_index] = int(res["opcounters"]["query"])
    update[host_index] = int(res["opcounters"]["update"])
    delete[host_index] = int(res["opcounters"]["delete"])
    getmore[host_index] = int(res["opcounters"]["getmore"])
    command[host_index] = int(res["opcounters"]["command"])
    return packet
def __sendToZabbix(self, deviceName, metricName, metricValue):
    """Push a single metric to Zabbix, if a sender host is configured."""
    if not hasattr(self, 'ZabbixHost'):
        # No Zabbix sender configured: silently skip.
        return
    result = self.ZabbixHost.send(
        [ZabbixMetric(deviceName, metricName, metricValue)])
def send_timestamp(sender, hostname, itemname, timestamp):
    """
    When the data was completely replicated, the timestamp of the
    replication is sent to the Zabbix replica.
    """
    sender.send([ZabbixMetric(hostname, itemname, timestamp)])
def readNumberAndSaveToTable(instrument, address, table):
    """Read one register from the instrument and queue it as a metric.

    read_register(address, 2, 4): presumably 2 decimals, function code 4 —
    confirm against the minimalmodbus documentation.
    """
    global packet
    try:
        reading = instrument.read_register(address, 2, 4)
    except IOError:
        print("Failed to read from instrument")
    else:
        packet.append(ZabbixMetric(hostname, table, str(reading)))
def get_x_road_packages(params, node, server):
    """Convert XML Packages metric to ZabbixMetric (includes only X-Road packages)

    Return Zabbix packet elements, or None for missing input / bad nodes.
    """
    if params is None or node is None or server is None:
        return None
    try:
        name = node.find('./m:name', NS).text
        lines = []
        for pack in node.findall('./m:stringMetric', NS):
            package_name = pack.find('./m:name', NS).text
            # Only xroad/xtee packages are of interest.
            if 'xroad' in package_name or 'xtee' in package_name:
                lines.append(u"{}: {}\n".format(
                    package_name, pack.find('./m:value', NS).text))
        return [ZabbixMetric(server, name, ''.join(lines))]
    except AttributeError:
        # A missing sub-element (find() returned None) means a bad node.
        if params['debug'] > 1:
            print_debug(u"get_x_road_packages: Incorrect node: {}".format(
                ElementTree.tostring(node)))
        return None
def on_message(self, client: mqtt.Client, userdata, msg: mqtt.MQTTMessage):
    """MQTT callback: translate a message on a configured topic to Zabbix.

    Each configured item for the topic yields one metric; an optional "jq"
    expression transforms the decoded payload first.
    """
    if msg.topic not in self._cfg["topics"]:
        logging.warning(f"Ignoring unrequested topic: {msg.topic}")
        return
    items = self._cfg["topics"][msg.topic]
    metrics = []
    for item in items:
        payload = msg.payload.decode()
        if "jq" in item:
            try:
                payload = apply_jq(payload, item["jq"])
            except Exception:
                # FIX: the exception was passed as an extra positional arg
                # with no matching %s placeholder, breaking log-record
                # formatting; log the traceback via logging.exception.
                logging.exception("Failed to apply JQ to payload, skipping")
                continue
        metrics.append(ZabbixMetric(item["host"], item["item"], payload))
        logging.debug(
            f"{msg.topic} -> {item['item']}@{item['host']}{{{payload}}}")
    try:
        # noinspection PyTypeChecker
        result = ZabbixSender(use_config=True).send(metrics)
    except Exception:
        # FIX: same malformed logging call as above.
        logging.exception("Failed to send metrics to Zabbix agent")
def send_to_zabbix(self, data: dict) -> None:
    """
    Send results (evaluated with rules) to zabbix

    Args:
        data (dict): results dict; the first (only) value holds a mapping
            of topic -> consumer -> lag data.
    """
    zabbix_name = self.config['settings']['zabbix'].get('name', self.name)
    zabbix_url = self.config['settings']['zabbix']['url']
    zabbix_port = int(self.config['settings']['zabbix']['port'])
    zabbix_key = self.config['settings']['zabbix']['key']

    # FIX: the original evaluated data[next(iter(data))] before checking
    # emptiness, so an empty dict raised StopIteration instead of being
    # skipped; guard first.
    if not data:
        return
    fixed_data = data[next(iter(data))]
    if not fixed_data or 'error' in fixed_data:
        return

    payload = []
    for topic, consumers in fixed_data.items():
        for consumer, lag_data in consumers.items():
            # One item per (topic, consumer), timestamped with the epoch
            # at which the lag was evaluated.
            payload.append(
                ZabbixMetric(
                    zabbix_name,
                    f'{zabbix_key}[{topic},{consumer}]',
                    lag_data['zabbix']['level'].value,
                    clock=lag_data['epoch'],
                ))
    try:
        with timeout():
            ZabbixSender(zabbix_url, zabbix_port).send(payload)
    except Exception as e:
        self.log.error('zabbix_send_error', payload=payload, exc_info=e)
def send(target, instrument, component, sensor):
    """Define a function to be executed on scheduled times"""
    for measurand in sensor:
        # Resolve the positional indices of component / sensor / measurand
        # within their parents; get_recent_value() takes indices.
        c_idx = list(instrument).index(component)
        s_idx = list(component).index(sensor)
        m_idx = list(sensor).index(measurand)
        logger.debug("Trying to get value for c_idx=%d, s_idx=%d, m_idx=%d",
                     c_idx, s_idx, m_idx)
        instrument.get_recent_value(c_idx, s_idx, m_idx)
        if target == "screen":
            click.echo(measurand)
        elif target == "mqtt":
            mqtt_client.publish(
                f"{CLIENT_ID}/status/{instrument.device_id}/{sensor.name}/"
                f"{measurand.name}",
                f'{{"val": {measurand.value}, "ts": {measurand.time}}}',
            )
            logger.debug("MQTT message for %s published.", sensor.name)
        elif target == "zabbix":
            zbx.send([ZabbixMetric(HOST,
                                   f"{sensor.name}-{measurand.name}",
                                   measurand.value)])
        else:
            logger.error(("Target must be either screen, mqtt or zabbix."))
def check_vms(client):
    """Return Zabbix metrics counting total/running/stopped Nova servers."""
    vms = client.nova_api.servers.list(search_opts={'all_tenants': 1})
    # ACTIVE counts as running; every other status counts as stopped.
    running = sum(1 for vm in vms if vm.status == 'ACTIVE')
    stopped = len(vms) - running
    return [
        ZabbixMetric(hostId, key_vms_total, len(vms)),
        ZabbixMetric(hostId, key_vms_running, running),
        ZabbixMetric(hostId, key_vms_stop, stopped),
    ]
def send_zabbix_script_monitoring(status_code, config_options):
    """Report the domain-expiry-checker cron status code to Zabbix."""
    metric = ZabbixMetric(config_options['APP']['SERVER_NAME'],
                          "cron.domain_expiry_checker", status_code)
    sender = ZabbixSender(
        use_config=config_options['APP']['ZABBIX_CONFIG_FILE'])
    sender.send([metric])
def test_send_sendall_exception(self, mock_socket):
    """send() must propagate socket errors raised by sendall()."""
    mock_socket.return_value = mock_socket
    mock_socket.sendall.side_effect = socket.error
    metric = ZabbixMetric('host1', 'key1', 100500, 1457358608)
    with self.assertRaises(socket.error):
        ZabbixSender().send([metric])
def test_repr(self):
    """__repr__ must serialize the metric's attribute dict as JSON."""
    metric = ZabbixMetric('host1', 'key1', 100500)
    self.assertEqual(json.loads(repr(metric)), metric.__dict__)