Example #1
0
    def send_to_zabbix(self, data: dict) -> None:
        """
        Send results (evaluated with rules) to zabbix

        Args:
            data (dict): results dict keyed by an outer identifier; its first
                value maps topic -> consumer -> lag data.
        """
        # Validate before dereferencing: the previous version computed
        # data[next(iter(data))] first, which raised StopIteration on an
        # empty dict before the emptiness check could ever run.
        if not data:
            return
        fixed_data = data[next(iter(data))]
        if not fixed_data or 'error' in fixed_data:
            return

        zabbix_name = self.config['settings']['zabbix'].get('name', self.name)
        zabbix_url = self.config['settings']['zabbix']['url']
        zabbix_port = int(self.config['settings']['zabbix']['port'])
        zabbix_key = self.config['settings']['zabbix']['key']

        # One metric per (topic, consumer) pair, timestamped with the
        # lag sample's own epoch.
        payload = []
        for topic, consumers in fixed_data.items():
            for consumer, lag_data in consumers.items():
                level = lag_data['zabbix']['level']
                epoch = lag_data['epoch']
                payload.append(ZabbixMetric(
                        zabbix_name,
                        f'{zabbix_key}[{topic},{consumer}]',
                        level.value,
                        clock=epoch
                ))

        try:
            with timeout():
                zabbix_sender = ZabbixSender(zabbix_url, zabbix_port)
                zabbix_sender.send(payload)
        except Exception as e:
            self.log.error('zabbix_send_error', payload=payload, exc_info=e)
Example #2
0
def send_zabbix_script_monitoring(status_code, config_options):
    """Report the expiry-checker cron job's status code to Zabbix.

    Args:
        status_code: value sent for the "cron.domain_expiry_checker" item.
        config_options: config mapping with an 'APP' section providing
            SERVER_NAME and ZABBIX_CONFIG_FILE.
    """
    app_cfg = config_options['APP']
    metrics = [
        ZabbixMetric(app_cfg['SERVER_NAME'],
                     "cron.domain_expiry_checker", status_code),
    ]
    sender = ZabbixSender(use_config=app_cfg['ZABBIX_CONFIG_FILE'])
    sender.send(metrics)
Example #3
0
def pure_disk_monitoring():
    """Poll a Pure Storage FlashArray for per-drive state and push it to
    Zabbix via the sender protocol.

    Reads its parameters from sys.argv (supplied by Zabbix):
        argv[2]: IP of the Pure Storage array
        argv[3]: API token
        argv[4]: host name (for the sender)
        argv[5]: Zabbix proxy or server IP (for the sender)

    Prints 1 on success, 0 on any error (so the launcher item never
    becomes "unsupported" in Zabbix).
    """
    # Drive fields we forward, as (FlashArray field, Zabbix key) pairs.
    fields = (
        ("status", "pure.disk.status"),
        ("capacity", "pure.disk.capacity"),
        ("protocol", "pure.disk.protocol"),
        ("type", "pure.disk.type"),
        ("last_failure", "pure.disk.last.failure"),
    )
    # Initialize up front: previously a failure while parsing sys.argv left
    # these unbound and the except block crashed with NameError instead of
    # reporting the real error.
    host = None
    zabbixIP = None
    try:
        '''Get the argument from Zabbix'''
        ip = str(sys.argv[2])        # IP of the Pure Storage Array
        token = str(sys.argv[3])     # API Token
        host = str(sys.argv[4])      # Host name (for the sender)
        zabbixIP = str(sys.argv[5])  # Zabbix Proxy or Server IP (for the sender)

        '''Get data'''
        arrayConnect = purestorage.FlashArray(ip,
                                              api_token=token,
                                              verify_https=False)
        metrics = []
        for entry in arrayConnect.list_drives():
            disk = entry["name"]
            diskMonitoring = arrayConnect.get_drive(drive=disk)
            '''Sending data'''
            # One metric per available field (fields may be absent
            # depending on drive state).
            for field, key in fields:
                if field in diskMonitoring:
                    metrics.append(
                        ZabbixMetric(host, key + "[" + disk + "]",
                                     str(diskMonitoring[field])))
        data = ZabbixSender(zabbixIP)
        data.send(metrics)
        '''Send 1 to give a result to Zabbix'''
        print(1)

    except Exception as e:
        '''
        Sending 0 to Zabbix instead of a Python error.
        Like that the items won't be considered as "unsupported"
        '''
        # Only report the error to Zabbix when we got far enough to know
        # where to send it.
        if host is not None and zabbixIP is not None:
            metrics = [
                ZabbixMetric(host, "pure.disk.monitoring.launcher.error",
                             str(e))
            ]
            data = ZabbixSender(zabbixIP)
            data.send(metrics)
        print(0)
Example #4
0
    def test_send_sendall_exception(self, mock_socket):
        """socket.error raised by sendall() must propagate out of send()."""
        mock_socket.return_value = mock_socket
        mock_socket.sendall.side_effect = socket.error

        metric = ZabbixMetric('host1', 'key1', 100500, 1457358608)
        sender = ZabbixSender()
        with self.assertRaises(socket.error):
            sender.send([metric])
Example #5
0
    def test_send_sendall_exception(self, mock_socket):
        """Any Exception raised by sendall() must propagate out of send()."""
        mock_socket.return_value = mock_socket
        mock_socket.sendall.side_effect = Exception

        sender = ZabbixSender()
        with self.assertRaises(Exception):
            sender.send([ZabbixMetric('host1', 'key1', 100500, 1457358608)])
Example #6
0
def send_to_zabbix(metrics):
    """Send a batch of metrics to Zabbix, skipping empty batches.

    Args:
        metrics: list of metric objects accepted by ZabbixSender.send().
    """
    zabbix = ZabbixSender(use_config=True)
    try:
        # Idiomatic truthiness check instead of len(metrics) >= 1.
        if metrics:
            log.debug('about to send {} to zabbix'.format(metrics))
            zabbix.send(metrics)
            log.info('sent metrics to zabbix')
        else:
            log.warning('metrics size invalid')
    except Exception as e:
        log.error(e)
Example #7
0
    def test_send_failed(self, mock_socket):
        mock_data = b'\x01\\\x00\x00\x00\x00\x00\x00\x00'
        mock_socket.return_value = mock_socket
        mock_socket.recv.side_effect = (b'ZBXD', mock_data, b'''
{"response": "suces","info":"processed: 0; failed: \
10; total: 10; seconds spent: 0.000078"}
''')

        zm = ZabbixMetric('host1', 'key1', 100500, 1457358608)
        zs = ZabbixSender()
        with self.assertRaises(socket.error):
            zs.send([zm])
Example #8
0
    def test_send_failed(self, mock_socket):
        mock_data = b'\x01\\\x00\x00\x00\x00\x00\x00\x00'
        mock_socket.return_value = mock_socket
        mock_socket.recv.side_effect = (b'ZBXD', mock_data, b'''
{"response": "suces","info":"processed: 0; failed: \
10; total: 10; seconds spent: 0.000078"}
''')

        zm = ZabbixMetric('host1', 'key1', 100500, 1457358608)
        zs = ZabbixSender()
        with self.assertRaises(Exception):
            zs.send([zm])
Example #9
0
def main():
    """Collect a FlashBlade metric set selected by --metric, print LLD data
    or a status string (this runs as a Zabbix external check, so stdout must
    always carry a string), and push metrics via the Zabbix sender protocol
    unless --debug is set.
    """
    logger = init_logger()
    args = parse_args()

    try:
        flashblade = connect_fb(args.endpoint, args.apitoken, args.ctimeo,
                                args.rtimeo, args.retries)
        if flashblade is None:
            print("Error")
            return

        zbx_checker = PureZabbixFBChecker(flashblade)

        zbx_lld_data = None
        # Fix: zbx_metrics was previously unbound (NameError) whenever
        # args.metric matched none of the known values.
        zbx_metrics = None

        if args.metric == 'aperf':
            z = zbx_checker.array_perf()
            zbx_metrics = next(z)
        elif args.metric == 'aspace':
            z = zbx_checker.array_space()
            zbx_metrics = next(z)
        elif args.metric == 'bspace':
            z = zbx_checker.buckets_space()
            zbx_lld_data = next(z)
            zbx_metrics = next(z)
        elif args.metric == 'fspace':
            z = zbx_checker.filesystems_space()
            zbx_lld_data = next(z)
            zbx_metrics = next(z)

        if zbx_lld_data is not None:
            # Return JSON formatted LLD buckets data to Zabbix external check
            print(json.dumps(zbx_lld_data, sort_keys=True, indent=4))
        else:
            # Remember, need to return a string to Zabbix as we are in an external checker
            print("Done!")

        if zbx_metrics is not None:
            if args.debug:
                for zm in zbx_metrics:
                    print(zm)
            else:
                sender = ZabbixSender(args.zabbix)
                sender.send(zbx_metrics)
        disconnect_fb(flashblade)

    except Exception:
        logger.exception("Exception occurred")
        print(
            "Error"
        )  # Remember, need to return a string to Zabbix as we are in an external checker
Example #10
0
 def ZabbixSend(self, value, key=''):
     """Send a single value to Zabbix.

     Args:
         value: metric value to push.
         key: Zabbix item key; falls back to cfg['zabbix_trigger'] when
             empty.
     """
     if key == '':
         key = self.cfg['zabbix_trigger']
     metric = ZabbixMetric(self.cfg['hostname'], "{}".format(key), value)
     packet = [metric]
     zbx = ZabbixSender(self.cfg['zabbix_address'])
     try:
         zbx.send(packet)
         self.logger.debug("Sent key '{}' value '{}' to zabbix".format(
             key, value))
     except Exception:
         # Was a bare `except:` — that also swallowed SystemExit and
         # KeyboardInterrupt; keep the best-effort behaviour but only
         # for ordinary exceptions.
         self.logger.error(
             "Error sending key '{}' value '{}' to zabbix.".format(
                 key, value))
Example #11
0
def send(monitor=None):
    """Send a passive-monitoring heartbeat (message + status 0) to Zabbix.

    Args:
        monitor: Zabbix host name to report under; falls back to
            ZBX_PASSIVE_MONITOR when not given.
    """
    monitor = monitor if monitor else ZBX_PASSIVE_MONITOR
    if not ZBX_PASSIVE_SERVER or not ZBX_PASSIVE_PORT or not monitor:
        LOGGER.error('Settings insufficient to passive monitoring')
        return

    zabbix_server = ZabbixSender(zabbix_server=ZBX_PASSIVE_SERVER,
                                 zabbix_port=int(ZBX_PASSIVE_PORT),
                                 chunk_size=2)

    time_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Creates the message
    # Fix: use the resolved `monitor` host — the parameter was previously
    # accepted, validated, and then ignored in favor of the module-level
    # ZBX_PASSIVE_MONITOR in both metrics.
    passive_message_status = ZabbixMetric(monitor,
                                          'passive_message',
                                          'Working - {0}'.format(time_now))

    # Creates the status (0 - OK; 1 - Warning; 2 - Critical)
    passive_monitor_status = ZabbixMetric(monitor, 'passive_check', 0)

    metrics = [passive_message_status, passive_monitor_status]
    result = zabbix_server.send(metrics)

    try:
        if result.failed == 0:
            LOGGER.info('Passive monitoring sent with success.')
            LOGGER.debug(result)
        else:
            LOGGER.error('Fail to sent Passive monitoring.')
    except AttributeError:
        LOGGER.error('Fail to verify return of Passive monitoring.')
Example #12
0
    def send(self):
        """Send the collected miner statistics to the Zabbix server."""

        # One sender per miner, with chunk_size set to the batch length so
        # each miner's metrics go out in a single request.
        for name, metrics in self.__metrics.items():
            server = ZabbixSender(
                self.server,
                self.port,
                chunk_size=len(metrics),
            )
            try:
                # server.send() runs inside the format() call: the metrics
                # are transmitted while building the log message, and its
                # result is logged.
                self.log.info(
                    "metrics sended to {server}:{port} "
                    "for {miner} ({result})".format(
                        server=self.server,
                        port=self.port,
                        miner=name,
                        result=server.send(metrics),
                    ), )
            except socket.error as e:
                # On a network failure, log and stop sending the remaining
                # miners' batches entirely.
                self.log.error(
                    "error on sending metrics to {server}:{port} "
                    "for {miner} ({message})".format(
                        server=self.server,
                        port=self.port,
                        miner=name,
                        message=e,
                    ), )
                break
Example #13
0
def main():
    """Poll the web app's /metrics endpoint once a second and push the
    visit-count delta since the previous poll to Zabbix."""
    while True:
        # Read the previously recorded counter; the original leaked a file
        # object on every iteration (open(...).read() without close()).
        with open("/code/visit", "r") as visit_file:
            visit = visit_file.read()
        url = 'http://web:5000/metrics'
        response = requests.get(url)
        metric = response.text.split()[-1]
        amount = int(metric) - int(visit)
        forsend = [ZabbixMetric('DevTest', 'Metric', amount)]
        zbx = ZabbixSender('zabbix-server')
        zbx.send(forsend)
        # Persist the latest counter for the next delta computation.
        with open("/code/visit", "w") as refresh_visit:
            refresh_visit.write(metric)
        time.sleep(1)
Example #14
0
def main(argv=None):
    """Read per-host Daikin controller credentials and unit lists from
    ./myhosts.json, poll each configured unit's state over the Daikin
    protocol, and push the readings to Zabbix.

    Returns:
        -1 when the host JSON file does not exist; otherwise None.

    NOTE(review): Python 2 code (print statements). DaikinApi/DaikinHeader/
    sendP and the request classes come from elsewhere in the project.
    """
    if argv is None:
        argv = sys.argv
    
    fileHostCheck='./myhosts.json'
    
    if not os.path.exists(fileHostCheck):
        return -1
    
    else:
        with open(fileHostCheck, 'rb') as json_data:
            try:
                myHostDict = json.load(json_data)
            except Exception as e:
                print "failed when loading current host json file or parsing the json" + traceback.format_exc()

    zabbixPacket=[]
    for host in list(myHostDict):
        # Only poll hosts that declare at least one unit.
        if len(myHostDict[host]['units'])>0 :
            myUser=myHostDict[host]['user']
            myPass=myHostDict[host]['password']
            myHostId=myHostDict[host]['hostId']
            myHostIp=host
            reqArray=[]

            # Build one state request per configured unit id.
            for unit in list(myHostDict[host]['units']):
                reqArray.append(DaikinStructReqPntState(type=0,id=int(unit)))

            # Log in to the controller; arg1 == 1 appears to signal success
            # and arg2 to carry the session/user id — TODO confirm against
            # the Daikin protocol definitions.
            pLogin=DaikinApi()/DaikinHeader()/DaikinReqSetLogin(username=myUser, password=myPass)
            respLogin=DaikinHeader(sendP(myHostIp,pLogin))
            if int(respLogin.arg1)!=1:
                print "Login Failure",myHostIp
                continue
            myUserId=respLogin.arg2

            # Query the state of all requested units in one call.
            pPntReq=DaikinApi()/DaikinHeader(id=myUserId)/DaikinReqGetPntState(reqIds=reqArray)
            respPnt=DaikinHeader(sendP(myHostIp,pPntReq))
            
            # Log out; the logout response is ignored.
            pLogout=DaikinApi()/DaikinHeader(id=myUserId)/DaikinReqSetLogout()
            DaikinHeader(sendP(myHostIp,pLogout))

            # Flatten each unit's state fields into Zabbix metrics keyed
            # as daikin.pnt[<field>,<unit id>] under the configured host id.
            for pnt in respPnt.payload.pntStateArray:
                tempPacket=[
                    ZabbixMetric(myHostId,'daikin.pnt[enumDriveMode,{0}]'.format(pnt.id),pnt.enumDriveMode),
                    ZabbixMetric(myHostId,'daikin.pnt[tempAmbient,{0}]'.format(pnt.id),pnt.tempAmbient),
                    ZabbixMetric(myHostId,'daikin.pnt[tempSetPoint,{0}]'.format(pnt.id),pnt.tempSetPoint),
                    ZabbixMetric(myHostId,'daikin.pnt[enumVentMode,{0}]'.format(pnt.id),pnt.enumVentMode),
                    ZabbixMetric(myHostId,'daikin.pnt[enumVentVol,{0}]'.format(pnt.id),pnt.enumVentVol),
                    ZabbixMetric(myHostId,'daikin.pnt[pntState,{0}]'.format(pnt.id),pnt.pntState),
                    ZabbixMetric(myHostId,'daikin.pnt[errorString,{0}]'.format(pnt.id),pnt.errorString),
                    ZabbixMetric(myHostId,'daikin.pnt[iconMode,{0}]'.format(pnt.id),pnt.iconMode),
                    ZabbixMetric(myHostId,'daikin.pnt[iconAppend,{0}]'.format(pnt.id),pnt.iconAppend),
                    # Bit 0x08 of iconAppend looks like a filter-LED flag — TODO confirm
                    ZabbixMetric(myHostId,'daikin.pnt[filterLed,{0}]'.format(pnt.id), 1 if (pnt.iconAppend & 0x08) else 0 )
                ]
                zabbixPacket.extend(tempPacket)

    zbx=ZabbixSender('192.168.128.7')
    zbxResp=zbx.send(zabbixPacket)
    print zbxResp
Example #15
0
    def test_send(self, mock_socket):
        """A well-formed server reply should yield a truthy send() result."""
        header_len = b'\x01\\\x00\x00\x00\x00\x00\x00\x00'
        mock_socket.return_value = mock_socket
        mock_socket.recv.side_effect = (b'ZBXD', header_len, self.resp_body)

        metric = ZabbixMetric('host1', 'key1', 100500, 1457358608)
        sender = ZabbixSender()
        self.assertTrue(sender.send([metric]))
def send_to_zabbix(hostname, msg, conf_yaml):
    """Forward one syslog message to the Zabbix server named in conf_yaml.

    Args:
        hostname: Zabbix host the 'syslog' item belongs to.
        msg: message text sent as the item value.
        conf_yaml: config mapping; conf_yaml['api']['ip'] is the server IP.
    """
    metric = ZabbixMetric(hostname, 'syslog', msg)
    packet = [metric]
    logger.debug(packet)
    sender = ZabbixSender(conf_yaml['api']['ip'])
    logger.debug(sender)
    outcome = sender.send(packet)
    logger.debug(outcome)
    return None
Example #17
0
    def __send_api_data(self, final_time):
        """Push the measured API response time (seconds) to the test host."""
        metrics = [
            ZabbixMetric(self.test_host, 'api.test', float(final_time)),
        ]
        zbx = ZabbixSender('', 10051)
        return zbx.send(metrics)
Example #18
0
    def test_send(self, mock_socket):
        """send() should parse the reply into a ZabbixResponse with the
        expected counters."""
        header_len = b'\x01\\\x00\x00\x00\x00\x00\x00\x00'
        mock_socket.return_value = mock_socket
        mock_socket.recv.side_effect = (b'ZBXD', header_len, self.resp_body)

        metric = ZabbixMetric('host1', 'key1', 100500, 1457358608)
        sender = ZabbixSender()
        response = sender.send([metric])
        self.assertIsInstance(response, ZabbixResponse)
        self.assertEqual(response.chunk, 1)
        self.assertEqual(response.total, 10)
        self.assertEqual(response.failed, 10)
Example #19
0
    def test_send(self, mock_socket):
        """The parsed reply exposes the chunk/total/failed counters."""
        mock_socket.return_value = mock_socket
        mock_socket.recv.side_effect = (
            b'ZBXD',
            b'\x01\\\x00\x00\x00\x00\x00\x00\x00',
            self.resp_body,
        )

        sender = ZabbixSender()
        response = sender.send(
            [ZabbixMetric('host1', 'key1', 100500, 1457358608)])
        self.assertIsInstance(response, ZabbixResponse)
        for attribute, expected in (('chunk', 1), ('total', 10),
                                    ('failed', 10)):
            self.assertEqual(getattr(response, attribute), expected)
    def send(self):
        """Send the collected data to the local Zabbix server.

        Returns:
            bool: True when every metric was accepted, False when any
            metric failed or the send itself raised.
        """
        zbx = ZabbixSender('127.0.0.1')
        data = self._data()
        try:
            zbx_result = zbx.send(data)
            failure_result = zbx_result.failed
        except Exception:
            # Was a bare `except:` — that also swallowed SystemExit and
            # KeyboardInterrupt. Keep the best-effort fallback for
            # ordinary exceptions only.
            failure_result = 1

        return failure_result == 0
Example #21
0
def main():
    """Drain the iostat queue once a second and ship the samples to Zabbix."""
    hostname = socket.gethostname()
    zabbix = ZabbixSender(zabbix_server=config['zabbix_server'])
    while True:
        batch = []
        # Drain everything currently queued into one batch.
        while not stats_queue.empty():
            disk, data = stats_queue.get()
            for key, value in data.items():
                batch.append(
                    ZabbixMetric(hostname, f'iostat[{disk},{key}]', value))

        if batch:
            outcome = zabbix.send(batch)
            print(batch, outcome)

        time.sleep(1)
Example #22
0
def trapper(items_raw):
    """Forward every non-timestamp item in *items_raw* to Zabbix as a
    trapper value, clocked with the payload's own timestamp.

    Returns:
        True on a successful send, False otherwise (including when the
        configured protocol is not zabbix).
    """
    if dict_setup["metric_sent_protocol"].lower() != "zabbix":
        return False
    hostname = dict_setup["metric_sent_hostname"].lower()
    zabbix_server = dict_setup["metric_sent_server"].lower()
    try:
        timestamp = items_raw['timestamp']
        zbx = ZabbixSender(zabbix_server)
        metrics = [
            ZabbixMetric(host=hostname, key=name, value=items_raw[name],
                         clock=timestamp)
            for name in items_raw if name != "timestamp"
        ]
        returapi = zbx.send(metrics)
        logging.info("{}: {}".format(inspect.stack()[1][3], returapi))
        return True
    except Exception as e:
        logging.error("Trappto zabbix error: {} -  {}".format(inspect.stack()[1][3], e))
        return False
def return_result(result, conf): # Send the test result to Zabbix
    """Serialize *result* to JSON, push it to the Zabbix server from
    conf['zabbix_server'] under item key "jsonresult", log the outcome,
    and terminate the process.

    NOTE(review): relies on module-level globals (driver, url, auto_log)
    and never returns — it always calls exit() at the end.
    """
    driver.quit()
    jresult = json.dumps(result)
    auto_log.info('Send result'+jresult)
    # Derive the Zabbix host name from the test URL's hostname by
    # stripping the internal domain suffixes.
    host = (url.hostname).replace(".otr.ru","").replace(".pds","")
    print("host:" + host)
#    print("jresult:" + jresult)
#    zabbix_sender = ZabbixSender(zabbix_server='zabbix_proxy02')
    zabbix_sender = ZabbixSender(zabbix_server=conf['zabbix_server'])
    metrics = []
    m = ZabbixMetric(host, "jsonresult", jresult)
    metrics.append(m)
    send = zabbix_sender.send(metrics)
    if send.failed:
        print('Something went wrong when sending test result for server: ' + host + ', check present item jsonresult for server or wait 1 hour after adding it')
        print(send)
        auto_log.error('Something went wrong when sending test result for server: ' + host + ', check present item jsonresult for server or wait 1 hour after adding it')
    else:
        print('Succesfuly sended result to Zabbix server')
        auto_log.info('Succesfuly sended result to Zabbix server')
    print(result)
    exit()
Example #24
0
def trapper(items_raw):
    """Send the items in *items_raw* (except 'timestamp') to Zabbix as
    trapper values timestamped with items_raw['timestamp'].

    Returns:
        True when the batch was sent, False on error or when the
        configured protocol is not zabbix.
    """
    if dict_setup["metric_sent_protocol"].lower() == "zabbix":
        hostname = dict_setup["metric_sent_hostname"].lower()
        zabbix_server = dict_setup["metric_sent_server"].lower()
        try:
            timestamp = items_raw['timestamp']
            zbx = ZabbixSender(zabbix_server)
            metrics = []
            for name in items_raw:
                if name == "timestamp":
                    continue
                metrics.append(ZabbixMetric(host=hostname,
                                            key=name,
                                            value=items_raw[name],
                                            clock=timestamp))
            returapi = zbx.send(metrics)
            logging.info("{}: {}".format(inspect.stack()[1][3], returapi))
            return True
        except Exception as e:
            logging.error("Trappto zabbix error: {} -  {}".format(
                inspect.stack()[1][3], e))
            return False
    else:
        return False
Example #25
0
def send_trap(environment):
    # Выставляем признак неотправки трапа

    do_not_send_trap = False

    # Загружаем конфиг
    # в конфиге должны быть 4 секции:
    # 1. trap_to_environment_variables - маппинг переменных трапа в переменные окружения
    # 2. trap_parameters - параметры трапа, которые будут отправлены
    # 3. hostname - имя хоста ОЕМ
    # 4. zabbix - параметры хоста самого Заббикса или одного из его прокси,
    #    к которому подключены все(!) хосты, которые мониторятся в ОЕМ
    with open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         os.pardir, 'config', 'snmp.json'), 'r') as json_file:
        config = json.load(json_file)

    # Маппинг переменных окружения в переменные трапа в соответствии с MIBом
    # # Переменные перечислены в соответсвии с главой
    # # "3.10.2 Passing Event, Incident, Problem Information to an OS Command or Script"
    # # документа Oracle® Enterprise Manager Cloud Control Administrator's Guide
    # # Если зачению переменной трапа может соответствовать несколько переменных окружения
    # # в зависимости от события, которое обрабатывается, такие переменные представлены
    # # в виде словаря, в которых ключ соответствует переменной ISSUE_TYPE - тип события,
    # # значение - переменной, которую нужно подставить
    trap_to_environment_variables = config['trap_to_environment_variables']

    # Все поля трапа OEM, которые мы будем передавать, получены из MIBа omstrap.v1
    trap_parameters = config['trap_parameters']

    hostname = config['hostname']
    zabbix = config['zabbix']

    # На вход получаем параметры окружения в виде словаря, которые создает OMS при вызове скрипта
    # Собираем только те параметры, которые укладываются в стандартный MIB omstrap.v1 Oracle OEM 13c
    # Кроме того, сохраняем в oms_event['oraEMNGEnvironment'] все переменные окружения, мало ли что-то упустили

    oms_event = {
        'oraEMNGEnvironment': environment,
        'oraEMNGEventSequenceId': 'null'
    }

    for trap_variable, os_variable in trap_to_environment_variables.iteritems(
    ):
        if type(os_variable) is unicode:
            oms_event.update({
                trap_variable:
                environment[os_variable] if os_variable in environment else ''
            })
        elif type(os_variable) is dict:
            issue_type = environment['ISSUE_TYPE']
            oms_event.update({
                trap_variable:
                environment[os_variable[issue_type]] if
                (issue_type in os_variable
                 and os_variable[issue_type] in environment) else ''
            })

    # Нужно подправить некоторые элементы
    # Во-первых, подрезаем длину сообщения и URL события до 255 символов, чтобы влезало в трап
    oms_event.update({
        'oraEMNGEventMessage':
        oms_event['oraEMNGEventMessage'][:255],
        'oraEMNGEventMessageURL':
        oms_event['oraEMNGEventMessageURL'][:255],
        'oraEMNGEventContextAttrs':
        oms_event['oraEMNGEventContextAttrs'][:255],
        'oraEMNGEventTargetName':
        oms_event['oraEMNGEventTargetName'].replace(
            '.severstal.severstalgroup.com', '')
    })

    # Во-вторых, для инцидентов и проблем не передается в переменную SequenceID
    # Будем брать его из SequenceID породившего события
    if oms_event['oraEMNGIssueType'] in ('2', '3'):
        logging.debug('Message is incident or problem')
        oms_event.update({
            'oraEMNGEventIssueId':
            re.search('&issueID=([ABCDEF|0-9]{32})$',
                      environment['MESSAGE_URL']).group(1)
        })

        emcli = Emcli()
        event_id = emcli.get_event_id(oms_event['oraEMNGEventIssueId'])
        if event_id is not None and len(event_id) != 0:
            logging.debug('Got event ID from OEM %s' % ', '.join(event_id))
            oms_event.update({'oraEMNGEventSequenceId': event_id[0]})
        else:
            logging.debug('Event ID not found in OEM')
            oms_event.update(
                {'oraEMNGEventSequenceId': oms_event['oraEMNGEventIssueId']})

        # В-третьих, нужно проверить, есть ли событие с таким же уровнем severity
        # и отправлялось ли по нему сообщение
        # Если есть, трап по инциденту или проблеме отправлять не нужно
        message_sent = emcli.check_message_sent(
            oms_event['oraEMNGEventIssueId'],
            oms_event['oraEMNGEventSeverity'])

        # Подождем 2 секунды, возможно сообщение по событию запаздывает
        if not message_sent:
            logging.debug('Message from OEM not sent')
            time.sleep(2)
            message_sent = emcli.check_message_sent(
                oms_event['oraEMNGEventIssueId'],
                oms_event['oraEMNGEventSeverity'])

        if message_sent:
            logging.debug('Message from OEM sent, skipping')
            do_not_send_trap = True
            # Если пришел Acknowledged, трап посылаем с ID породившего события
            if oms_event['oraEMNGAssocIncidentAcked'] == 'Yes':
                logging.debug('... But it is an Acknowledge message, sending')
                do_not_send_trap = False

        # Если пришла закрывашка, а само событие закрылось без отправки сообщения,
        # нужно отправить трап, подменив SequenceID на аналогичный параметр события
        if oms_event['oraEMNGEventSeverity'] == 'Clear':
            logging.debug('Clear message came')
            do_not_send_trap = False

    # Проверяем, если пришла закрывашка, а открывающего события в Заббиксе нет, отправлять не будем
    try:
        if oms_event['oraEMNGEventSeverity'] == 'Clear' or oms_event[
                'oraEMNGAssocIncidentAcked'] == 'Yes':
            request = get_event_by_id(oms_event['oraEMNGEventSequenceId'])
            if request is None or len(request) == 0:
                do_not_send_trap = True
        else:
            logging.debug('Opening message exists in Zabbix')
            do_not_send_trap = do_not_send_trap
    except Exception as e:
        log_event(oms_event_to_log=oms_event)
        raise e

    # Проверяем, если пришла закрывашка, будем закрывать через API, не будем отправлять
    try:
        if oms_event['oraEMNGEventSeverity'] == 'Clear' or oms_event[
                'oraEMNGAssocIncidentAcked'] == 'Yes':
            logging.debug(
                'Trying to acknowledge event to close it by API method')
            result = acknowledge_event_by_id(
                oms_event['oraEMNGEventSequenceId'])
            if result is not None and len(result) != 0:
                if 'TrapState' not in oms_event:
                    oms_event.update({'TrapState': 'closed by api'})
                do_not_send_trap = True
            else:
                do_not_send_trap = do_not_send_trap
    except Exception as e:
        log_event(oms_event_to_log=oms_event)
        raise e

    # Проверяем, нет ли случайно в Заббиксе события с таким же текстом
    # отображаемого на экране
    # Если есть - отсылать его не нужно
    try:
        if check_if_message_exists(
                '%s %s %s: %s Acknowledge=%s' %
            (oms_event['oraEMNGEventSequenceId'],
             oms_event['oraEMNGEventSeverity'],
             oms_event['oraEMNGEventTargetName'],
             oms_event['oraEMNGEventMessage'],
             oms_event['oraEMNGAssocIncidentAcked'])) and not (
                 oms_event['oraEMNGEventSeverity'] == 'Clear'
                 or oms_event['oraEMNGAssocIncidentAcked'] == 'Yes'):
            logging.debug('Message exists in Zabbix, skipping')
            do_not_send_trap = True
        else:
            logging.debug('Message do not exists in Zabbix')
            do_not_send_trap = do_not_send_trap
    except Exception as e:
        log_event(oms_event_to_log=oms_event)
        raise e

    # Если не стоит признак не посылать трап,
    if not do_not_send_trap:
        # Проверяем, нужно ли фильтровать трап
        # Если да - отсылать не будем
        if not filter_trap(message=environment['MESSAGE']
                           if 'MESSAGE' in environment else None,
                           event_name=environment['EVENT_NAME']
                           if 'EVENT_NAME' in environment else None):
            logging.debug('Message not filtered')
            # Для начала закроем событие в заббиксе с таким же ИД
            # если таковой имеется
            # но не трогаем закрывашки
            try:
                if not oms_event[
                        'oraEMNGEventSeverity'] == 'Clear' and not oms_event[
                            'oraEMNGAssocIncidentAcked'] == 'Yes':
                    logging.debug('Trying to acknowledge event in Zabbix')
                    acknowledge_event_by_id(
                        oms_event['oraEMNGEventSequenceId'])
            except Exception as e:
                log_event(oms_event_to_log=oms_event)
                raise e

            # Собираем SNMP трап
            # Для этого нужен MIB (Management Information Base)
            # # Есть проблема, Питон не хочет подхватывать напрямую MIB-файл из OMS,
            # # который лежит $OMS_HOME/network/doc/omstrap.v1. Кроме того, в дефолтном файле
            # # слишком много ненужной (устаревшей) информации. Поэтому мы удалили все OIDы oraEM4Alert,
            # # кроме тех которые необходимы для копиляции. После этого скомпилировали полученный MIB
            # # скриптом mibdump.py, который идет в поставке с пакетом pysmi, который ставиться pip'ом
            # # и положил полученный *.py файл в /usr/lib/python2.7/site-packages/pysnmp/smi/mibs с правами 644

            address = socket.gethostbyname(hostname)

            # Собираем переменные трапа
            trap_variables = [(ObjectIdentity('DISMAN-EVENT-MIB',
                                              'sysUpTimeInstance'),
                               TimeTicks(int(time.time()))),
                              (ObjectIdentity('SNMP-COMMUNITY-MIB',
                                              'snmpTrapAddress', 0), address)]

            for trap_variable in trap_parameters:
                trap_variables.append(
                    (ObjectIdentity('ORACLE-ENTERPRISE-MANAGER-4-MIB',
                                    trap_variable),
                     oms_event[trap_variable].replace('"', "'")
                     if trap_variable in oms_event else ''))

            # Посылаем трап
            try:
                logging.debug('Trying to send SNMP trap')
                error_indication, error_status, error_index, var_binds = next(
                    sendNotification(
                        SnmpEngine(), CommunityData('public', mpModel=0),
                        UdpTransportTarget((zabbix['host'], zabbix['port'])),
                        ContextData(), 'trap',
                        NotificationType(
                            ObjectIdentity(
                                'ORACLE-ENTERPRISE-MANAGER-4-MIB',
                                'oraEMNGEvent')).addVarBinds(*trap_variables)))

                if error_indication:
                    logging.debug('SNMP exception')
                    oms_event.update({'TrapState': 'exception'})
                    log_event(oms_event_to_log=oms_event)
                    raise Exception(error_indication)
                else:
                    logging.debug('SNMP sent')
                    oms_event.update({'TrapState': 'send snmp'})
            except Exception as e:
                log_event(oms_event_to_log=oms_event)
                raise e

            # Собираем Zabbix трап
            # Чтобы не было одновременной отправки нескольких сообщений
            # Добавляем функционал файла блокировок таким образом, чтобы
            # все наши процессы по отправке заббикс трапов шли по очереди
            # Нужно запомнить ИД процесса
            pid = os.getpid()

            # Разбираемся с лок-файлом
            # Лок-файл лежит в папке .secure
            lock_file = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), os.pardir,
                '.secure', '.lock')
            if os.path.isfile(lock_file):
                # Если такой файл есть, дописываем в него ИД процесса
                with open(lock_file, 'a+') as lock:
                    lock.write(str(pid) + '\n')
            else:
                # Если нет - создаем и записываем
                with open(lock_file, 'w+') as lock:
                    lock.write(str(pid) + '\n')

            logging.info('Sent PID %d to lock file' % pid)

            # Собираем переменные трапа
            trap_variables = dict()

            for trap_variable in trap_parameters:
                if trap_variable in oms_event:
                    trap_variables.update({
                        trap_variable.encode('ascii'):
                        oms_event[trap_variable].encode('ascii')
                    })

            # Формируем метрику
            try:
                # В качестве метрики берем тот же набор параметров,
                # что и для SNMP трапа, но сваливаем его в json
                # и в таком виде отправляем в Заббикс
                m = ZabbixMetric(
                    oms_event['oraEMNGEventHostName'], 'data',
                    json.dumps(trap_variables, indent=3, sort_keys=True))
                zbx = ZabbixSender(zabbix['host'])

                # Проверяем, что наша очередь работать
                # Для этого ИД нашего процесса должен стоять первым в списке
                processes = list()
                counter = 0
                with open(lock_file, 'r') as lock:
                    for line in lock:
                        if line.replace(
                                '\n', '').strip() != '' and psutil.pid_exists(
                                    int(line.replace('\n', '').strip())):
                            processes.append(line.replace('\n', '').strip())

                # Если не первый - ждем своей очереди
                if processes[0] != str(pid):
                    logging.info(
                        'First PID is %s. It\'s not equal ours, sleeping' %
                        processes[0])
                    logging.info('Process queue is [%s]. ' %
                                 ', '.join(processes))

                while processes[0] != str(pid) and counter < 5:
                    # Ждем 1 секунду
                    # Потому, что Заббикс не может разделить два пришедших события
                    # если у них совпадает метка времени
                    # А метка времени у него берется с точностью до секунды
                    time.sleep(1)
                    # Но не более 5 раз
                    counter += 1
                    processes = list()
                    with open(lock_file, 'r') as lock:
                        for line in lock:
                            if line.replace(
                                    '\n',
                                    '').strip() != '' and psutil.pid_exists(
                                        int(line.replace('\n', '').strip())):
                                processes.append(
                                    line.replace('\n', '').strip())
                    logging.info('Process queue is [%s]. ' %
                                 ', '.join(processes))

                # Наша очередь, поехали
                if counter == 5:
                    logging.info('Enough waiting, running')
                else:
                    logging.info('First PID is ours, running')

                # Отправляем
                response = zbx.send([m])

                # Проверяем ответ
                # Наша отправка не должна зафейлиться, но должна быть обработана
                if response is not None:
                    if response.failed == 1:
                        oms_event.update({
                            'TrapState':
                            oms_event['TrapState'] + ', exception zabbix'
                        })
                    elif response.processed == 1:
                        oms_event.update({
                            'TrapState':
                            oms_event['TrapState'] + ', send zabbix'
                        })
            except Exception as e:
                log_event(oms_event_to_log=oms_event)
                raise e
            finally:
                # В конце концов, поработал - прибери за собой
                # Удаляем из лок-файла свой ИД
                # По логике, он должен быть первым в файле, но чем черт не шутит
                # Поэтому считываем весь файл, а потом перезаписываем его всем его содержимым кроме строки с нашим ИД
                processes = list()
                with open(lock_file, 'r') as lock:
                    for line in lock:
                        if line.replace(
                                '\n', '').strip() != '' and psutil.pid_exists(
                                    int(line.replace('\n', '').strip())):
                            processes.append(line.replace('\n', '').strip())

                with open(lock_file, 'w') as lock:
                    for line in processes:
                        if line != str(pid):
                            lock.write(line + '\n')

                processes.remove(str(pid))

                logging.info('Final process queue is [%s]. ' %
                             ', '.join(processes))

                if os.path.getsize(lock_file) == 0:
                    os.remove(lock_file)
        else:
            logging.debug('Event filtered')
            if 'TrapState' not in oms_event:
                oms_event.update({'TrapState': 'filtered'})
    else:
        logging.debug('Event skipped')
        if 'TrapState' not in oms_event:
            oms_event.update({'TrapState': 'skipped'})

    log_event(oms_event_to_log=oms_event)

    # Возвращаем полученный SequenceID
    return '%s: %s' % (oms_event['oraEMNGEventSequenceId'],
                       oms_event['TrapState'])
Example #26
0
def statistic(localdata):
    """Build per-key match counters from *localdata* log lines and export them.

    The counters are optionally written to a JSON statistics file, inserted
    into MongoDB collections and/or sent to Zabbix as trapper items, depending
    on the loaded configuration.

    Args:
        localdata: iterable of raw log lines to match against the configured
            regular expressions.
    """
    # Accumulated counters, keyed by the captured value (port/name/etc.).
    search = {}

    # Main filter: regexes whose first capture group names the counter bucket.
    principal = getconfig(mainconfig, 'principal')
    # Secondary filters counted inside each principal bucket.
    filters = getconfig(mainconfig, 'filters')
    # Extra filters counted under a fixed, configured key.
    extra = getconfig(mainconfig, 'extra')
    # Global settings (statistics file export).
    config = getconfig(mainconfig, 'config')

    for line in localdata:
        for extras in extra:
            name = extras['name']
            regex = extras['regex']
            key = extras['key']

            if re.search(regex, line):
                if key in search:
                    # .get() so a second extra sharing the same key but a
                    # different name cannot raise KeyError (bug fix).
                    search[key][name] = search[key].get(name, 0) + 1
                else:
                    search[key] = {name: 1}

        for pregex in principal:
            match = re.search(pregex['regex'], line)
            if not match:
                continue

            bucket = match.group(1)
            if bucket in search:
                # .get() guards against a bucket first created by an
                # extra filter that has no "total" entry yet (bug fix).
                search[bucket]["total"] = search[bucket].get("total", 0) + 1
            else:
                search[bucket] = {"total": 1}

            for fregex in filters:
                fname = fregex['name']
                if re.search(fregex['regex'], line):
                    if fname in search[bucket]:
                        search[bucket][fname] += 1
                    else:
                        search[bucket][fname] = 1

    # Optionally dump the collected statistics to a JSON file.
    try:
        if config[0]['exportstatistic'] == "yes":
            sfile = config[0]['statisticpath']
            with open(sfile, "w", encoding="utf8") as outfile:
                json.dump(search,
                          outfile,
                          ensure_ascii=False,
                          indent=4,
                          sort_keys=True)
    except Exception as e:
        logger_error.error(
            "Erro ao carregar configurações de estatistica: [%s]" % e,
            exc_info=True)

    # MongoDB export, driven by the 'mongodb' configuration section.
    dblist = getconfig(mainconfig, 'mongodb')

    for dbinfo in dblist:
        if dbinfo["sendtodb"] != "yes":
            continue

        # Imported lazily so pymongo is only required when the DB export
        # is enabled (yum/dnf install python3-pymongo python3-bson).
        from pymongo import MongoClient

        delay = dbinfo["delay"]
        dburl = dbinfo["dburl"]
        dbname = dbinfo["dbname"]

        try:
            client = MongoClient(dburl, serverSelectionTimeoutMS=delay)
            # server_info() raises if the server is unreachable, so `db`
            # is always bound before it is used below.
            dbserverinfo = client.server_info()
            if dbserverinfo:
                db = client[dbname]

            for info in search:
                # `info` names the target collection (created on demand).
                if info:
                    stat = db[info]
                    # One timestamp per record so the date/hour/minute
                    # fields are mutually consistent.
                    now = datetime.now()
                    search[info]["date"] = now
                    search[info]["year"] = now.strftime("%Y")
                    search[info]["month"] = now.strftime("%m")
                    search[info]["day"] = now.strftime("%d")
                    search[info]["hour"] = now.strftime("%H")
                    search[info]["minute"] = now.strftime("%M")
                    # Insert exactly once. The original inserted the same
                    # document twice; the second insert failed with
                    # DuplicateKeyError because insert_one() injects '_id'
                    # into the dict (bug fix).
                    stat.insert_one(search[info])

            if debug == "yes":
                logger_debug.debug(
                    "Informações enviadas para o DB: [%s], base [%s]" %
                    (dburl, dbname))
        except Exception as e:
            logger_error.error("Erro: [%s]" % e, exc_info=True)

    # Zabbix export: load per-client filter definitions (*.conf JSON files).
    extension = '.conf$'
    dirs = files(path, extension)

    metrics = []

    for ffile in dirs:
        filepath = path + '/' + str(ffile)
        with open(filepath, 'r') as monitorfile:
            if debug == "yes":
                logger_debug.debug("Arquivo carregado: [%s]" % ffile)
            filterdata = json.load(monitorfile)

        for info in filterdata:
            # Normalize optional fields so the checks below are uniform.
            info.setdefault('multi_keys', None)
            info.setdefault('key', None)

            if info['key']:
                key = str(info['key'])
                re_item = info['re_item']
                zabbix_item = info['zabbix_item']
                zabbix_client = info['zabbix_client']

                # Ensure the key exists so a metric is always produced.
                search.setdefault(key, {'total': 0})

                if search[key]:
                    send = search[key].get(re_item, 0)

                    if debug == "yes":
                        logger_debug.debug(
                            'Sender Info: Cliente: [%s] - Item: [%s] - Value: [%s]'
                            % (zabbix_client, zabbix_item, send))
                    metrics.append(ZabbixMetric(zabbix_client, zabbix_item, send))

            if info['multi_keys']:
                soma = 0
                keys = info['multi_keys']
                re_item = info['re_item']
                zabbix_item = info['zabbix_item']
                zabbix_client = info['zabbix_client']

                for key in keys:
                    if debug == "yes":
                        logger_debug.debug("Multi porta: [%s] de (%s)" %
                                           (key, keys))
                    # Guarantee the key is a string so the dict lookup works.
                    key = str(key)
                    search.setdefault(key, None)

                    if search[key]:
                        soma += search[key].get(re_item, 0)

                if debug == "yes":
                    # Log the accumulated sum (the value actually sent);
                    # the original logged the last per-key value (bug fix).
                    logger_debug.debug(
                        '[M]Sender Info: Cliente: [%s] - Item: [%s] - Value: [%s]'
                        % (zabbix_client, zabbix_item, soma))
                metrics.append(ZabbixMetric(zabbix_client, zabbix_item, soma))

    # Zabbix server settings.
    zabbix = getconfig(mainconfig, 'zabbix')

    for zbx_conf in zabbix:
        zabbixserver = zbx_conf['zabbixserver']
        zabbixport = zbx_conf['zabbixport']

        try:
            if zbx_conf['sendtozabbix'] == "yes":
                if debug == "yes":
                    logger_debug.debug(
                        'Enviar dados ao Zabbix Server [%s:%s]' %
                        (zabbixserver, zabbixport))
                sender = ZabbixSender(zabbixserver)
                sender.send(metrics)
            else:
                if debug == "yes":
                    logger_debug.debug(
                        'Não enviar dados ao Zabbix Server [%s:%s]' %
                        (zabbixserver, zabbixport))
        except Exception as e:
            logger_error.error("Erro: [%s]" % e, exc_info=True)
Example #27
0
def on_message(client, userdata, msg):
    """MQTT message callback: forward the payload to Zabbix as a trapper value."""
    metric = ZabbixMetric(zabbix_item_host, zabbix_item_name, str(msg.payload))
    sender = ZabbixSender(zabbix_server=zabbix_host,
                          zabbix_port=10051,
                          use_config=None)
    sender.send([metric])
Example #28
0
class zabbix(outputPluginBase):
    """Output plugin that forwards result rows to a Zabbix server as trapper items."""

    def __init__(self, parent):
        self._log = logging.getLogger()
        self._parent = parent

    def addArguments(self):
        """Register the plugin's command line options on the parent parser."""
        self._parent._parser.add_argument(
            "--zabbix-server",
            dest="zabbix_server",
            help="Zabbix server hostname or IP [default: %(default)s]",
            default="127.0.0.1",
        )
        self._parent._parser.add_argument(
            "--zabbix-server-port",
            dest="zabbix_port",
            help="Zabbix server port [default: %(default)s]",
            default=10051,
        )
        self._parent._parser.add_argument(
            "--zabbix-host",
            dest="zabbix_host",
            help="Hostname as it displayed in Zabbix [default: %(default)s]",
            default="",
        )

    def init_plugin_params(self, **kwargs):
        """Copy zabbix_* settings from the parent configuration onto the plugin."""
        for var in ["zabbix_server", "zabbix_port", "zabbix_host"]:
            if self._parent.configuration._data.get(var):
                setattr(self, var,
                        self._parent.configuration._data.get(var, ""))

    @staticmethod
    def get_json_schema(configuration=None):
        """Return the (alpaca-style) JSON form schema describing plugin options.

        Args:
            configuration: optional dict with previously saved values used to
                pre-populate the defaults. Defaults to an empty dict.
        """
        # Default changed from a mutable `{}` literal to None to avoid the
        # shared-mutable-default pitfall; callers see identical behavior.
        if configuration is None:
            configuration = {}

        form = tree()
        form["schema"]["type"] = "object"

        form["schema"]["properties"]["zabbix_server"][
            "title"] = "Zabbix Server"
        form["schema"]["properties"]["zabbix_server"]["type"] = "string"
        form["schema"]["properties"]["zabbix_server"][
            "description"] = "Zabbix server hostname or IP"

        form["schema"]["properties"]["zabbix_port"]["title"] = "Port"
        form["schema"]["properties"]["zabbix_port"]["type"] = "integer"
        form["schema"]["properties"]["zabbix_port"][
            "description"] = "Zabbix server port"
        form["schema"]["properties"]["zabbix_port"][
            "default"] = configuration.get("zabbix_port", 10051)

        form["schema"]["properties"]["zabbix_host"][
            "title"] = "Zabbix Hostname"
        form["schema"]["properties"]["zabbix_host"]["type"] = "string"
        form["schema"]["properties"]["zabbix_host"][
            "description"] = "Hostname as it displayed in Zabbix"
        form["schema"]["properties"]["zabbix_host"][
            "default"] = configuration.get("zabbix_host", "")

        # Field display order in the rendered form.
        form["options"]["fields"]["zabbix_server"]["order"] = 1
        form["options"]["fields"]["zabbix_port"]["order"] = 2
        form["options"]["fields"]["zabbix_host"]["order"] = 3

        return dict(form)

    def connect(self, **kwargs):
        """Instantiate the ZabbixSender from the parent configuration."""
        self.init_plugin_params()
        self.zbx = ZabbixSender(
            self._parent.configuration._data.get("zabbix_server"),
            self._parent.configuration._data.get("zabbix_port"),
        )

    def process(self, data, **kwargs):
        """Convert *data* rows into ZabbixMetric objects and send them.

        Args:
            data: JSON string or an already-parsed list of row dicts; each row
                may carry a 'hostname' key overriding the configured
                'zabbix_host'.

        Returns:
            True on success or when there is nothing to do, False when *data*
            is a string but not valid JSON.
        """
        try:
            if data in [None, "None", ""]:
                self._log.debug("empty document -> nothing to do -> skipping")
                return True

            data = json.loads(data)
            self._log.debug(json.dumps(data, indent=2))
        except ValueError as e:
            self._log.error(
                "{0}\nis not a valid json document {1} -> invalid configuration -> skipping"
                .format(data, e))
            return False
        except TypeError:
            # *data* is already a parsed structure (not a string) -> use as-is.
            pass

        metrics = []
        for row in data:
            target_hostname = row.get(
                "hostname",
                self._parent.configuration._data.get("zabbix_host", None))
            if target_hostname:
                for k, v in row.items():
                    if k not in ["hostname"]:
                        metrics.append(ZabbixMetric(target_hostname, k, v))
            else:
                # Bug fix: the original referenced an undefined name `log`
                # here (NameError at runtime); use the plugin logger.
                self._log.error("Target Hostname not defined -> skipping")
                self._log.debug(row)
        self.zbx.send(metrics)

        return True
Example #29
0
class CheckKubernetesDaemon:
    data = {'zabbix_discovery_sent': {}}
    thread_lock = threading.Lock()

    def __init__(self, config, config_name, resources, resources_excluded,
                 resources_excluded_web, resources_excluded_zabbix,
                 discovery_interval, data_resend_interval):
        """Set up Kubernetes API clients, the Zabbix sender and web API settings.

        Args:
            config: configuration object providing k8s/zabbix/web settings.
            config_name: name of the loaded configuration (kept for reference).
            resources: all resource types that may be watched.
            resources_excluded: resource types excluded from watching entirely.
            resources_excluded_web: resource types excluded from web API export.
            resources_excluded_zabbix: resource types excluded from Zabbix export.
            discovery_interval: seconds between Zabbix discovery submissions.
            data_resend_interval: seconds between full data resends.
        """
        self.manage_threads = []

        self.logger = logging.getLogger(self.__class__.__name__)
        self.config_name = config_name
        self.discovery_interval = int(discovery_interval)
        self.data_resend_interval = int(data_resend_interval)

        # Fixed intervals: API heartbeat cadence and rate limiting.
        self.api_zabbix_interval = 60
        self.rate_limit_seconds = 30
        # Kubernetes API connection settings (bearer-token auth).
        self.api_configuration = client.Configuration()
        self.api_configuration.host = config.k8s_api_host
        self.api_configuration.verify_ssl = str2bool(config.verify_ssl)
        self.api_configuration.api_key = {
            "authorization": "Bearer " + config.k8s_api_token
        }

        # K8S API
        self.debug_k8s_events = False
        self.api_client = client.ApiClient(self.api_configuration)
        self.core_v1 = KubernetesApi(self.api_client).core_v1
        self.apps_v1 = KubernetesApi(self.api_client).apps_v1
        self.extensions_v1 = KubernetesApi(self.api_client).extensions_v1

        # Zabbix export settings and the resource subset sent to Zabbix.
        self.zabbix_sender = ZabbixSender(zabbix_server=config.zabbix_server)
        self.zabbix_resources = CheckKubernetesDaemon.exclude_resources(
            resources, resources_excluded_zabbix)
        self.zabbix_host = config.zabbix_host
        self.zabbix_debug = str2bool(config.zabbix_debug)
        self.zabbix_single_debug = str2bool(config.zabbix_single_debug)
        self.zabbix_dry_run = str2bool(config.zabbix_dry_run)

        # Web API export settings and the resource subset sent to the web API.
        self.web_api_enable = str2bool(config.web_api_enable)
        self.web_api_resources = CheckKubernetesDaemon.exclude_resources(
            resources, resources_excluded_web)

        self.web_api_host = config.web_api_host
        self.web_api_token = config.web_api_token
        self.web_api_cluster = config.web_api_cluster
        self.web_api_verify_ssl = str2bool(config.web_api_verify_ssl)

        # Final set of resource types this daemon instance will watch.
        self.resources = CheckKubernetesDaemon.exclude_resources(
            resources, resources_excluded)

        init_msg = "INIT K8S-ZABBIX Watcher\n<===>\n" \
                   "K8S API Server: %s\n" \
                   "Zabbix Server: %s\n" \
                   "Zabbix Host: %s\n" \
                   "Resources watching: %s\n" \
                   "web_api_enable => %s (resources: %s)\n" \
                   "web_api_host => %s\n" \
                   "<===>" \
                   % (self.api_configuration.host, config.zabbix_server, self.zabbix_host, ",".join(self.resources),
                      self.web_api_enable, ",".join(self.web_api_resources), self.web_api_host)
        self.logger.info(init_msg)

    @staticmethod
    def exclude_resources(available_types, excluded_types):
        result = []
        for k8s_type_available in available_types:
            if k8s_type_available not in excluded_types:
                result.append(k8s_type_available)
        return result

    def handler(self, signum, *args):
        """Signal handler: SIGTERM shuts the daemon down, SIGUSR1/SIGUSR2 dump state.

        SIGTERM sets the global exit flag, joins all managed threads (3s timeout
        each) and exits the process. SIGUSR1 logs a summary (last-sent timestamps
        per object); SIGUSR2 logs the full object data.
        """
        if signum in [signal.SIGTERM]:
            self.logger.info(
                'Signal handler called with signal %s... stopping (max %s seconds)'
                % (signum, 3))
            exit_flag.set()
            for thread in self.manage_threads:
                thread.join(timeout=3)
            self.logger.info('All threads exited... exit check_kubernetesd')
            sys.exit(0)
        elif signum in [signal.SIGUSR1]:
            # Summary view: only last-sent timestamps per tracked object.
            self.logger.info(
                '=== Listing count of data hold in CheckKubernetesDaemon.data ==='
            )

            with self.thread_lock:
                for r, d in self.data.items():
                    rd = dict()
                    # Resource managers expose .objects; other entries (plain
                    # dicts like 'zabbix_discovery_sent') are logged as-is.
                    if hasattr(d, 'objects'):
                        for obj_name, obj_d in d.objects.items():
                            rd[obj_name] = dict(
                                last_sent_zabbix=obj_d.last_sent_zabbix,
                                last_sent_web=obj_d.last_sent_web,
                            )
                    else:
                        rd = d
                    self.logger.info('%s: %s' % (r, rd))
        elif signum in [signal.SIGUSR2]:
            # Verbose view: full data of every tracked object.
            self.logger.info(
                '=== Listing all data hold in CheckKubernetesDaemon.data ===')

            with self.thread_lock:
                for r, d in self.data.items():
                    rd = dict()
                    if hasattr(d, 'objects'):
                        for obj_uid, obj in d.objects.items():
                            rd[obj_uid] = obj.data
                    else:
                        rd = d
                    self.logger.info('%s: %s\n' % (r, rd))

    def run(self):
        self.start_data_threads()
        self.start_api_info_threads()
        self.start_loop_send_discovery_threads()
        self.start_resend_threads()

    def start_data_threads(self):
        """Create resource managers and start one watcher thread per resource.

        'components' cannot be watched via the API, so it gets a polling
        TimedThread instead of a WatcherThread; 'containers' is derived from
        pod data and gets no watcher of its own. 'services' and 'containers'
        additionally get periodic global-report threads.
        """
        for resource in self.resources:
            with self.thread_lock:
                self.data.setdefault(
                    resource,
                    K8sResourceManager(resource, zabbix_host=self.zabbix_host))
                # Pod data also feeds a derived 'containers' manager.
                if resource == 'pods':
                    self.data.setdefault('containers',
                                         K8sResourceManager('containers'))

            # watcher threads
            if resource == 'containers':
                # Derived from pods; no watcher of its own.
                pass
            elif resource == 'components':
                # Component status cannot be watched; poll on a timer instead.
                thread = TimedThread(resource,
                                     self.data_resend_interval,
                                     exit_flag,
                                     daemon=self,
                                     daemon_method='watch_data')
                self.manage_threads.append(thread)
                thread.start()
            else:
                thread = WatcherThread(resource,
                                       exit_flag,
                                       daemon=self,
                                       daemon_method='watch_data')
                self.manage_threads.append(thread)
                thread.start()

            # additional looping data threads
            # (delayed past the first discovery so items exist in Zabbix)
            if resource == 'services':
                thread = TimedThread(
                    resource,
                    self.data_resend_interval,
                    exit_flag,
                    daemon=self,
                    daemon_method='report_global_data_zabbix',
                    delay_first_run_seconds=self.discovery_interval + 5)
                self.manage_threads.append(thread)
                thread.start()
            elif resource == 'containers':
                thread = TimedThread(
                    resource,
                    self.data_resend_interval,
                    exit_flag,
                    daemon=self,
                    daemon_method='report_global_data_zabbix',
                    delay_first_run_seconds=self.discovery_interval + 5)
                self.manage_threads.append(thread)
                thread.start()

    def start_api_info_threads(self):
        """Start the periodic API heartbeat thread (only when watching nodes)."""
        if 'nodes' not in self.resources:
            # Only one daemon (the one watching nodes) reports the heartbeat.
            return

        heartbeat_thread = TimedThread('api_heartbeat',
                                       self.api_zabbix_interval,
                                       exit_flag,
                                       daemon=self,
                                       daemon_method='send_heartbeat_info')
        self.manage_threads.append(heartbeat_thread)
        heartbeat_thread.start()

    def start_loop_send_discovery_threads(self):
        """Spawn one periodic Zabbix-discovery thread per watched resource."""
        for res in self.resources:
            # First run is delayed 30s so the watchers have data to discover.
            discovery_thread = TimedThread(res,
                                           self.discovery_interval,
                                           exit_flag,
                                           daemon=self,
                                           daemon_method='send_zabbix_discovery',
                                           delay_first_run=True,
                                           delay_first_run_seconds=30)
            self.manage_threads.append(discovery_thread)
            discovery_thread.start()

    def start_resend_threads(self):
        """Spawn one periodic data-resend thread per watched resource."""
        for res in self.resources:
            # First run is delayed 60s to let discovery complete first.
            resend_thread = TimedThread(res,
                                        self.data_resend_interval,
                                        exit_flag,
                                        daemon=self,
                                        daemon_method='resend_data',
                                        delay_first_run=True,
                                        delay_first_run_seconds=60)
            self.manage_threads.append(resend_thread)
            resend_thread.start()

    def get_api_for_resource(self, resource):
        if resource in ['nodes', 'components', 'secrets', 'pods', 'services']:
            api = self.core_v1
        elif resource in ['deployments', 'daemonsets', 'statefulsets']:
            api = self.apps_v1
        elif resource in ['ingresses']:
            api = self.extensions_v1
        else:
            raise AttributeError('No valid resource found: %s' % resource)
        return api

    def get_web_api(self):
        """Return the WebApi client, creating and caching it on first use."""
        try:
            return self._web_api
        except AttributeError:
            # Lazily imported/constructed so the web API is only touched
            # when actually used.
            from .web_api import WebApi
            self._web_api = WebApi(self.web_api_host,
                                   self.web_api_token,
                                   verify_ssl=self.web_api_verify_ssl)
            return self._web_api

    def watch_data(self, resource, timeout=240):
        """Watch the Kubernetes API for *resource* changes, forever.

        Streams watch events for the given resource type and feeds each event
        to watch_event_handler; the watch is restarted whenever it completes
        or times out. 'components' has no watch API and is polled instead.

        Args:
            resource: resource type name (e.g. 'pods', 'nodes').
            timeout: per-watch timeout in seconds; 0 means no timeout.
        """
        api = self.get_api_for_resource(resource)

        if timeout == 0:
            timeout_str = "no timeout"
        else:
            timeout_str = "%i seconds" % timeout

        self.logger.info(
            "Watching for resource >>>%s<<< with a timeout of %s" %
            (resource, timeout_str))
        while True:
            w = watch.Watch()
            # Each resource type maps to a different list endpoint.
            if resource == 'nodes':
                for obj in w.stream(api.list_node, timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            elif resource == 'deployments':
                for obj in w.stream(api.list_deployment_for_all_namespaces,
                                    timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            elif resource == 'daemonsets':
                for obj in w.stream(api.list_daemon_set_for_all_namespaces,
                                    timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            elif resource == 'statefulsets':
                for obj in w.stream(api.list_stateful_set_for_all_namespaces,
                                    timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            elif resource == 'components':
                # The api does not support watching on component status
                with self.thread_lock:
                    for obj in api.list_component_status(
                            watch=False).to_dict().get('items'):
                        self.data[resource].add_obj(obj)
                time.sleep(self.data_resend_interval)
            elif resource == 'ingresses':
                for obj in w.stream(api.list_ingress_for_all_namespaces,
                                    timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            elif resource == 'tls':
                for obj in w.stream(api.list_secret_for_all_namespaces,
                                    timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            elif resource == 'pods':
                for obj in w.stream(api.list_pod_for_all_namespaces,
                                    timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            elif resource == 'services':
                for obj in w.stream(api.list_service_for_all_namespaces,
                                    timeout_seconds=timeout):
                    self.watch_event_handler(resource, obj)
            else:
                # Unknown resource: log and back off instead of busy-looping.
                self.logger.error("No watch handling for resource %s" %
                                  resource)
                time.sleep(60)
            self.logger.debug(
                "Watch/fetch completed for resource >>>%s<<<, restarting" %
                resource)

    def watch_event_handler(self, resource, event):
        """Apply a single watch event (ADDED/MODIFIED/DELETED) to local state.

        Updates the resource manager under the thread lock, then pushes the
        object to Zabbix and/or the web API when the manager flags it dirty.

        Args:
            resource: resource type name the event belongs to.
            event: raw watch event dict with 'type' and 'object' keys.
        """
        event_type = event['type']
        obj = event['object'].to_dict()
        self.logger.debug(event_type + ' [' + resource + ']: ' +
                          obj['metadata']['name'])
        with self.thread_lock:
            # Without a resource_class the manager cannot wrap the object.
            if not self.data[resource].resource_class:
                self.logger.error(
                    'Could not add watch_event_handler! No resource_class for "%s"'
                    % resource)
                return

        if event_type.lower() == 'added':
            with self.thread_lock:
                resourced_obj = self.data[resource].add_obj(obj)
            # Only send what the manager marked as changed (dirty flags).
            if resourced_obj.is_dirty_zabbix or resourced_obj.is_dirty_web:
                self.send_object(
                    resource,
                    resourced_obj,
                    event_type,
                    send_zabbix_data=resourced_obj.is_dirty_zabbix,
                    send_web=resourced_obj.is_dirty_web)
        elif event_type.lower() == 'modified':
            with self.thread_lock:
                resourced_obj = self.data[resource].add_obj(obj)
            if resourced_obj.is_dirty_zabbix or resourced_obj.is_dirty_web:
                self.send_object(
                    resource,
                    resourced_obj,
                    event_type,
                    send_zabbix_data=resourced_obj.is_dirty_zabbix,
                    send_web=resourced_obj.is_dirty_web)
        elif event_type.lower() == 'deleted':
            with self.thread_lock:
                resourced_obj = self.data[resource].del_obj(obj)
                self.delete_object(resource, resourced_obj)
        else:
            self.logger.info('event type "%s" not implemented' % event_type)

    def report_global_data_zabbix(self, resource):
        """Aggregate resource-wide statistics and send them to zabbix.

        Two specialities are handled:
          * 'services'   -> total service count plus ingress-backed count
          * 'containers' -> per-namespace container data aggregated from pods

        Does nothing until zabbix discovery for *resource* has been sent.
        """
        if self.data['zabbix_discovery_sent'].get(resource) is None:
            self.logger.debug(
                'skipping report_global_data_zabbix for %s, disovery not send yet!'
                % resource)
            return

        data_to_send = list()

        if resource == 'services':
            num_services = 0
            num_ingress_services = 0
            with self.thread_lock:
                for obj_uid, resourced_obj in self.data[
                        resource].objects.items():
                    num_services += 1
                    if resourced_obj.resource_data['is_ingress']:
                        num_ingress_services += 1

            data_to_send.append(
                ZabbixMetric(self.zabbix_host,
                             'check_kubernetes[get,services,num_services]',
                             num_services))
            data_to_send.append(
                ZabbixMetric(
                    self.zabbix_host,
                    'check_kubernetes[get,services,num_ingress_services]',
                    num_ingress_services))
            self.send_data_to_zabbix(resource, None, data_to_send)
        elif resource == 'containers':
            # aggregate pod data to containers for each namespace
            with self.thread_lock:
                containers = dict()
                for obj_uid, resourced_obj in self.data['pods'].objects.items(
                ):
                    ns = resourced_obj.name_space
                    if ns not in containers:
                        containers[ns] = dict()

                    pod_data = resourced_obj.resource_data
                    pod_base_name = resourced_obj.base_name
                    try:
                        # container_status is stored as a JSON string
                        container_status = json.loads(
                            pod_data['container_status'])
                    except Exception as e:
                        self.logger.error(e)
                        continue

                    # aggregate container information
                    for container_name, container_data in container_status.items(
                    ):
                        containers[ns].setdefault(pod_base_name, dict())
                        # NOTE(review): when this setdefault inserts
                        # container_data itself, the loop below then adds
                        # container_data onto that same dict, apparently
                        # doubling the first pod's integer counters and
                        # mutating the pod's own data in place — confirm
                        # whether this is intended.
                        containers[ns][pod_base_name].setdefault(
                            container_name, container_data)

                        for k, v in containers[ns][pod_base_name][
                                container_name].items():
                            if isinstance(v, int):
                                containers[ns][pod_base_name][container_name][
                                    k] += container_data[k]
                            elif k == 'status' and container_data[
                                    k].startswith('ERROR'):
                                # propagate any ERROR status to the aggregate
                                containers[ns][pod_base_name][container_name][
                                    k] = container_data[k]

                for ns, d1 in containers.items():
                    for pod_base_name, d2 in d1.items():
                        for container_name, container_data in d2.items():
                            data_to_send += get_container_zabbix_metrics(
                                self.zabbix_host, ns, pod_base_name,
                                container_name, container_data)

                self.send_data_to_zabbix(resource, None, data_to_send)

    def resend_data(self, resource):
        """Re-deliver cached data for *resource* to zabbix and the web API.

        Meant to be called from a periodic resend loop: walks every cached
        object and forwards metrics/objects that are dirty or older than
        data_resend_interval.
        """
        with self.thread_lock:
            try:
                metrics = list()
                if resource not in self.data or len(
                        self.data[resource].objects) == 0:
                    self.logger.debug(
                        "no resource data available for %s , stop delivery" %
                        resource)
                    return

                # Zabbix
                for obj_uid, obj in self.data[resource].objects.items():
                    zabbix_send = False
                    # NOTE(review): once discovery has been sent for the
                    # resource, EVERY object is resent on each call; the
                    # "outdated" check below only applies while discovery is
                    # still pending — confirm this ordering is intended.
                    if self.data['zabbix_discovery_sent'].get(
                            resource) is not None:
                        zabbix_send = True
                    elif obj.last_sent_zabbix < (datetime.now() - timedelta(
                            seconds=self.data_resend_interval)):
                        self.logger.debug(
                            "resend zabbix : %s  - %s/%s data because its outdated"
                            % (resource, obj.name_space, obj.name))
                        zabbix_send = True
                    if zabbix_send:
                        metrics += obj.get_zabbix_metrics()
                        obj.last_sent_zabbix = datetime.now()
                        obj.is_dirty_zabbix = False
                if len(metrics) > 0:
                    if self.data['zabbix_discovery_sent'].get(
                            resource) is None:
                        # 'obj' here is the last object from the loop above;
                        # it is only used for the log message.
                        self.logger.debug(
                            'skipping resend_data zabbix , discovery for %s - %s/%s not sent yet!'
                            % (resource, obj.name_space, obj.name))
                    else:
                        self.send_data_to_zabbix(resource, metrics=metrics)

                # Web: dirty or unsubmitted objects are (re)sent immediately,
                # clean ones only when older than data_resend_interval.
                for obj_uid, obj in self.data[resource].objects.items():
                    if obj.is_dirty_web:
                        if obj.is_unsubmitted_web():
                            self.send_to_web_api(resource, obj, 'ADDED')
                        else:
                            self.send_to_web_api(resource, obj, 'MODIFIED')
                    else:
                        if obj.is_unsubmitted_web():
                            self.send_to_web_api(resource, obj, 'ADDED')
                        elif obj.last_sent_web < (datetime.now() - timedelta(
                                seconds=self.data_resend_interval)):
                            self.send_to_web_api(resource, obj, 'MODIFIED')
                            self.logger.debug(
                                "resend web : %s/%s data because its outdated"
                                % (resource, obj.name))
                    obj.last_sent_web = datetime.now()
                    obj.is_dirty_web = False
            except RuntimeError as e:
                self.logger.warning(str(e))

    def delete_object(self, resource_type, resourced_obj):
        """Propagate a deletion to the web API; zabbix is not notified here."""
        # TODO: trigger zabbix discovery, srsly?
        self.send_to_web_api(resource_type, resourced_obj, "deleted")

    def send_zabbix_discovery(self, resource):
        """Collect discovery entries for every cached object of *resource*
        and push them to zabbix as one discovery metric, then record the
        send time in self.data['zabbix_discovery_sent']."""
        with self.thread_lock:
            if resource not in self.data:
                self.logger.warning(
                    'send_zabbix_discovery: resource "%s" not in self.data... skipping!'
                    % resource)
                return

            discovery_items = []
            for _, last_obj in self.data[resource].objects.items():
                discovery_items += last_obj.get_zabbix_discovery_data()

            if discovery_items:
                # any object can build the aggregated discovery metric;
                # use the last one seen in the loop
                metric = last_obj.get_discovery_for_zabbix(discovery_items)
                self.logger.debug('sending discovery for [%s]: %s' %
                                  (resource, metric))
                self.send_discovery_to_zabbix(resource, metric=[metric])

            self.data['zabbix_discovery_sent'][resource] = datetime.now()

    def send_object(self,
                    resource,
                    resourced_obj,
                    event_type,
                    send_zabbix_data=False,
                    send_web=False):
        """Send one changed object to zabbix and/or the web API, rate limited.

        When a sink was contacted less than rate_limit_seconds ago, the
        object is marked dirty instead so a later resend pass retries it.
        """
        # send single object for updates
        with self.thread_lock:
            if send_zabbix_data:
                if resourced_obj.last_sent_zabbix < datetime.now() - timedelta(
                        seconds=self.rate_limit_seconds):
                    self.send_data_to_zabbix(resource, obj=resourced_obj)
                    resourced_obj.last_sent_zabbix = datetime.now()
                    resourced_obj.is_dirty_zabbix = False
                else:
                    self.logger.debug(
                        'obj >>>type: %s, name: %s/%s<<< not sending to zabbix! rate limited (%is)'
                        % (resource, resourced_obj.name_space,
                           resourced_obj.name, self.rate_limit_seconds))
                    # keep dirty so the resend loop retries later
                    resourced_obj.is_dirty_zabbix = True

            if send_web:
                if resourced_obj.last_sent_web < datetime.now() - timedelta(
                        seconds=self.rate_limit_seconds):
                    self.send_to_web_api(resource, resourced_obj, event_type)
                    resourced_obj.last_sent_web = datetime.now()
                    # NOTE(review): the web dirty flag is only cleared when no
                    # zabbix data was sent in the same call — confirm intent.
                    if resourced_obj.is_dirty_web is True and not send_zabbix_data:
                        # only set dirty False if send_to_web_api worked
                        resourced_obj.is_dirty_web = False
                else:
                    self.logger.debug(
                        'obj >>>type: %s, name: %s/%s<<< not sending to web! rate limited (%is)'
                        % (resource, resourced_obj.name_space,
                           resourced_obj.name, self.rate_limit_seconds))
                    resourced_obj.is_dirty_web = True

    def send_heartbeat_info(self, *args):
        """Emit a heartbeat metric so zabbix can detect a stalled daemon."""
        heartbeat = ZabbixMetric(self.zabbix_host,
                                 'check_kubernetesd[discover,api]',
                                 int(time.time()))
        result = self.send_to_zabbix([heartbeat])
        if result.failed > 0:
            self.logger.error("failed to send heartbeat to zabbix")
        else:
            self.logger.debug("successfully sent heartbeat to zabbix ")

    def send_to_zabbix(self, metrics):
        """Ship *metrics* via the zabbix sender; in dry-run mode just log.

        Returns a result object exposing at least .failed (and .processed
        when the send raised).
        """
        if not self.zabbix_dry_run:
            try:
                return self.zabbix_sender.send(metrics)
            except Exception as e:
                self.logger.error(e)
                failure = DryResult()
                failure.failed = 1
                failure.processed = 0
                return failure

        # dry run: pretend everything was accepted
        result = DryResult()
        result.failed = 0
        if self.debug_k8s_events:
            self.logger.debug('===> Sending to zabbix: %s\n' % metrics)
        return result

    def send_discovery_to_zabbix(self, resource, metric=None, obj=None):
        """Send zabbix low-level discovery data, either derived from a single
        *obj* or as a pre-built *metric* list."""
        if resource not in self.zabbix_resources:
            return

        if obj:
            discovery_data = obj.get_discovery_for_zabbix()
            if not discovery_data:
                self.logger.debug(
                    'No discovery_data for obj %s, not sending to zabbix!' %
                    obj.uid)
                return

            discovery_key = 'check_kubernetesd[discover,' + resource + ']'
            outcome = self.send_to_zabbix(
                [ZabbixMetric(self.zabbix_host, discovery_key, discovery_data)])
            if outcome.failed > 0:
                self.logger.error(
                    "failed to sent zabbix discovery: %s : >>>%s<<<" %
                    (discovery_key, discovery_data))
            elif self.zabbix_debug:
                self.logger.info(
                    "successfully sent zabbix discovery: %s  >>>>%s<<<" %
                    (discovery_key, discovery_data))
            return

        if metric:
            outcome = self.send_to_zabbix(metric)
            if outcome.failed > 0:
                self.logger.error(
                    "failed to sent mass zabbix discovery: >>>%s<<<" % metric)
            elif self.zabbix_debug:
                self.logger.info(
                    "successfully sent mass zabbix discovery: >>>%s<<<" %
                    metric)
            return

        self.logger.warning(
            'No obj or metrics found for send_discovery_to_zabbix [%s]' %
            resource)

    def send_data_to_zabbix(self, resource, obj=None, metrics=None):
        """Send item metrics for *resource* to zabbix.

        Args:
            resource: resource type; ignored unless listed in zabbix_resources.
            obj: optional resourced object whose metrics are used when no
                explicit *metrics* list is given.
            metrics: optional pre-built list of ZabbixMetric items.
        """
        if resource not in self.zabbix_resources:
            return

        # Bug fix: the default used to be a mutable list literal (shared
        # between calls); use the None sentinel instead.
        if metrics is None:
            metrics = []

        if obj and len(metrics) == 0:
            metrics = obj.get_zabbix_metrics()

        if len(metrics) == 0 and obj:
            self.logger.debug('No zabbix metrics to send for %s: %s' %
                              (obj.uid, metrics))
            return
        elif len(metrics) == 0:
            self.logger.debug('No zabbix metrics or no obj found for [%s]' %
                              resource)
            return

        if self.zabbix_single_debug:
            # send item-by-item so a failure can be attributed to one metric
            for metric in metrics:
                result = self.send_to_zabbix([metric])
                if result.failed > 0:
                    self.logger.error("failed to sent zabbix items: %s",
                                      metric)
                else:
                    self.logger.info("successfully sent zabbix items: %s",
                                     metric)
        else:
            result = self.send_to_zabbix(metrics)
            if result.failed > 0:
                self.logger.error(
                    "failed to sent %s zabbix items, processed %s items [%s: %s]"
                    % (result.failed, result.processed, resource,
                       obj.name if obj else 'metrics'))
                self.logger.debug(metrics)
            else:
                self.logger.debug(
                    "successfully sent %s zabbix items [%s: %s]" %
                    (len(metrics), resource, obj.name if obj else 'metrics'))

    def send_to_web_api(self, resource, obj, action):
        """Forward *obj*'s resource data to the web API, tagged with the
        configured cluster name; no-op for non-web resources."""
        if resource not in self.web_api_resources:
            return

        if not self.web_api_enable:
            self.logger.debug("suppressing submission of %s %s/%s" %
                              (resource, obj.name_space, obj.name))
            return

        api = self.get_web_api()
        payload = obj.resource_data
        payload['cluster'] = self.web_api_cluster
        api.send_data(resource, payload, action)
Example #30
0
class ZabbixSenderService(cotyledon.Service):
    '''Periodic worker that aggregates cluster usage and sends it to zabbix.'''

    def __init__(self, worker_id):
        # NOTE: kept for backward compatibility, but the query window is now
        # recomputed on every poll (see get_sum_memory_and_disk). Previously
        # it was computed only here, so the periodic task forever queried the
        # 24h window captured at service start-up.
        self.time_end = timeutils.isotime(datetime.datetime.utcnow())
        self.time_start = timeutils.isotime(datetime.datetime.utcnow() - datetime.timedelta(days=1))
        self.user = cfg.CONF.zabbix.username
        self.password = cfg.CONF.zabbix.keystone_pwd
        self.sender = ZabbixSender(cfg.CONF.zabbix.agent_server)
        super(ZabbixSenderService, self).__init__(worker_id)

    def get_token(self):
        """Authenticate against keystone v3 with password credentials.

        Returns:
            The token id from the X-Subject-Token response header.

        Raises:
            Exception: if keystone does not answer with 201 Created.
        """
        auth_url = 'http://controller:35357/v3/auth/tokens/'
        body = {
            "auth": {
                "identity": {
                    "methods": [
                        "password"
                    ],
                    "password": {
                        "user": {
                            "domain": {
                                "name": "Default"
                            },
                            "name": self.user,
                            "password": self.password,
                        }
                    }
                },
                "scope": {
                    "project": {
                        "domain": {
                            "name": "Default"
                        },
                        "name": "admin"
                    }
                }
            }
        }
        result = requests.post(url=auth_url, data=json.dumps(body))
        # keystone answers 201 Created on successful token issuance
        if result.status_code != 201:
            LOG.warning('response code is %s' % result.status_code)
            raise Exception('request failed in getting token.')
        else:
            return result.headers['X-Subject-Token']

    def get_sum_memory_and_disk(self):
        """Fetch per-item cluster usage for the last 24 hours and sum it.

        Returns:
            defaultdict mapping metric name (e.g. 'memory_sum') to its sum.
        """
        token = self.get_token()
        # Bug fix: refresh the 24h query window on each call instead of
        # reusing the values captured once in __init__.
        now = datetime.datetime.utcnow()
        self.time_end = timeutils.isotime(now)
        self.time_start = timeutils.isotime(now - datetime.timedelta(days=1))
        url = ('http://controller:8777/'
                'v2/instancestates/cluster_usage?&timeStart=%s&timeEnd=%s&detail=true'
                % (self.time_start, self.time_end))
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'X-Auth-Token': token,
            }
        res = requests.get(url=url, headers=headers)
        data_dic = defaultdict(lambda: 0)
        for item in res.json():
            data_dic['memory_sum'] += item['memory']['sum']
            data_dic['memory_usage_sum'] += item['memory']['usage']
            data_dic['disk_sum'] += item['disk']['sum']
            data_dic['disk_usage_sum'] += item['disk']['usage']
            data_dic['vm_num_sum'] += item['vm_num']
            data_dic['cpu_sum'] += item['cpu']['sum']
            data_dic['cpu_usage_sum'] += item['cpu']['usage']
        return data_dic

    def send_data_to_zabbix(self):
        """Package the aggregated usage as ZabbixMetric items and send them."""
        data_sum = self.get_sum_memory_and_disk()
        packet = [ZabbixMetric(cfg.CONF.zabbix.host_name, name, value)
                  for name, value in data_sum.items()]
        response_info = self.sender.send(packet)
        LOG.info('Response from zabbix_server:%s' % response_info)

    def start_task(self):
        """Start the periodic polling loop in a background thread."""
        LOG.info('Start the polling task of zabbix_sender, interval is %ss' % cfg.CONF.zabbix.interval)
        polling_periodics = periodics.PeriodicWorker.create(
           [], executor_factory=lambda:
           futures.ThreadPoolExecutor(max_workers=1))

        @periodics.periodic(spacing=cfg.CONF.zabbix.interval, run_immediately=True)
        def task(to_run_task):
            to_run_task()
        polling_periodics.add(task, self.send_data_to_zabbix)
        utils.spawn_thread(polling_periodics.start, allow_empty=True)

    def run(self):
        self.start_task()

    def terminate(self):
        super(ZabbixSenderService,self).terminate()
Example #31
0
def send_zabbix_trap(oms_event):
    """Send an OEM event to zabbix as a trap, serialized to the lock-file queue.

    Concurrent senders are ordered through a PID lock file so that zabbix
    never receives two events in the same second (it cannot distinguish
    events sharing a timestamp). Updates oms_event['TrapState'] with the
    outcome.
    """
    config = get_config()
    zabbix = config['zabbix']
    # All OEM trap fields we will forward; the list comes from the
    # omstrap.v1 MIB.
    trap_parameters = config['trap_parameters']
    # Build the Zabbix trap.
    # To avoid several messages being sent at the same moment we add a
    # lock-file mechanism so that all of our zabbix-trap-sending processes
    # take turns. We need to remember our process ID for that.
    pid = os.getpid()

    # Deal with the lock file.
    # The lock file lives in the .secure directory.
    lock_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             os.pardir, '.secure', '.lock')
    if os.path.isfile(lock_file):
        # If the file already exists, append our PID to it
        with open(lock_file, 'a+') as lock:
            lock.write(str(pid) + '\n')
    else:
        # Otherwise create it and write our PID
        with open(lock_file, 'w+') as lock:
            lock.write(str(pid) + '\n')

    logging.info('Sent PID %d to lock file' % pid)

    # Collect the trap variables
    trap_variables = dict()

    for trap_variable in trap_parameters:
        if trap_variable in oms_event:
            trap_variables.update({trap_variable: oms_event[trap_variable]})

    # Build the metric
    try:
        # As the metric value we take the same parameter set as for the
        # SNMP trap, but dump it to json and send it to zabbix in that form.
        m = ZabbixMetric(oms_event['oraEMNGEventHostName'], 'data',
                         json.dumps(trap_variables, indent=3, sort_keys=True))
        zbx = ZabbixSender(zabbix['host'])

        # Check whether it is our turn to run: our PID must be the first
        # (live) entry in the lock file.
        processes = list()
        counter = 0
        with open(lock_file, 'r') as lock:
            for line in lock:
                if line.replace('\n', '').strip() != '' and psutil.pid_exists(
                        int(line.replace('\n', '').strip())):
                    processes.append(line.replace('\n', '').strip())

        # If we are not first, wait for our turn
        if processes[0] != str(pid):
            logging.info('First PID is %s. It\'s not equal ours, sleeping' %
                         processes[0])
            logging.info('Process queue is [%s]. ' % ', '.join(processes))

        while processes[0] != str(pid) and counter < 5:
            # Wait 1 second, because zabbix cannot tell two incoming events
            # apart if they share a timestamp, and its timestamps have
            # one-second resolution.
            time.sleep(1)
            # But no more than 5 times
            counter += 1
            processes = list()
            with open(lock_file, 'r') as lock:
                for line in lock:
                    if line.replace('\n',
                                    '').strip() != '' and psutil.pid_exists(
                                        int(line.replace('\n', '').strip())):
                        processes.append(line.replace('\n', '').strip())
            logging.info('Process queue is [%s]. ' % ', '.join(processes))

        # Our turn — off we go
        if counter == 5:
            logging.info('Enough waiting, running')
        else:
            logging.info('First PID is ours, running')

        # Send the trap
        response = zbx.send([m])

        # Check the response: the send must not fail, and it must be processed
        if response is not None:
            if response.failed == 1:
                oms_event.update({
                    'TrapState':
                    oms_event['TrapState'] + ', exception zabbix'
                })
            elif response.processed == 1:
                oms_event.update(
                    {'TrapState': oms_event['TrapState'] + ', send zabbix'})
    except Exception as e:
        log_event(oms_event_to_log=oms_event)
        raise e
    finally:
        # When done, clean up after ourselves: remove our PID from the lock
        # file. It should logically be the first line, but just in case we
        # read the whole file and rewrite it with everything except the line
        # holding our PID.
        processes = list()
        with open(lock_file, 'r') as lock:
            for line in lock:
                if line.replace('\n', '').strip() != '' and psutil.pid_exists(
                        int(line.replace('\n', '').strip())):
                    processes.append(line.replace('\n', '').strip())

        with open(lock_file, 'w') as lock:
            for line in processes:
                if line != str(pid):
                    lock.write(line + '\n')

        processes.remove(str(pid))

        logging.info('Final process queue is [%s]. ' % ', '.join(processes))

        if os.path.getsize(lock_file) == 0:
            os.remove(lock_file)
Example #32
0
def zbx_enviar(host, chave, valor):
    """Send a single metric (host, key, value) to the 's-verona' zabbix server."""
    sender = ZabbixSender('s-verona')
    sender.send([ZabbixMetric(host, chave, valor)])
Example #33
0
        for i in range(len(GPU_LIST)):
            GPU_HR[i] = int(GPU_LIST[i]) * 1000
            packet.append(
                ZabbixMetric('fs1.zmrn.ru', f'GPU_{i}_claymor_hashrate',
                             int(GPU_LIST[i]) * 1000))
        packet.append(
            ZabbixMetric('fs1.zmrn.ru', 'Total_power_usage_claymor_miner',
                         int(stat2[17])))

        for i in range(len(GPU_POWER_FAN)):
            GPU_NUM = int(i / 2)
            if GPU_NUM * 2 == i:
                packet.append(
                    ZabbixMetric('fs1.zmrn.ru',
                                 f'GPU_{GPU_NUM}_claymor_FanSpeed',
                                 GPU_POWER_FAN[i]))
            else:
                packet.append(
                    ZabbixMetric('fs1.zmrn.ru', f'GPU_{GPU_NUM}_claymor_Temp',
                                 GPU_POWER_FAN[i]))

        packet.append(
            ZabbixMetric('fs1.zmrn.ru', 'claymor_active_mining_pool',
                         stat1[7]))
        zbx = ZabbixSender('192.168.1.101')
        zbx.send(packet)
    except (ConnectionRefusedError, OSError):
        packet.append(ZabbixMetric('fs1.zmrn.ru', 'claymor_live', 0))
        zbx = ZabbixSender('192.168.1.101')
        zbx.send(packet)
Example #34
0
def process_event(helper, *args, **kwargs):
    """
    # IMPORTANT
    # Do not remove the anchor macro:start and macro:end lines.
    # These lines are used to generate sample code. If they are
    # removed, the sample code will not be updated when configurations
    # are updated.

    [sample_code_macro:start]

    # The following example gets the setup parameters and prints them to the log
    zabbix_server_url_ip = helper.get_global_setting("zabbix_server_url_ip")
    helper.log_info("zabbix_server_url_ip={}".format(zabbix_server_url_ip))
    zabbix_server_port = helper.get_global_setting("zabbix_server_port")
    helper.log_info("zabbix_server_port={}".format(zabbix_server_port))

    # The following example gets the alert action parameters and prints them to the log
    zabbix_host = helper.get_param("zabbix_host")
    helper.log_info("zabbix_host={}".format(zabbix_host))

    zabbix_port = helper.get_param("zabbix_port")
    helper.log_info("zabbix_port={}".format(zabbix_port))


    # The following example adds two sample events ("hello", "world")
    # and writes them to Splunk
    # NOTE: Call helper.writeevents() only once after all events
    # have been added
    helper.addevent("hello", sourcetype="sample_sourcetype")
    helper.addevent("world", sourcetype="sample_sourcetype")
    helper.writeevents(index="summary", host="localhost", source="localhost")

    # The following example gets the events that trigger the alert
    events = helper.get_events()
    for event in events:
        helper.log_info("event={}".format(event))

    # helper.settings is a dict that includes environment configuration
    # Example usage: helper.settings["server_uri"]
    helper.log_info("server_uri={}".format(helper.settings["server_uri"]))
    [sample_code_macro:end]
    """

    helper.log_info("Alert action splunk_to_zabbix started.")

    zabbix_server_url_ip = helper.get_global_setting("zabbix_server_url_ip")
    helper.log_debug("zabbix_server_url_ip={}".format(zabbix_server_url_ip))
    zabbix_server_port = helper.get_global_setting("zabbix_server_port")
    helper.log_debug("zabbix_server_port={}".format(zabbix_server_port))

    # The alert action parameters override the global settings when set
    zabbix_host = helper.get_param("zabbix_host")
    helper.log_debug("zabbix_host={}".format(zabbix_host))

    zabbix_port = helper.get_param("zabbix_port")
    helper.log_debug("zabbix_port={}".format(zabbix_port))

    final_zabbix_host = zabbix_server_url_ip
    final_zabbix_port = zabbix_server_port

    if (zabbix_host != ""):
        final_zabbix_host = zabbix_host

    if (zabbix_port != ""):
        final_zabbix_port = zabbix_port

    metrics = []

    events = helper.get_events()
    for event in events:
        # idiom fix: 'is not None' instead of '!= None'
        if event.get("zabbix_key") is not None:
            #helper.log_info("event={}".format(event))
            for key in event.keys():
                # skip bookkeeping fields; everything else becomes a metric
                if key != "zabbix_key" and not key.startswith(
                        "__mv_") and key != "rid" and key != "time":
                    # use the event's own timestamp when present so zabbix
                    # stores the metric at the original event time
                    if event.get("time") is not None:
                        m = ZabbixMetric(event.get('zabbix_key'), key,
                                         event.get(key),
                                         int(event.get("time")))
                    else:
                        m = ZabbixMetric(event.get('zabbix_key'), key,
                                         event.get(key))
                    metrics.append(m)
        else:
            helper.log_info(
                "\"{}\" is not containing zabbix_key value".format(event))
    helper.log_debug(metrics)
    zbx = ZabbixSender(final_zabbix_host, int(final_zabbix_port))
    helper.log_debug(zbx)
    zabbix_response = zbx.send(metrics)
    helper.log_info("Zabbix Response " + str(zabbix_response))

    # dead duplicated 'return 0' blocks after this point were removed
    return 0
Example #35
0
class ZabbixSink(BaseThreadedModule):
    """
    Send events to zabbix.

    hostname: Hostname for which the metrics should be stored.
    fields: Event fields to send.
    field_prefix: Prefix to prepend to field names. For e.g. cpu_count field with default lumbermill_ prefix, the Zabbix key is lumbermill_cpu_count.
    timestamp_field: Field to provide timestamp. If not provided, current timestamp is used.
    agent_conf: Path to zabbix_agent configuration file. If set to True defaults to /etc/zabbix/zabbix_agentd.conf.
    server: Address of zabbix server. If port differs from default it can be set by appending it, e.g. 127.0.0.1:10052.
    store_interval_in_secs: sending data to zabbix in x seconds intervals.
    batch_size: sending data to zabbix if event count is above, even if store_interval_in_secs is not reached.
    backlog_size: maximum count of events waiting for transmission. Events above count will be dropped.

    Configuration template:

    - ZabbixSink:
       hostname:                        # <type: string; is: required>
       fields:                          # <type: list; is: required>
       field_prefix:                    # <default: "lumbermill_"; type: string; is: optional>
       timestamp_field:                 # <default: "timestamp"; type: string; is: optional>
       agent_conf:                      # <default: True; type: boolean||string; is: optional>
       server:                          # <default: False; type: boolean||string; is: required if agent_conf is False else optional>
       store_interval_in_secs:          # <default: 10; type: integer; is: optional>
       batch_size:                      # <default: 500; type: integer; is: optional>
       backlog_size:                    # <default: 500; type: integer; is: optional>
    """

    module_type = "output"
    """Set module type"""

    def configure(self, configuration):
        """Read configuration, create the ZabbixSender transport (either via
        a zabbix agent config file or an explicit server address) and set up
        the outgoing event buffer."""
        BaseThreadedModule.configure(self, configuration)
        self.hostname = self.getConfigurationValue("hostname")
        self.fields = self.getConfigurationValue("fields")
        self.field_prefix = self.getConfigurationValue("field_prefix")
        self.timestamp_field = self.getConfigurationValue("timestamp_field")
        self.batch_size = self.getConfigurationValue('batch_size')
        self.backlog_size = self.getConfigurationValue('backlog_size')
        self.agent_conf = self.getConfigurationValue("agent_conf")
        if self.agent_conf:
            # agent_conf == True selects the default zabbix agent config path.
            if self.agent_conf is True:
                self.agent_conf = "/etc/zabbix/zabbix_agentd.conf"
            if not os.path.isfile(self.agent_conf):
                self.logger.error("%s does not point to an existing file." % self.agent_conf)
                self.lumbermill.shutDown()
            self.zabbix_sender = ZabbixSender(use_config=self.agent_conf)
        else:
            server = self.getConfigurationValue("server")
            port = 10051  # default zabbix trapper port
            # BUGFIX: this referenced self.server, which is never assigned
            # (AttributeError at runtime); use the local value and convert
            # the split-off port to int. Also removed a leftover debug
            # self.logger.error("asdads") call.
            if ":" in server:
                server, port = server.split(":")
                port = int(port)
            self.zabbix_sender = ZabbixSender(zabbix_server=server, port=port)
        self.buffer = Buffer(self.getConfigurationValue('batch_size'), self.storeData,
                             self.getConfigurationValue('store_interval_in_secs'),
                             maxsize=self.getConfigurationValue('backlog_size'))

    def getStartMessage(self):
        """Return a human readable startup summary for logging."""
        if self.agent_conf:
            return "Config: %s. Max buffer size: %d" % (self.agent_conf, self.getConfigurationValue('backlog_size'))
        else:
            return "Server: %s. Max buffer size: %d" % (self.getConfigurationValue("server"), self.getConfigurationValue('backlog_size'))

    def initAfterFork(self):
        """Recreate the outgoing buffer in the forked worker process."""
        BaseThreadedModule.initAfterFork(self)
        self.buffer = Buffer(self.getConfigurationValue('batch_size'), self.storeData,
                             self.getConfigurationValue('store_interval_in_secs'),
                             maxsize=self.getConfigurationValue('backlog_size'))

    def handleEvent(self, event):
        """Queue the event for batched transmission via the buffer."""
        self.buffer.append(event)
        yield None

    def storeData(self, events):
        """Convert buffered events into ZabbixMetric objects and send them
        in one packet; log a warning if the server rejected any metric."""
        packet = []
        for event in events:
            # BUGFIX: timestamp was unbound when self.timestamp_field is
            # falsy; default to None (per the class docstring, the current
            # timestamp is then used).
            timestamp = None
            if self.timestamp_field:
                try:
                    timestamp = event[self.timestamp_field]
                except KeyError:
                    timestamp = None
            hostname = mapDynamicValue(self.hostname, mapping_dict=event, use_strftime=True)
            for field_name in self.fields:
                try:
                    packet.append(ZabbixMetric(hostname, "%s%s" % (self.field_prefix, field_name), event[field_name], timestamp))
                except KeyError:
                    # Event does not carry this field; skip it.
                    pass
        response = self.zabbix_sender.send(packet)
        if response.failed != 0:
            self.logger.warning("%d of %d metrics were not processed correctly." % (response.total-response.processed, response.total))

    def shutDown(self):
        """Flush any still-buffered events before shutting down."""
        self.buffer.flush()
Example #36
0
        f.close()

        df = pd.read_json(json.dumps(timings))

        df2 = df.loc[:, ['name', 'duration']]

        df2 = df2.sort_values('duration', ascending=0)

        df2.index.names = ['Index']

        df2.to_csv('/home/inmetrics/getsitetim/csv/' + tc + ".csv")

        #print(df2)

# Record this run's outcome (test case, elapsed time, success flag) as one
# CSV row appended to the local results log.
result_str = "{0},{1},{2}\n".format(tc, elapsed, success)

with open("/home/inmetrics/getsitetim/results.csv", "a") as myfile:
    myfile.write(result_str)

# Push the same measurements to Zabbix using the local agent configuration.
packet = []
packet.append(ZabbixMetric('Site TIM Selenium', 'response_selenium', elapsed))
packet.append(ZabbixMetric('Site TIM Selenium', 'success_selenium', success))

sender = ZabbixSender(
    use_config='/home/inmetrics/getsitetim/zabbix_agentd.conf')
sender.send(packet)
Example #37
0
def main():
    """Discover libvirt domains, vnics and vdisks on every host and push the
    discovery payloads plus the collected metrics to Zabbix.

    NOTE(review): relies on module-level constants (PSK_IDENTITY, PSK,
    ZABBIX_SERVER, KEY_FILE, HOST_IN_ZABBIX, DOMAIN_KEY, VNICS_KEY,
    VDISKS_KEY) and helpers (setup_logging, get_hosts, ZabbixLibvirt,
    PyZabbixPSKSocketWrapper) defined elsewhere in this file.
    """
    # Setup logging
    logger = setup_logging(__name__)

    # Socket wrapper that adds pre-shared-key protection to the sender
    # connection -- assumes PSK is a hex-encoded string; TODO confirm.
    custom_wrapper = functools.partial(PyZabbixPSKSocketWrapper,
                                       identity=PSK_IDENTITY,
                                       psk=bytes(bytearray.fromhex(PSK)))

    zabbix_sender = ZabbixSender(zabbix_server=ZABBIX_SERVER,
                                 socket_wrapper=custom_wrapper)

    host_list = get_hosts()

    # Accumulators for discovery data and metrics across all hosts.
    all_discovered_domains = []
    all_discovered_vnics = []
    all_discovered_vdisks = []
    combined_metrics = []

    for host in host_list:

        logger.info("Starting to process host: %s", host)
        # Connect to the remote libvirt daemon over ssh as root.
        uri = "qemu+ssh://root@" + host + "/system?keyfile=" + KEY_FILE

        try:
            zbxlibvirt = ZabbixLibvirt(uri)
        except LibvirtConnectionError as err:
            # Unreachable host: log and move on to the next one.
            logger.warning(err)
            continue
        except Exception as err:
            logger.exception(err)
            raise

        try:
            all_discovered_domains += zbxlibvirt.discover_domains()
            all_discovered_vnics += zbxlibvirt.discover_all_vnics()
            all_discovered_vdisks += zbxlibvirt.discover_all_vdisks()
            combined_metrics.extend(zbxlibvirt.all_metrics())
        except DomainNotFoundError as err:
            # FIXME: Catching domain not found error here and then continuing the loop
            # here causes us to skip over all other domains on that host.
            # We should catch this in the other module where we are processing each domain.
            logger.warning(err)
            continue
        except Exception as err:
            logger.exception(err)
            raise

    # Each discovery list is sent wrapped as {"data": [...]} JSON under its
    # own item key; metrics go out as a final separate packet.
    logger.info("Sending packet")
    zabbix_sender.send([
        ZabbixMetric(HOST_IN_ZABBIX, DOMAIN_KEY,
                     json.dumps({"data": all_discovered_domains}))
    ])
    zabbix_sender.send([
        ZabbixMetric(HOST_IN_ZABBIX, VNICS_KEY,
                     json.dumps({"data": all_discovered_vnics}))
    ])
    zabbix_sender.send([
        ZabbixMetric(HOST_IN_ZABBIX, VDISKS_KEY,
                     json.dumps({"data": all_discovered_vdisks}))
    ])
    zabbix_sender.send(combined_metrics)