Example no. 1
 def __init__(self, config):
     super(LogSender, self).__init__(config)
     self.metric_log = config.fetch('metric_log', 'directory')
     if self.metric_log is None:
         self._enabled = False
     elif not self.config.fetch('metric_log', 'enabled', bool):
         self._enabled = False
     self._metric_log_fds = {}
     self.queue = Queue()
     self.max_queue_size = config.fetch('sender', 'queue', int)
     self.max_size = config.fetch('metric_log', 'max_size_mb', int)
     self.max_size = self.max_size * 1024 * 1024
     self._check_size_counter = 0
Example no. 2
 def __init__(self, config):
     super(ZbxSender, self).__init__(config)
     self.host = config.fetch('zabbix', 'address')
     if self.host is None:
         self._enabled = False
     elif not config.fetch('zabbix', 'enabled', bool):
         self._enabled = False
     self.port = config.fetch('zabbix', 'port', int)
     self.max_queue_size = config.fetch('sender', 'queue', int)
     self.fqdn = config.fetch('zabbix', 'client')
     self.queue = Queue()
     self.log = logging.getLogger('ZBX-{0}:{1}'.format(
         self.host, self.port))
Example no. 3
File: zbx.py Project: vadv/mamonsu
 def __init__(self, config):
     super(Zbx, self).__init__(config)
     self.host = config.fetch('zabbix', 'address')
     self.port = config.fetch('zabbix', 'port', int)
     self.max_queue_size = config.fetch('sender', 'queue', int)
     self.fqdn = config.fetch('zabbix', 'client')
     self.queue = Queue()
     self.log = logging.getLogger(
         'ZBX-{0}:{1}'.format(self.host, self.port))
Example no. 4
File: zbx.py Project: gsmol/mamonsu
 def __init__(self, config):
     super(ZbxSender, self).__init__(config)
     self.host = config.fetch('zabbix', 'address')
     if self.host is None:
         self._enabled = False
     elif not config.fetch('zabbix', 'enabled', bool):
         self._enabled = False
     self.port = config.fetch('zabbix', 'port', int)
     self.max_queue_size = config.fetch('sender', 'queue', int)
     self.fqdn = config.fetch('zabbix', 'client')
     self.queue = Queue()
     self.log = logging.getLogger(
         'ZBX-{0}:{1}'.format(self.host, self.port))
Example no. 5
File: log.py Project: edib/mamonsu
 def __init__(self, config):
     super(LogSender, self).__init__(config)
     self.metric_log = config.fetch('metric_log', 'directory')
     if self.metric_log is None:
         self._enabled = False
     elif not self.config.fetch('metric_log', 'enabled', bool):
         self._enabled = False
     self._metric_log_fds = {}
     self.queue = Queue()
     self.max_queue_size = config.fetch('sender', 'queue', int)
     self.max_size = config.fetch('metric_log', 'max_size_mb', int)
     self.max_size = self.max_size * 1024 * 1024
     self._check_size_counter = 0
Example no. 6
File: zbx.py Project: gsmol/mamonsu
class ZbxSender(Plugin):

    Interval = 2
    _sender = True

    def __init__(self, config):
        super(ZbxSender, self).__init__(config)
        self.host = config.fetch('zabbix', 'address')
        if self.host is None:
            self._enabled = False
        elif not config.fetch('zabbix', 'enabled', bool):
            self._enabled = False
        self.port = config.fetch('zabbix', 'port', int)
        self.max_queue_size = config.fetch('sender', 'queue', int)
        self.fqdn = config.fetch('zabbix', 'client')
        self.queue = Queue()
        self.log = logging.getLogger(
            'ZBX-{0}:{1}'.format(self.host, self.port))

    def send(self, key, value, host=None, clock=None):
        if host is None:
            host = self.fqdn
        if clock is None:
            clock = int(time.time())
        metric = {
            'host': host, 'key': key,
            'value': str(value), 'clock': clock}
        self._send(metric)

    def _send(self, metric):
        if self.queue.size() > self.max_queue_size:
            self.log.error('Queue size over limit, replace last metric')
            self.queue.replace(metric)
        else:
            self.queue.add(metric)

    def run(self, zbx):
        self._flush()

    def _flush(self):
        metrics = self.queue.flush()
        if len(metrics) == 0:
            return
        data = json.dumps({
            'request': 'sender data',
            'data': metrics,
            'clock': int(time.time())
        })
        self._send_data(data)

    def _send_data(self, data):
        data_len = struct.pack('<Q', len(data))
        if platform.PY3:
            packet = b'ZBXD\x01' + data_len + str.encode(data)
        else:
            packet = 'ZBXD\x01' + data_len + data
        try:
            sock = socket.socket()
            sock.connect((self.host, self.port))
            self.log.debug('request: {0}'.format(data))
            sock.sendall(packet)
            resp_header = self._receive(sock, 13)
            resp_body_len = struct.unpack('<Q', resp_header[5:])[0]
            resp_body = self._receive(sock, resp_body_len)
            self.log.debug('response: {0}'.format(resp_body))
            if 'failed: 0' not in str(resp_body):
                self.log.error(
                    'On request:\n{0}\nget response'
                    ' with failed items:\n{1}'.format(
                        data,
                        resp_body))
        finally:
            sock.close()

    def _receive(self, sock, count):
        buf = str.encode('')
        while len(buf) < count:
            chunk = sock.recv(count - len(buf))
            if not chunk:
                break
            buf += chunk
        return buf
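
The _send_data method above speaks the Zabbix "sender data" protocol: a 5-byte magic b'ZBXD\x01', an 8-byte little-endian body length packed with struct.pack('<Q', ...), and then the JSON payload; the 13-byte response header is parsed the same way before reading the body. Below is a minimal, self-contained sketch of that framing; the build_zbx_packet helper and the sample metric are hypothetical and only mirror what the code above does.

import json
import struct
import time


def build_zbx_packet(metrics):
    # Same framing as ZbxSender._send_data: magic "ZBXD", protocol flag
    # 0x01, 8-byte little-endian body length, then the JSON body.
    body = json.dumps({
        'request': 'sender data',
        'data': metrics,
        'clock': int(time.time()),
    }).encode('utf-8')
    return b'ZBXD\x01' + struct.pack('<Q', len(body)) + body


# A payload shaped like the dict built in ZbxSender.send()
packet = build_zbx_packet([
    {'host': 'db01', 'key': 'system.up_time', 'value': '42',
     'clock': int(time.time())},
])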
Example no. 7
class ZbxSender(Plugin):

    Interval = 10
    _sender = True

    def __init__(self, config):
        super(ZbxSender, self).__init__(config)
        self.host = config.fetch('zabbix', 'address')
        if self.host is None:
            self._enabled = False
        elif not config.fetch('zabbix', 'enabled', bool):
            self._enabled = False
        self.port = config.fetch('zabbix', 'port', int)
        self.max_queue_size = config.fetch('sender', 'queue', int)
        self.fqdn = config.fetch('zabbix', 'client')
        self.queue = Queue()
        self.log = logging.getLogger('ZBX-{0}:{1}'.format(
            self.host, self.port))

    def send(self, key, value, host=None, clock=None):
        if host is None:
            host = self.fqdn
        if clock is None:
            clock = int(time.time())
        metric = {
            'host': host,
            'key': key,
            'value': str(value),
            'clock': clock
        }
        self._send(metric)

    def _send(self, metric):
        if self.queue.size() > self.max_queue_size:
            self.log.error('Queue size over limit, replace last metric')
            self.queue.replace(metric)
        else:
            self.queue.add(metric)

    def run(self, zbx):
        self._flush()

    def _flush(self):
        metrics = self.queue.flush()
        if len(metrics) == 0:
            return
        data = json.dumps({
            'request': 'sender data',
            'data': metrics,
            'clock': int(time.time())
        })
        self._send_data(data)

    def _send_data(self, data):
        data_len = struct.pack('<Q', len(data))
        if platform.PY3:
            packet = b'ZBXD\x01' + data_len + str.encode(data)
        else:
            packet = 'ZBXD\x01' + data_len + data
        try:
            sock = socket.socket()
            sock.connect((self.host, self.port))
            self.log.debug('request: {0}'.format(data))
            sock.sendall(packet)
            resp_header = self._receive(sock, 13)
            resp_body_len = struct.unpack('<Q', resp_header[5:])[0]
            resp_body = self._receive(sock, resp_body_len)
            self.log.debug('response: {0}'.format(resp_body))
            if 'failed: 0' not in str(resp_body):
                self.log.error('On request:\n{0}\nget response'
                               ' with failed items:\n{1}'.format(
                                   data, resp_body))
        finally:
            sock.close()

    def _receive(self, sock, count):
        buf = str.encode('')
        while len(buf) < count:
            chunk = sock.recv(count - len(buf))
            if not chunk:
                break
            buf += chunk
        return buf
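
Both senders push metrics through a Queue object and apply the same backpressure rule: once queue.size() exceeds the configured sender.queue limit, the newest entry is replaced rather than letting the queue grow. The sketch below is not mamonsu's Queue, only a hypothetical stand-in that exposes the four calls the senders rely on (size, add, replace, flush).

class SimpleQueue(object):
    # Hypothetical stand-in mirroring the interface used above.
    def __init__(self):
        self._items = []

    def size(self):
        return len(self._items)

    def add(self, item):
        self._items.append(item)

    def replace(self, item):
        # Overwrite the newest entry; this is how the senders cap
        # memory once max_queue_size is exceeded.
        if self._items:
            self._items[-1] = item
        else:
            self._items.append(item)

    def flush(self):
        # Hand back everything collected so far and start empty.
        items, self._items = self._items, []
        return items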
Example no. 8
class ZbxSender(Plugin):
    Interval = 10
    _sender = True

    def __init__(self, config):
        super(ZbxSender, self).__init__(config)
        self.host = config.fetch('zabbix', 'address')
        if self.host is None:
            self._enabled = False
        elif not config.fetch('zabbix', 'enabled', bool):
            self._enabled = False
        self.port = config.fetch('zabbix', 'port', int)
        self.max_queue_size = config.fetch('sender', 'queue', int)
        self.fqdn = config.fetch('zabbix', 'client')
        self.queue = Queue()
        self.log = logging.getLogger('ZBX-{0}:{1}'.format(
            self.host, self.port))

    def send(self, key, value, host=None, clock=None):
        if host is None:
            host = self.fqdn
        if clock is None:
            clock = int(time.time())
        metric = {
            'host': host,
            'key': key,
            'value': str(value),
            'clock': clock
        }
        self._send(metric)

    def _send(self, metric):
        if self.queue.size() > self.max_queue_size:
            self.log.error('Queue size over limit, replace last metric')
            self.queue.replace(metric)
        else:
            self.queue.add(metric)

    def run(self, zbx):
        self._flush()

    def _flush(self):
        metrics = self.queue.flush()
        if len(metrics) == 0:
            return
        data = json.dumps({
            'request': 'sender data',
            'data': metrics,
            'clock': int(time.time())
        })
        self._send_data(data)

    def send_file_to_zabbix(self, path):
        zabbix_client = self.config.fetch('zabbix', 'client')
        self.log.setLevel((self.config.fetch('log', 'level')).upper())

        metrics = []
        with open(path, 'r') as f:
            while True:
                lines = list(islice(f, 100))
                for line in lines:
                    try:
                        split_line = line.rstrip('\n').split('\t')
                        if len(split_line) == 3:
                            metric = {
                                'host': zabbix_client,
                                'key': split_line[2],
                                'value': split_line[1],
                                'clock': int(split_line[0])
                            }
                            metrics.append(metric)
                        else:
                            self.log.error(
                                'Can\'t load metric in line: "{0}". The line must have the format: time <tab> value <tab> metric\'s name.'
                                .format(line.rstrip('\n')))
                    except Exception as e:
                        self.log.error(
                            'Can\'t load metric in line: "{0}". Error : {1} '.
                            format(
                                line.rstrip('\n'),
                                e,
                            ))

                data = json.dumps({
                    'request': 'sender data',
                    'data': metrics,
                    'clock': int(time.time())
                })
                self._send_data(data)
                self.log.info('sent {0} metrics'.format(len(metrics)))
                metrics = []
                if not lines:
                    break

    def _send_data(self, data):
        data_len = struct.pack('<Q', len(data))
        if platform.PY3:
            packet = b'ZBXD\x01' + data_len + str.encode(data)
        else:
            packet = 'ZBXD\x01' + data_len + data
        try:
            sock = socket.socket()
            sock.connect((self.host, self.port))
            self.log.debug('request: {0}'.format(data))
            sock.sendall(packet)
            resp_header = self._receive(sock, 13)
            resp_body_len = struct.unpack('<Q', resp_header[5:])[0]
            resp_body = self._receive(sock, resp_body_len)
            self.log.debug('response: {0}'.format(resp_body))
            if 'failed: 0' not in str(resp_body):
                self.log.error('On request:\n{0}\nget response'
                               ' with failed items:\n{1}'.format(
                                   data, resp_body))
        finally:
            sock.close()

    def _receive(self, sock, count):
        buf = str.encode('')
        while len(buf) < count:
            chunk = sock.recv(count - len(buf))
            if not chunk:
                break
            buf += chunk
        return buf
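
send_file_to_zabbix reads the file in batches of 100 lines (islice) and expects one metric per line in the form "<unix time>\t<value>\t<key>", the same tab-separated layout that LogSender._write produces in the examples below. A minimal sketch of producing such a line, with a hypothetical path and metric name:

import time

# Hypothetical file and key; each line is "<clock>\t<value>\t<key>".
with open('/tmp/localhost.log', 'a') as f:
    f.write('{0}\t{1}\t{2}\n'.format(
        int(time.time()), '0.93', 'system.cpu.load'))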
Example no. 9
class LogSender(Plugin):

    Interval = 2
    _sender = True

    def __init__(self, config):
        super(LogSender, self).__init__(config)
        self.metric_log = config.fetch('metric_log', 'directory')
        if self.metric_log is None:
            self._enabled = False
        elif not self.config.fetch('metric_log', 'enabled', bool):
            self._enabled = False
        self._metric_log_fds = {}
        self.queue = Queue()
        self.max_queue_size = config.fetch('sender', 'queue', int)
        self.max_size = config.fetch('metric_log', 'max_size_mb', int)
        self.max_size = self.max_size * 1024 * 1024
        self._check_size_counter = 0

    def run(self, zbx):
        self._flush()

    def send(self, key, value, host=None, clock=None):
        metric = (key, value, host, clock)
        if self.queue.size() > self.max_queue_size:
            self.log.error('Queue size over limit, replace last metrics')
            self.queue.replace(metric)
        else:
            self.queue.add(metric)

    def _flush(self):
        metrics = self.queue.flush()
        if len(metrics) == 0:
            return
        for metric in metrics:
            self._write(metric)

    def _write(self, metric):

        key, value = metric[0], metric[1]
        host, clock = metric[2], metric[3]

        if not os.path.isdir(self.metric_log):
            try:
                os.makedirs(self.metric_log)
            except Exception as e:
                self.log.error('Create directory error: {0}'.format(e))
                sys.exit(7)

        if host is None:
            host = 'localhost'
        metric_log = os.path.join(self.metric_log, '{0}.log'.format(host))

        # rotate if reached max size limit
        if self._check_size_counter > 2:
            size = os.path.getsize(metric_log)
            if size > self.max_size:
                # rotate to file <METRIC_LOG>.archive
                backup_file = '{0}.archive'.format(metric_log)
                self.log.info(
                    'Move file {0} to {1} (max size limit reached: {2} b)'.
                    format(metric_log, backup_file, size))
                # close descriptor
                if host in self._metric_log_fds:
                    self._metric_log_fds[host].close()
                    del self._metric_log_fds[host]
                # rename
                os.rename(metric_log, backup_file)
            self._check_size_counter = 0
        self._check_size_counter += 1

        if host not in self._metric_log_fds:
            self._metric_log_fds[host] = open(metric_log, 'a')

        try:
            self._metric_log_fds[host].write("{0}\t{1}\t{2}\n".format(
                clock, value, key))
            self._metric_log_fds[host].flush()
        except Exception as e:
            self._metric_log_fds[host].close()
            del self._metric_log_fds[host]
            self.log.error('Write metric error: {0}'.format(e))
Example no. 10
File: log.py Project: edib/mamonsu
class LogSender(Plugin):

    Interval = 2
    _sender = True

    def __init__(self, config):
        super(LogSender, self).__init__(config)
        self.metric_log = config.fetch('metric_log', 'directory')
        if self.metric_log is None:
            self._enabled = False
        elif not self.config.fetch('metric_log', 'enabled', bool):
            self._enabled = False
        self._metric_log_fds = {}
        self.queue = Queue()
        self.max_queue_size = config.fetch('sender', 'queue', int)
        self.max_size = config.fetch('metric_log', 'max_size_mb', int)
        self.max_size = self.max_size * 1024 * 1024
        self._check_size_counter = 0

    def run(self, zbx):
        self._flush()

    def send(self, key, value, host=None, clock=None):
        metric = (key, value, host, clock)
        if self.queue.size() > self.max_queue_size:
            self.log.error('Queue size over limit, replace last metrics')
            self.queue.replace(metric)
        else:
            self.queue.add(metric)

    def _flush(self):
        metrics = self.queue.flush()
        if len(metrics) == 0:
            return
        for metric in metrics:
            self._write(metric)

    def _write(self, metric):

        key, value = metric[0], metric[1]
        host, clock = metric[2], metric[3]

        if not os.path.isdir(self.metric_log):
            try:
                os.makedirs(self.metric_log)
            except Exception as e:
                self.log.error('Create directory error: {0}'.format(e))
                sys.exit(7)

        if host is None:
            host = 'localhost'
        metric_log = os.path.join(
                self.metric_log, '{0}.log'.format(host))

        # rotate if reached max size limit
        if self._check_size_counter > 2:
            size = os.path.getsize(metric_log)
            if size > self.max_size:
                # rotate to file <METRIC_LOG>.archive
                backup_file = '{0}.archive'.format(metric_log)
                self.log.info(
                    'Move file {0} to {1} (max size limit reached: {2} b)'.
                    format(metric_log, backup_file, size))
                # close descriptor
                if host in self._metric_log_fds:
                    self._metric_log_fds[host].close()
                    del self._metric_log_fds[host]
                # rename
                os.rename(metric_log, backup_file)
            self._check_size_counter = 0
        self._check_size_counter += 1

        if host not in self._metric_log_fds:
            self._metric_log_fds[host] = open(metric_log, 'a')

        try:
            self._metric_log_fds[host].write("{0}\t{1}\t{2}\n".format(
                clock, value, key))
            self._metric_log_fds[host].flush()
        except Exception as e:
            self._metric_log_fds[host].close()
            del self._metric_log_fds[host]
            self.log.error('Write metric error: {0}'.format(e))