Example No. 1
class StatsdStatsLogger(BaseStatsLogger):
    def __init__(self,
                 host="localhost",
                 port=8125,
                 prefix="superset",
                 statsd_client=None):
        """
        Initializes from either params or a supplied, pre-constructed statsd client.

        If statsd_client argument is given, all other arguments are ignored and the
        supplied client will be used to emit metrics.
        """
        if statsd_client:
            self.client = statsd_client
        else:
            self.client = StatsClient(host=host, port=port, prefix=prefix)

    def incr(self, key):
        self.client.incr(key)

    def decr(self, key):
        self.client.decr(key)

    def timing(self, key, value):
        self.client.timing(key, value)

    def gauge(self, key, value):
        self.client.gauge(key, value)
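The docstring above describes two construction paths: build a client from host/port/prefix, or inject one that already exists. A minimal usage sketch of both paths, assuming the statsd package's StatsClient and Superset's BaseStatsLogger; the host and metric names are placeholders:

from statsd import StatsClient

# Path 1: the logger builds its own client from the keyword arguments.
logger = StatsdStatsLogger(host="10.0.0.5", port=8125, prefix="superset")

# Path 2: inject a pre-built client; host/port/prefix are then ignored.
shared = StatsClient(host="10.0.0.5", port=8125, prefix="superset")
logger = StatsdStatsLogger(statsd_client=shared)

logger.incr("queries.started")            # counter
logger.timing("queries.duration_ms", 42)  # timing in milliseconds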
Example No. 2
def main():
    args = parseArguments()

    # initialize statsdclient
    global statsd_client
    statsd_client = StatsClient(host=args.server,
                                port=args.port,
                                prefix=args.source)

    value = None
    try:
        with open(args.value, 'r') as yamlfile:
            server_state = yaml.safe_load(yamlfile)
            value = server_state['code']
    except yaml.YAMLError as ex:
        if hasattr(ex, 'problem_mark'):
            mark = ex.problem_mark
            print "YAML load error at position (%s:%s)" % (mark.line + 1,
                                                           mark.column + 1)
        sys.exit(1)

    print "%s sends metric [%s] with value [%s] to %s:%d" % (
        args.source, args.metric, value, args.server, args.port)

    statsd_client.gauge(args.metric, int(value))
    return 0
Example No. 3
class StatsdClient():
    def __init__(self):
        self.statsd_client = None

    def init_app(self, app, *args, **kwargs):
        self.active = app.config.get('STATSD_ENABLED')
        self.namespace = "{}.notifications.{}.".format(
            app.config.get('NOTIFY_ENVIRONMENT'),
            app.config.get('NOTIFY_APP_NAME'))

        if self.active:
            self.statsd_client = StatsClient(
                app.config.get('STATSD_HOST'),
                app.config.get('STATSD_PORT'),
                prefix=app.config.get('STATSD_PREFIX'))

    def format_stat_name(self, stat):
        return self.namespace + stat

    def incr(self, stat, count=1, rate=1):
        if self.active:
            self.statsd_client.incr(self.format_stat_name(stat), count, rate)

    def gauge(self, stat, count):
        if self.active:
            self.statsd_client.gauge(self.format_stat_name(stat), count)

    def timing(self, stat, delta, rate=1):
        if self.active:
            self.statsd_client.timing(self.format_stat_name(stat), delta, rate)

    def timing_with_dates(self, stat, start, end, rate=1):
        if self.active:
            delta = (start - end).total_seconds()
            self.statsd_client.timing(self.format_stat_name(stat), delta, rate)
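This class follows the Flask extension pattern: the wrapped client stays unconfigured until init_app reads the app config, and every emit method checks self.active first, so metrics become silent no-ops when STATSD_ENABLED is false. A hypothetical wiring sketch using the config keys read above (all values are placeholders):

from flask import Flask

app = Flask(__name__)
app.config.update(
    STATSD_ENABLED=True,
    NOTIFY_ENVIRONMENT="preview",
    NOTIFY_APP_NAME="api",
    STATSD_HOST="localhost",
    STATSD_PORT=8125,
    STATSD_PREFIX=None,
)

statsd_client = StatsdClient()
statsd_client.init_app(app)
# emitted as preview.notifications.api.tasks.processed
statsd_client.incr("tasks.processed")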
Example No. 4
class StatsdStatsLogger(BaseStatsLogger):

    def __init__(self, host='localhost', port=8125,
                 prefix='superset', statsd_client=None):
        """
        Initializes from either params or a supplied, pre-constructed statsd client.

        If statsd_client argument is given, all other arguments are ignored and the
        supplied client will be used to emit metrics.
        """
        if statsd_client:
            self.client = statsd_client
        else:
            self.client = StatsClient(host=host, port=port, prefix=prefix)

    def incr(self, key):
        self.client.incr(key)

    def decr(self, key):
        self.client.decr(key)

    def timing(self, key, value):
        self.client.timing(key, value)

    def gauge(self, key, value):
        self.client.gauge(key, value)
Example No. 5
class StatsdWrapper:
    """Simple wrapper around the statsd client."""
    statsd = None

    def __init__(self, host, port, prefix):
        if host:
            self.statsd = StatsClient(
                host=host,
                port=port,
                prefix=prefix,
            )

    def incr(self, *args):
        if self.statsd:
            self.statsd.incr(*args)

    def decr(self, *args):
        if self.statsd:
            self.statsd.decr(*args)

    def gauge(self, *args):
        if self.statsd:
            self.statsd.gauge(*args)

    def timing(self, *args):
        if self.statsd:
            self.statsd.timing(*args)

    def timer(self, *args):
        if self.statsd:
            self.statsd.timer(*args)

    def set(self, *args):
        if self.statsd:
            self.statsd.set(*args)
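Because the wrapper only creates a client when a host is given, callers never need their own None checks: with no host configured every method is a silent no-op. A short sketch of both modes (the prefix and stat names are placeholders):

metrics = StatsdWrapper(host=None, port=8125, prefix="myapp")
metrics.incr("requests")  # no-op: self.statsd was never created

metrics = StatsdWrapper(host="localhost", port=8125, prefix="myapp")
metrics.incr("requests")  # sends the counter myapp.requests:1|c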
Example No. 6
def main():
  args = parseArguments()

  # initialize statsdclient
  global statsd_client
  statsd_client = StatsClient(host=args.server, port=args.port,
                              prefix=args.source)

  value = None
  try:
    with open(args.value, 'r') as yamlfile:
      server_state = yaml.safe_load(yamlfile)
      value = server_state['code']
  except yaml.YAMLError as ex:
    if hasattr(ex, 'problem_mark'):
      mark = ex.problem_mark
      print "YAML load error at position (%s:%s)" % (mark.line + 1,
                                                     mark.column + 1)
    sys.exit(1)

  print "%s sends metric [%s] with value [%s] to %s:%d" % (
        args.source, args.metric, value, args.server,
        args.port)

  statsd_client.gauge(args.metric, int(value))
  return 0
Example No. 7
class StatsdStatsLogger(BaseStatsLogger):
    def __init__(  # pylint: disable=super-init-not-called
        self,
        host: str = "localhost",
        port: int = 8125,
        prefix: str = "superset",
        statsd_client: Optional[StatsClient] = None,
    ) -> None:
        """
        Initializes from either params or a supplied, pre-constructed statsd client.

        If statsd_client argument is given, all other arguments are ignored and the
        supplied client will be used to emit metrics.
        """
        if statsd_client:
            self.client = statsd_client
        else:
            self.client = StatsClient(host=host, port=port, prefix=prefix)

    def incr(self, key: str) -> None:
        self.client.incr(key)

    def decr(self, key: str) -> None:
        self.client.decr(key)

    def timing(self, key: str, value: float) -> None:
        self.client.timing(key, value)

    def gauge(self, key: str, value: float) -> None:
        self.client.gauge(key, value)
Example No. 8
class StatsD:
    def __init__(self, host, port=8125, prefix=None):
        from statsd import StatsClient
        self.prefix = prefix
        self.client = StatsClient(host, port)

    def send(self, value):
        import socket
        # build the metric key from the configured prefix and this hostname
        key = self.prefix + "." + socket.gethostname() + "." + "peak"
        self.client.gauge(key, value)
Example No. 9
def _gauge_metric(statsd_metric, value):
    """
    Send messages to statsd, this is similar to:
    echo "airflow.operator_successes_PythonOperator:1|c" | nc -u -w0 127.0.0.1 8125
    """
    statsd = StatsClient(host="127.0.0.1", port=8125, prefix="airflow")
    statsd.gauge(statsd_metric, value)
    # Avoid race conditions in our testing. After sending the data to
    # statsd, we should allow time for statsd exporter to collect
    # and serve new values
    sleep(0.5)
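The docstring's nc one-liner shows the wire format: each statsd metric is a plain UDP datagram of the form <prefix>.<name>:<value>|<type>, where the type code is c for a counter and g for a gauge. A sketch sending an equivalent gauge datagram with a raw socket and no client library (the metric name is a placeholder):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Equivalent to: echo "airflow.some_metric:1|g" | nc -u -w0 127.0.0.1 8125
sock.sendto(b"airflow.some_metric:1|g", ("127.0.0.1", 8125))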
Example No. 10
class StatsdStatsLogger(BaseStatsLogger):
    def __init__(self, host, port, prefix='superset'):
        self.client = StatsClient(host=host, port=port, prefix=prefix)

    def incr(self, key):
        self.client.incr(key)

    def decr(self, key):
        self.client.decr(key)

    def gauge(self, key, value):
        self.client.gauge(key, value)
Example No. 11
class StatsdStatsLogger(BaseStatsLogger):
    def __init__(self, host, port, prefix='superset'):
        self.client = StatsClient(host=host, port=port, prefix=prefix)

    def incr(self, key):
        self.client.incr(key)

    def decr(self, key):
        self.client.decr(key)

    def gauge(self, key, value):
        self.client.gauge(key, value)
Example No. 12
def rtl_433_probe():
    statsd = StatsClient(host=STATSD_HOST,
                         port=STATSD_PORT,
                         prefix=STATSD_PREFIX)

    while True:
        line, addr = sock.recvfrom(1024)

        try:
            line = parse_syslog(line)
            data = json.loads(line)

            label = sanitize(data["model"])
            if "channel" in data:
                label += ".CH" + str(data["channel"])

            if "battery" in data:
                if data["battery"] == "OK":
                    statsd.gauge(label + '.battery', 1)
                else:
                    statsd.gauge(label + '.battery', 0)

            if "humidity" in data:
                statsd.gauge(label + '.humidity', data["humidity"])

            statsd.gauge(label + '.temperature', data["temperature_C"])

        except KeyError:
            pass

        except ValueError:
            pass
Example No. 13
def rtl_433_probe():
    statsd = StatsClient(host=STATSD_HOST,
                         port=STATSD_PORT,
                         prefix=STATSD_PREFIX)

    while True:
        line, addr = sock.recvfrom(1024)

        try:
            line = parse_syslog(line)
            data = json.loads(line)

            label = sanitize(data["model"])
            if "channel" in data:
                label += ".CH" + str(data["channel"])

            if "battery" in data:
                if data["battery"] == "OK":
                    statsd.gauge(label + '.battery', 1)
                else:
                    statsd.gauge(label + '.battery', 0)

            if "humidity" in data:
                statsd.gauge(label + '.humidity', data["humidity"])

            statsd.gauge(label + '.temperature', data["temperature_C"])

        except KeyError:
            pass

        except ValueError:
            pass
Example No. 14
class StatsdStatsLogger(BaseStatsLogger):
    def __init__(self, host, port, prefix='superset'):
        self.client = StatsClient(
            host=host,
            port=port,
            prefix=prefix)

    def incr(self, key):
        self.client.incr(key)

    def decr(self, key):
        self.client.decr(key)

    def gauge(self, key, value):
        self.client.gauge(key, value)
Example No. 15
class StatsDBackend(BaseBackend):

    name = 'statsd'

    def __init__(self, config):
        self.config = config
        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.statsd = StatsClient(self.config['STATSD_HOST'],
                                  self.config['STATSD_PORT'],
                                  self.config['STATSD_PREFIX'])

    def timing(self, stat_name, delta):
        return self.statsd.timing(stat_name, delta, self.config['STATS_RATE'])

    def incr(self, stat_name, count=1):
        return self.statsd.incr(stat_name, count, self.config['STATS_RATE'])

    def decr(self, stat_name, count=1):
        return self.statsd.decr(stat_name, count, self.config['STATS_RATE'])

    def gauge(self, stat_name, value, delta=False):
        return self.statsd.gauge(stat_name, value, self.config['STATS_RATE'], delta)
Example No. 16
def check_nginx_status(coll_type, file, server, port, local):
    nginx_type = "nginx_" + coll_type.split('.')[0].strip()

    if nginx_type in file_seek:
        offset_values = int(file_seek[nginx_type][0])
        file_size = int(file_seek[nginx_type][1])
    else:
        offset_values = 0
        file_size = 0

    logfile = open(file, 'r')
    # seeklines holds the data between the position where the file was last
    # closed and the position where it is opened this time
    seeklines = seekfile(nginx_type, logfile, offset_values, file_size)
    logfile.close()

    nginx_status = {'2XX': 0, '3XX': 0, '4XX': 0, '5XX': 0}

    if seeklines != "":
        for line in seeklines:
            status_tmp = line.strip().split()[6]
            if int(status_tmp[:1]) in [2, 3, 4, 5]:
                status = status_tmp[:1] + "XX"
                if status in nginx_status:
                    nginx_status[status] += 1
                else:
                    nginx_status[status] = 1

    local_ip = local

    if local_ip:
        graphite_ip = local_ip.replace(".", "_")

    sc = StatsClient(server, port)
    for status_key, status_count in nginx_status.items():
        print(status_key, status_count)
        sc.gauge(
            graphite_ip + ".nginx." + coll_type.split('.')[0].strip() + "." +
            status_key, int(status_count))
Example No. 17
def Pressure_Data_Handler(jsonData):
    #Parse Data
    json_Dict = json.loads(jsonData)
    SensorID = json_Dict['Sensor_ID']
    Pressure = json_Dict['Pressure']
    Light = json_Dict['Light']

    #Push into statsd
    pres_statsd = StatsClient(host='172.18.138.55',
                              port=8125,
                              prefix='Pressure')
    pres_statsd.gauge(SensorID, Pressure)
    print("Inserted Pressure Data into Database.")
    print("")
    light_statsd = StatsClient(host='localhost', port=8125, prefix='Light')
    light_statsd.gauge(SensorID, Light)
    print("Inserted Light Data into Database.")
    print("")
Example No. 18
def check_nginx_status(coll_type, file, server, port, local):
    nginx_type = "nginx_" + coll_type.split('.')[0].strip()

    if nginx_type in file_seek:
        offset_values = int(file_seek[nginx_type][0])
        file_size = int(file_seek[nginx_type][1])
    else:
        offset_values = 0
        file_size = 0

    logfile = open(file, 'r')

    # seeklines holds the data between the position where the file was last
    # closed and the position where it is opened this time
    seeklines = seekfile(nginx_type, logfile, offset_values, file_size)
    logfile.close()

    nginx_status = {'2XX': 0, '3XX': 0, '4XX': 0, '5XX': 0}

    if seeklines != "":
        for line in seeklines:
            status_tmp = line.strip().split()[6]
            if int(status_tmp[:1]) in [2, 3, 4, 5]:
                status = status_tmp[:1] + "XX"
                if status in nginx_status:
                    nginx_status[status] += 1
                else:
                    nginx_status[status] = 1

    local_ip = local

    if local_ip:
        graphite_ip = local_ip.replace(".", "_")

    sc = StatsClient(server, port)
    for status_key, status_count in nginx_status.items():
        print(status_key, status_count)
        sc.gauge(graphite_ip + ".nginx." + coll_type.split('.')[0].strip() + "." + status_key, int(status_count))
Example No. 19
def check_fpm_slow(coll_type, file, server, port, local):
    fpm_slow_type = "fpm_slow_" + coll_type.split('.')[0].strip()

    if fpm_slow_type in file_seek:
        offset_values = int(file_seek[fpm_slow_type][0])
        file_size = int(file_seek[fpm_slow_type][1])
    else:
        offset_values = 0
        file_size = 0

    seeklines = ""
    try:
        logfile = open(file, 'r')
        # seeklines holds the data between the position where the file was last
        # closed and the position where it is opened this time
        seeklines = seekfile(fpm_slow_type, logfile, offset_values, file_size)
        logfile.close()
    except IOError as ioerr:
        print(ioerr)

    fpm_slow_status = {'slow_num': 0}

    if seeklines != "":
        for line in seeklines:
            fpm_slow_match = re.match(
                r'(^\[+\d+-\w+-\d+\s+\d+:\d+:\d+\])\s(.*)', line)
            if fpm_slow_match is not None:
                fpm_slow_status['slow_num'] += 1

    local_ip = local

    if local_ip:
        graphite_ip = local_ip.replace(".", "_")

    sc = StatsClient(server, port)
    for fpm_status, fpm_count in fpm_slow_status.items():
        print(fpm_status, fpm_count)
        sc.gauge(
            graphite_ip + ".fpm_slow." + coll_type.split('.')[0].strip() +
            "." + fpm_status, int(fpm_count))
Example No. 20
def rtl_433_probe():
    statsd_host = "127.0.0.1"
    statsd_port = 8125
    statsd_prefix = 'rtlsdr'

    statsd = StatsClient(host=statsd_host,
                         port=statsd_port,
                         prefix=statsd_prefix)

    while True:
        line = sys.stdin.readline()
        if not line:
            break
        try:
            data = json.loads(line)

            label = sanitize(data["model"])
            if "channel" in data:
                label += ".CH" + str(data["channel"])

            if "battery_ok" in data:
                statsd.gauge(label + '.battery', data["battery_ok"])

            if "humidity" in data:
                statsd.gauge(label + '.humidity', data["humidity"])

            statsd.gauge(label + '.temperature', data["temperature_C"])

        except KeyError:
            pass

        except ValueError:
            pass
Example No. 21
def check_fpm_slow(coll_type, file, server, port, local):
    fpm_slow_type = "fpm_slow_" + coll_type.split('.')[0].strip()

    if fpm_slow_type in file_seek:
        offset_values = int(file_seek[fpm_slow_type][0])
        file_size = int(file_seek[fpm_slow_type][1])
    else:
        offset_values = 0
        file_size = 0

    seeklines = ""
    try:
        logfile = open(file, 'r')
        # seeklines holds the data between the position where the file was last
        # closed and the position where it is opened this time
        seeklines = seekfile(fpm_slow_type, logfile, offset_values, file_size)
        logfile.close()
    except IOError as ioerr:
        print(ioerr)

    fpm_slow_status = {'slow_num': 0}

    if seeklines != "":
        for line in seeklines:
            fpm_slow_match = re.match(r'(^\[+\d+-\w+-\d+\s+\d+:\d+:\d+\])\s(.*)', line)
            if fpm_slow_match is not None:
                fpm_slow_status['slow_num'] += 1

    local_ip = local

    if local_ip:
        graphite_ip = local_ip.replace(".", "_")

    sc = StatsClient(server, port)
    for fpm_status, fpm_count in fpm_slow_status.items():
        print(fpm_status, fpm_count)
        sc.gauge(graphite_ip + ".fpm_slow." + coll_type.split('.')[0].strip() + "." + fpm_status, int(fpm_count))
Example No. 22
def main():

    statsd_client = StatsClient(statsd_host, statsd_port, prefix="wifi.parse.data")
    statsd_client.gauge("WA_SOURCE_FJ_1001.success", 0)
    statsd_client.gauge("WA_SOURCE_FJ_1001.failed", 0)
    statsd_client.gauge("WA_BASIC_FJ_1003.success", 0)
    statsd_client.gauge("WA_BASIC_FJ_1003.failed", 0)
    statsd_client.gauge("file.failed", 0)
    list = os.listdir(config["monitor_path"])  # 列出文件夹下所有的目录与文件
    for i in list:
        com_path = os.path.join(config["monitor_path"], i)
        Monitor(stastd=statsd_client, zipinfo="True").operate_change(com_path)
    event_handler = Monitor(stastd=statsd_client)
    observer = Observer()
    observer.schedule(event_handler, path=config["monitor_path"], recursive=True)  # recursive递归的
    observer.start()
    observer.join()
Example No. 23
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.app = app

        self.statsd = StatsClient(self.config['STATSD_HOST'],
                                  self.config['STATSD_PORT'],
                                  self.config['STATSD_PREFIX'])

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)

    def set(self, *args, **kwargs):
        return self.statsd.set(*args, **kwargs)
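Since every method simply proxies to the underlying StatsClient, the extension keeps the client's full interface, including timer as a context manager. A hypothetical route showing the proxied calls (do_work stands in for the real handler body):

from flask import Flask

app = Flask(__name__)
metrics = StatsD(app)

@app.route("/work")
def work():
    with metrics.timer("views.work"):  # proxied StatsClient.timer
        do_work()
    metrics.incr("views.work.hits")
    return "ok"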
Example No. 24
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.app = app

        self.statsd = StatsClient(self.config['STATSD_HOST'],
            self.config['STATSD_PORT'], self.config['STATSD_PREFIX'])

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)

    def set(self, *args, **kwargs):
        return self.statsd.set(*args, **kwargs)
Example No. 25
def rtl_433_probe():
    statsd_host = "127.0.0.1"
    statsd_port = 8125
    statsd_prefix = 'rtlsdr'

    statsd = StatsClient(host=statsd_host,
                         port=statsd_port,
                         prefix=statsd_prefix)

    while True:
        line = sys.stdin.readline()
        if not line:
            break
        try:
            data = json.loads(line)

            label = sanitize(data["model"])
            if "channel" in data:
                label += ".CH" + str(data["channel"])

            if "battery" in data:
                if data["battery"] == "OK":
                    statsd.gauge(label + '.battery', 1)
                else:
                    statsd.gauge(label + '.battery', 0)

            if "humidity" in data:
                statsd.gauge(label + '.humidity', data["humidity"])

            statsd.gauge(label + '.temperature', data["temperature_C"])

        except KeyError:
            pass

        except ValueError:
            pass
Example No. 26
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault("STATSD_HOST", "localhost")
        self.config.setdefault("STATSD_PORT", 8125)
        self.config.setdefault("STATSD_PREFIX", None)

        self.app = app

        self.statsd = StatsClient(
            host=self.config["STATSD_HOST"], port=self.config["STATSD_PORT"], prefix=self.config["STATSD_PREFIX"]
        )

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)
Example No. 27
def Temp_Data_Handler(jsonData):
    #Parse Data
    host = "172.18.138.55"
    json_Dict = json.loads(jsonData)
    SensorID = json_Dict['Sensor_ID']
    Temperature = json_Dict['Temperature']
    r, g, b = json_Dict['Colour']
    #Push into statsd
    temp_statsd = StatsClient(host=host,
                              port=8125,
                              prefix='Temperature')
    temp_statsd.gauge(SensorID, Temperature)

    print("Inserted Temperature Data into Database.")
    print("")
    light_statsd = StatsClient(host='localhost', port=8125, prefix='Light')
    light_statsd.gauge(SensorID + " " + str("r"), r)
    light_statsd.gauge(SensorID + " " + str("g"), g)
    light_statsd.gauge(SensorID + " " + str("b"), b)
    print("Inserted Light Data into Database.")
    print("")
Example No. 28
class Server():
    def __init__(self, args):
        # Setup logging - Generate a default rotating file log handler and stream handler
        logFileName = 'connector-statsd.log'
        fhFormatter = logging.Formatter(
            '%(asctime)-25s %(levelname)-7s %(message)s')
        sh = logging.StreamHandler()
        sh.setFormatter(fhFormatter)

        self.logger = logging.getLogger("server")
        self.logger.addHandler(sh)
        self.logger.setLevel(logging.DEBUG)

        self.port = int(os.getenv('VCAP_APP_PORT', '9666'))
        self.host = str(os.getenv('VCAP_APP_HOST', 'localhost'))

        if args.bluemix:
            self.options = ibmiotf.application.ParseConfigFromBluemixVCAP()
        else:
            if args.token is not None:
                self.options = {'auth-token': args.token, 'auth-key': args.key}
            else:
                self.options = ibmiotf.application.ParseConfigFile(args.config)

        # Bottle
        self._app = Bottle()
        self._route()

        # Init IOTF client
        self.client = ibmiotf.application.Client(self.options,
                                                 logHandlers=[sh])

        # Init statsd client
        if args.statsd:
            self.statsdHost = args.statsd
        else:
            self.statsdHost = "localhost"

        self.statsd = StatsClient(self.statsdHost, prefix=self.client.orgId)

    def _route(self):
        self._app.route('/', method="GET", callback=self._status)

    def myEventCallback(self, evt):
        try:
            flatData = flattenDict(evt.data, join=lambda a, b: a + '.' + b)

            self.logger.debug(
                "%-30s%s" %
                (evt.device, evt.event + ": " + json.dumps(flatData)))

            eventNamespace = evt.deviceType + "." + evt.deviceId + "." + evt.event

            self.statsd.incr("events.meta." + eventNamespace)
            for datapoint in flatData:
                eventDataNamespace = "events.data." + eventNamespace + "." + datapoint[
                    0]
                # Pass through numeric data
                # Convert boolean datapoints to numeric 0|1 representation
                # Throw away everything else (e.g. String data)
                if isinstance(datapoint[1], bool):
                    if datapoint[1]:
                        self.statsd.gauge(eventDataNamespace, 1)
                    else:
                        self.statsd.gauge(eventDataNamespace, 0)
                elif isinstance(datapoint[1], Number):
                    self.statsd.gauge(eventDataNamespace, datapoint[1])
        except Exception as e:
            self.logger.critical("%-30s%s" %
                                 (evt.device, evt.event +
                                  ": Exception processing event - " + str(e)))
            #self.logger.critical(json.dumps(evt.data))

    def start(self):
        self.client.connect()
        self.client.deviceEventCallback = self.myEventCallback
        self.client.subscribeToDeviceEvents()
        self.logger.info("Serving at %s:%s" % (self.host, self.port))
        self._app.run(host=self.host, port=self.port)

    def stop(self):
        self.client.disconnect()

    def _status(self):
        return template('status', env_options=os.environ)
Example No. 29
  GPIO.setup(ECHO, GPIO.IN)
  GPIO.output(TRIG, False)
  time.sleep(2)
  GPIO.output(TRIG, True)
  time.sleep(0.00001)
  GPIO.output(TRIG, False)
  while GPIO.input(ECHO) == 0:
    pulse_start = time.time()
  while GPIO.input(ECHO) == 1:
    pulse_end = time.time()
  pulse_duration = pulse_end - pulse_start
  distance3 = round(pulse_duration * 16960, 2)

  debug("Distance 3 %s" % distance3)

  distance = round(avg(distance1, distance2, distance3), 2)
  debug("Average %s" % distance)

  j = {}
  j["distance"] = distance
  j["updated"] = int(time.time())

  sc = StatsClient()
  sc.gauge('watersoftener.distance', distance)

  with open('/var/www/distance.json', 'w') as outfile:
    json.dump(j, outfile)

finally:
  GPIO.cleanup()
Example No. 30
def report_to_graphite(host, port, prefix, results):
    statsd = StatsClient(host=host, port=port, prefix=prefix, maxudpsize=512)
    for entry in results:
        statsd.gauge(entry['graphite key'], entry['count'])
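Constructing the client with maxudpsize=512 matters most when batching: the statsd package's pipeline() buffers stats and flushes them in datagrams no larger than that limit, rather than one packet per gauge. A sketch of the same loop using a pipeline, under the same assumed results structure:

def report_to_graphite_batched(host, port, prefix, results):
    statsd = StatsClient(host=host, port=port, prefix=prefix, maxudpsize=512)
    with statsd.pipeline() as pipe:
        for entry in results:
            pipe.gauge(entry['graphite key'], entry['count'])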
Example No. 31
class StatsdMonitor(object):
    def __init__(self, broker, interval=1):
        # self.interval = interval
        self.state = app.events.State()
        self.statsd_conn = StatsClient(host='localhost', port=8125)
        self.broker_conn = BrokerConnection(broker)
        self.timers_list = []

    # monitor the task and status of worker with functions
    def run_loop(self):
        while True:
            try:
                with self.broker_conn as conn:
                    recv = EventReceiver(conn,
                                         handlers={
                                             'task-sent':
                                             self.on_task_sent,
                                             'task-failed':
                                             self.on_task_failed,
                                             'task-retried':
                                             self.on_task_retried,
                                             'task-started':
                                             self.on_task_started,
                                             'task-succeeded':
                                             self.on_task_succeeded,
                                             'task-received':
                                             self.on_task_received,
                                             'task-rejected':
                                             self.on_task_rejected,
                                             'task-revoked':
                                             self.on_task_revoked,
                                             'worker-online':
                                             self.on_worker_online,
                                             'worker-heartbeat':
                                             self.on_worker_heartbeat,
                                             'worker-offline':
                                             self.on_worker_offline,
                                         })
                    recv.capture(limit=None, timeout=None, wakeup=True)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                raise
            # time.sleep(self.interval)

    # all about the tasks

    def on_task_sent(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        self.statsd_conn.incr('tasks.sent')

    def on_task_received(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        self.statsd_conn.incr('tasks.received')

    def on_task_started(self, event):
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.info('Task {}[{}] started'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.started')
        mark = 'task.{}.recorder'.format(task.uuid)
        self.timer_start(mark)

    def on_task_succeeded(self, event):
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.info('Task {}[{}] succeeded'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.succeeded')
        mark = 'task.{}.recorder'.format(task.uuid)
        self.timer_stop(mark)

    def on_task_failed(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.warning('Task {}[{}] failed'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.failed')

    def on_task_retried(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.warning('Task {}[{}] retried'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.retried')

    def on_task_rejected(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])

    def on_task_revoked(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])

    # all about the status of the workers

    def on_worker_online(self, event):  # TODO
        self.state.event(event)
        worker = self.state.workers.get(event['hostname'])
        mark = 'worker.{}.recorder'.format(worker.hostname)
        self.timer_start(mark)

    def on_worker_heartbeat(self, event):
        self.state.event(event)
        worker = self.state.workers.get(event['hostname'])
        key_pro = 'worker.{}.processed'.format(worker.hostname)
        key_act = 'worker.{}.active'.format(worker.hostname)
        if worker.processed is None:
            worker.processed = 0
        if worker.active is None:
            worker.active = 0
        self.statsd_conn.gauge(key_pro, worker.processed)
        self.statsd_conn.gauge(key_act, worker.active)

    def on_worker_offline(self, event):  # TODO
        self.state.event(event)
        worker = self.state.workers.get(event['hostname'])
        mark = 'worker.{}.recorder'.format(worker.hostname)
        self.timer_stop(mark)

    # statsd timer record start
    def timer_start(self, mark):
        timer = self.statsd_conn.timer(mark)
        timer.start()
        self.timers_list.append(timer)

    # statsd timer record stop
    def timer_stop(self, mark):
        for timer in self.timers_list:
            if timer.stat == mark:
                timer.stop()
                self.timers_list.remove(timer)
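Starting the monitor needs only a broker URL; run_loop blocks and translates Celery events into the counters, gauges, and timers above. A minimal sketch, assuming a local RabbitMQ broker and the celery app imported by this module:

monitor = StatsdMonitor('amqp://guest:guest@localhost:5672//')
monitor.run_loop()  # blocks; Ctrl-C raises KeyboardInterrupt and stops it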
Example No. 32
import fmi_weather_client as fmi
from fmi_weather_client.errors import ClientError, ServerError
from statsd import StatsClient

try:
    weather = fmi.weather_by_place_name("Karhi, Hausjärvi")
    if weather is not None:
        #print(f"Temperature in {weather.place} is {weather.data.temperature}")
        #print(f"Pressure in {weather.place} is {weather.data.pressure}")
        pres_statd = StatsClient(host='localhost',
                                 port=8125,
                                 prefix='Pressure')
        pres_statd.gauge('FMI.karhi', weather.data.pressure[0])
        temp_statd = StatsClient(host='localhost',
                                 port=8125,
                                 prefix='Temperature')
        temp_statd.gauge('FMI.karhi', weather.data.temperature[0])
except ClientError as err:
    print(f"Client error with status {err.status_code}: {err.message}")
except ServerError as err:
    print(f"Server error with status {err.status_code}: {err.body}")
Example No. 33
#!/usr/bin/env python
from sense_hat import SenseHat
import time
from statsd import StatsClient
sense = SenseHat()

client = StatsClient(host="docker-master.beia-consult.ro", port=8125)
while True:
    t = sense.get_temperature()
    p = sense.get_pressure()
    h = sense.get_humidity()

    t = round(t, 1)
    p = round(p, 1)
    h = round(h, 1)

    msg = "Temperature = {0}, Pressure = {1}, Humidity = {2}".format(t,p,h)
    
    client.gauge("beia.raspberry.temperature", t)
    client.gauge("beia.raspberry.pressure", p)
    client.gauge("beia.raspberry.humidity", h)
    print (msg)
    time.sleep(15)
Example No. 34
class PerIP(object):
    def __init__(self):
        self.statsd = StatsClient(
            host=STATSD_HOST,
            port=STATSD_PORT,
            prefix=STATSD_PREFIX,
            maxudpsize=512)
        self.last_vals = self.get_stats()
        sleep(5)

    def run_command(self, command):
        try:
            p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            return iter(p.stdout.readline, b'')
        except Exception as e:
            raise ValueError(command, e)

    def get_iptables_data(self, table):
        results = list()
        command = ['/sbin/iptables', '-L', table, '-nvx']
        #command = "/sbin/iptables -L {} -nvx".format(table)
        try:
            command_results = self.run_command(command)
        except ValueError as e:
            raise
        else:
            for line in command_results:
                results.append(line)
            res = dict()
            for line in results[2:]:
                result = line.split()
                if result[7] != '0.0.0.0/0':
                    try:
                        res[result[7]]['out_packets'] = result[0]
                        res[result[7]]['out_bytes'] = result[1]
                    except KeyError:
                        res[result[7]] = dict()
                        res[result[7]]['out_packets'] = result[0]
                        res[result[7]]['out_bytes'] = result[1]
                elif result[8] != '0.0.0.0/0':
                    try:
                        res[result[8]]['in_packets'] = result[0]
                        res[result[8]]['in_bytes'] = result[1]
                    except KeyError:
                        res[result[8]] = dict()
                        res[result[8]]['in_packets'] = result[0]
                        res[result[8]]['in_bytes'] = result[1]
            return res

    def get_stats(self):
        stats = dict()
        for table in TABLES:
            stats.update(self.get_iptables_data(table))
        return stats

    def calc_stats(self):
        self.change = dict()
        self.current = self.get_stats()
        for key in self.current.keys():
            self.change[key] = dict()
            for stat in self.current[key].keys():
                # print "{}.{} == {} - {}".format(
                #    key,
                #    stat,
                #    int(self.current[key][stat]),
                #    int(self.last_vals[key].get(stat, 0)))
                try:
                    self.change[key][stat] = int(self.current[key][stat]) - int(self.last_vals[key].get(stat, 0))
                except ValueError:
                    self.change[key][stat] = int(self.current[key][stat])
        self.last_vals = self.current
        return self.change

    def post_stats(self, stats):
        names = self.get_current_leases()
        for stat in stats.keys():
            for name in stats[stat]:
                if stat in names.keys():
                    host = names[stat]['hostname'].replace(".", "_")
                elif stat == '10.0.42.1':
                    host = 'gateway'
                else:
                    host = stat.replace(".", "_")
                self.statsd.gauge("byHost.{}.{}".format(host, name), stats[stat][name])
                print "    Posting byHost.{}.{} = {}".format(host, name, stats[stat][name])

    def get_current_leases(self):
        fh = open(DHCP_LEASES_FILE, 'r')
        leases = dict()
        entries = fh.readlines()
        fh.close()
        for entry in entries:
            # Sample Entry
            # 0          1                 2           3        4
            # 1433393540 04:f7:e4:8c:c3:11 10.0.42.174 HOSTNAME 01:04:f7:e4:8c:c3:11
            parts = entry.split()
            leases[parts[2]] = dict()
            if parts[3] == "*":
                leases[parts[2]]['hostname'] = parts[1]
            else:
                leases[parts[2]]['hostname'] = parts[3]
            leases[parts[2]]['mac'] = parts[1]
        fh = open(HOSTS, 'r')
        hosts = fh.readlines()
        fh.close()
        for line in hosts:
            if len(re.sub(r'\s*', '', line)) and not line.startswith('#'):
                parts = line.split()
                # print parts
                leases[parts[0]] = dict()
                leases[parts[0]]['hostname'] = parts[1]
        return leases
Example No. 35
class Server():

	def __init__(self, args):
		# Setup logging - Generate a default rotating file log handler and stream handler
		logFileName = 'connector-statsd.log'
		fhFormatter = logging.Formatter('%(asctime)-25s %(levelname)-7s %(message)s')
		sh = logging.StreamHandler()
		sh.setFormatter(fhFormatter)
		
		self.logger = logging.getLogger("server")
		self.logger.addHandler(sh)
		self.logger.setLevel(logging.DEBUG)
		
		
		self.port = int(os.getenv('VCAP_APP_PORT', '9666'))
		self.host = str(os.getenv('VCAP_APP_HOST', 'localhost'))

		if args.bluemix:
			self.options = ibmiotf.application.ParseConfigFromBluemixVCAP()
		else:
			if args.token is not None:
				self.options = {'auth-token': args.token, 'auth-key': args.key}
			else:
				self.options = ibmiotf.application.ParseConfigFile(args.config)
		
		# Bottle
		self._app = Bottle()
		self._route()
		
		# Init IOTF client
		self.client = ibmiotf.application.Client(self.options, logHandlers=[sh])
	
		# Init statsd client
		if args.statsd:
			self.statsdHost = args.statsd
		else: 
			self.statsdHost = "localhost"
		
		self.statsd = StatsClient(self.statsdHost, prefix=self.client.orgId)
		
	
	def _route(self):
		self._app.route('/', method="GET", callback=self._status)
	
	
	def myEventCallback(self, evt):
		try:
			flatData = flattenDict(evt.data, join=lambda a,b:a+'.'+b)
			
			self.logger.debug("%-30s%s" % (evt.device, evt.event + ": " + json.dumps(flatData)))
			
			eventNamespace = evt.deviceType +  "." + evt.deviceId + "." + evt.event
			
			self.statsd.incr("events.meta." + eventNamespace)
			for datapoint in flatData:
				eventDataNamespace = "events.data." + eventNamespace + "." + datapoint[0]
				# Pass through numeric data
				# Convert boolean datapoints to numeric 0|1 representation
				# Throw away everything else (e.g. String data)
				if isinstance(datapoint[1], bool):
					if datapoint[1]:
						self.statsd.gauge(eventDataNamespace, 1)
					else:
						self.statsd.gauge(eventDataNamespace, 0)
				elif isinstance(datapoint[1], Number):
					self.statsd.gauge(eventDataNamespace, datapoint[1])
		except Exception as e:
			self.logger.critical("%-30s%s" % (evt.device, evt.event + ": Exception processing event - " + str(e)))
			#self.logger.critical(json.dumps(evt.data))

	def start(self):
		self.client.connect()
		self.client.deviceEventCallback = self.myEventCallback
		self.client.subscribeToDeviceEvents()
		self.logger.info("Serving at %s:%s" % (self.host, self.port))
		self._app.run(host=self.host, port=self.port)
	
	def stop(self):
		self.client.disconnect()
		
	def _status(self):
		return template('status', env_options=os.environ)
Example No. 36
    print "Temp storage file doesn't exist yet. Will tweet the current value and save it."

print "Previous number of cups was " + str(cups_old)

#Get current number of cups.
service = Config.get("PyCoffeeD", "url")
r = requests.get(service + "/servings")
assert r.status_code < 400, "Request to PyCoffeeD server failed."
cups_current = float(r.text)
print "Current number of cups is " + str(cups_current)

#Send the current number of cups to StatsD.
url = Config.get("StatsD", "url")
port = Config.get("StatsD", "port")
client = StatsClient(host=url, port=int(port), prefix="coffee")
client.gauge("cups", value=cups_current, rate=1)

#Tweet the current number of cups, if it's changed significantly since the last tweet.
if (abs(cups_current - cups_old) > 1):
    print ("More than one cup difference between " + str(cups_current) 
        + " and " + str(cups_old) + ". Tweeting an update.")

    consumer_key = Config.get("Twitter", "consumer_key")
    consumer_secret = Config.get("Twitter", "consumer_secret")
    app_name = Config.get("Twitter", "app_name")

    twitter_creds = os.path.expanduser('./twitter_credentials')
    if not os.path.exists(twitter_creds):
        oauth_dance(app_name, consumer_key, consumer_secret,
                    twitter_creds)
Example No. 37
import argparse
import statistics

from statsd import StatsClient
from time import time, sleep


def load_temp():
    with open('/sys/class/thermal/thermal_zone0/temp') as f:
        return float(f.read()) / 1000.0


# Average a large number of samples to get a more accurate measurement
def sample_temp(average=10, delay=1.0):
    tick = time() + delay
    while True:
        temps = []
        while len(temps) < average:
            temps.append(load_temp())
            sleep(tick - time())
            tick = tick + delay
        yield statistics.mean(temps)


parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()

statsd = StatsClient()
for temp in sample_temp():
    statsd.gauge('pi.temp', temp)
    if args.verbose:
        print(temp)
Example No. 38
class Export(GlancesExport):

    """This class manages the Statsd export module."""

    def __init__(self, config=None, args=None):
        """Init the Statsd export IF."""
        super(Export, self).__init__(config=config, args=args)

        # Load the InfluxDB configuration file
        self.host = None
        self.port = None
        self.prefix = None
        self.export_enable = self.load_conf()
        if not self.export_enable:
            sys.exit(2)

        # Default prefix for stats is 'glances'
        if self.prefix is None:
            self.prefix = 'glances'

        # Init the Statsd client
        self.client = StatsClient(self.host,
                                  int(self.port),
                                  prefix=self.prefix)

    def load_conf(self, section="statsd"):
        """Load the Statsd configuration in the Glances configuration file."""
        if self.config is None:
            return False
        try:
            self.host = self.config.get_value(section, 'host')
            self.port = self.config.get_value(section, 'port')
        except NoSectionError:
            logger.critical("No Statsd configuration found")
            return False
        except NoOptionError as e:
            logger.critical("Error in the Statsd configuration (%s)" % e)
            return False
        else:
            logger.debug("Load Statsd from the Glances configuration file")
        # Prefix is optional
        try:
            self.prefix = self.config.get_value(section, 'prefix')
        except NoOptionError:
            pass
        return True

    def init(self, prefix='glances'):
        """Init the connection to the Statsd server."""
        if not self.export_enable:
            return None
        return StatsClient(self.host,
                           self.port,
                           prefix=prefix)

    def export(self, name, columns, points):
        """Export the stats to the Statsd server."""
        for i in range(len(columns)):
            if not isinstance(points[i], Number):
                continue
            stat_name = '{0}.{1}'.format(name, columns[i])
            stat_value = points[i]
            try:
                self.client.gauge(stat_name, stat_value)
            except Exception as e:
                logger.error("Can not export stats to Statsd (%s)" % e)
        logger.debug("Export {0} stats to Statsd".format(name))
Example No. 39
class Export(GlancesExport):

    """This class manages the Statsd export module."""

    def __init__(self, config=None, args=None):
        """Init the Statsd export IF."""
        GlancesExport.__init__(self, config=config, args=args)

        # Load the InfluxDB configuration file
        self.host = None
        self.port = None
        self.prefix = None
        self.export_enable = self.load_conf()
        if not self.export_enable:
            sys.exit(2)

        # Default prefix for stats is 'glances'
        if self.prefix is None:
            self.prefix = 'glances'

        # Init the Statsd client
        self.client = StatsClient(self.host,
                                  int(self.port),
                                  prefix=self.prefix)

    def load_conf(self, section="statsd"):
        """Load the Statsd configuration in the Glances configuration file."""
        if self.config is None:
            return False
        try:
            self.host = self.config.get_value(section, 'host')
            self.port = self.config.get_value(section, 'port')
        except NoSectionError:
            logger.critical("No Statsd configuration found")
            return False
        except NoOptionError as e:
            logger.critical("Error in the Statsd configuration (%s)" % e)
            return False
        else:
            logger.debug("Load Statsd from the Glances configuration file")
        # Prefix is optional
        try:
            self.prefix = self.config.get_value(section, 'prefix')
        except NoOptionError:
            pass
        return True

    def init(self, prefix='glances'):
        """Init the connection to the Statsd server."""
        if not self.export_enable:
            return None
        return StatsClient(self.host,
                           self.port,
                           prefix=prefix)

    def export(self, name, columns, points):
        """Export the stats to the Statsd server."""
        for i in range(len(columns)):
            if not isinstance(points[i], Number):
                continue
            stat_name = '{0}.{1}'.format(name, columns[i])
            stat_value = points[i]
            try:
                self.client.gauge(stat_name, stat_value)
            except Exception as e:
                logger.error("Can not export stats to Statsd (%s)" % e)
        logger.debug("Export {0} stats to Statsd".format(name))
Example No. 40
#! /usr/bin/env python

from statsd import StatsClient

statsd = StatsClient('statsd-exporter-rucio-statsd-exporter', 8125)

count = 0
with open('count.txt', 'r') as oracle_output:
    for line in oracle_output:
        if line.strip():
            count = int(line)

statsd.gauge('rucio.db.connections', count)

Example No. 41
class PostDB():

	def __init__(self,db_queue,database_config):
		mysql_endpoint = database_config['mysql_endpoint']
		mysql_user = database_config['mysql_user']
		mysql_port = database_config['mysql_port']
		mysql_dbname = database_config['mysql_dbname']
		mysql_pass = database_config['mysql_pass']
		statsd_ip = database_config['statsd_ip']
		statsd_port = database_config['statsd_port']
		self.opentsdb_url = database_config['opentsdb_url']
		query_tabname = database_config['mysql_table']['query_tab']
		table_tabname = database_config['mysql_table']['table_tab']
		query_scan_tab = database_config['mysql_table']['query_scan_tab']


		self.mysql_engine_string = mysql_endpoint+mysql_user+':'+mysql_pass+'@'+mysql_port+'/'+mysql_dbname
		self.db_queue = db_queue
		self.engine = create_engine(self.mysql_engine_string)
		self.Session = sessionmaker(bind=self.engine)
		self.session = self.Session()
		self.statsd = StatsClient(statsd_ip,statsd_port)
		self.RSQueryMonitor = self.get_query_object(query_tabname,self.engine)
		self.RSTableMonitor = self.get_table_object(table_tabname,self.engine)
		self.RSQueryScan = self.get_scan_object(query_scan_tab,self.engine)
	
	def post_db(self,metric_list):
		logging.info('%s : Waiting for json response on %d', log_identifier, os.getpid())
		#print '%s : Waiting for json response on %d' %(log_identifier, os.getpid())
		for i in range(0,len(metric_list)):
			payload = self.db_queue.get()
			logging.info('%s : JSON response received is %s', log_identifier, payload)
			#print '%s : JSON response received is %s' %(log_identifier, payload)
			for key in payload:
				if key == 'opentsdb':
					r = self.post_opentsdb(payload[key])
				elif key == 'mysql':
					self.post_mysql(payload[key])
				elif key == 'statsd':
					self.post_statsd(payload[key])

	def post_statsd(self,payload):
		for metric in range(0,len(payload)):
			for key in payload[metric]:
				self.statsd.gauge(key, payload[metric][key])

	def post_opentsdb(self,payload):
		r = requests.post(self.opentsdb_url,data=json.dumps(payload))
		logging.info('%s : HTTP response received is %s', log_identifier, r)
		print('%s : HTTP response received is %s' % (log_identifier, r))
		return r

	def post_mysql(self,payload):
		for key in payload:
			if key == 'rs_query_monitor':
				table_rows_list = payload[key]
				for row in table_rows_list:
					table_rows_dict = table_rows_list[row]
					if self.session.query(self.RSQueryMonitor).filter(self.RSQueryMonitor.query_id == table_rows_dict['query_id']).count() == 0:
						self.session.add(self.RSQueryMonitor(query_id=table_rows_dict['query_id'],username=table_rows_dict['username'].strip(),
							workmem=table_rows_dict['workmem'], num_diskhits=table_rows_dict['num_diskhits'], exec_time=table_rows_dict['exec_time'],
							queue_time=table_rows_dict['queue_time'], slot_count=table_rows_dict['slot_count'],
							starttime=table_rows_dict['starttime'],state=table_rows_dict['state'].strip(),queue=table_rows_dict['queue'],
							inner_bcast_count=table_rows_dict['inner_bcast_count'],bcast_rows=table_rows_dict['bcast_rows'],
							last_modified_on=datetime.datetime.utcnow()+ist_delta))
					else:
						row = self.session.query(self.RSQueryMonitor).filter(self.RSQueryMonitor.query_id == table_rows_dict['query_id']).first()
						row.username = table_rows_dict['username'].strip()
						row.workmem = table_rows_dict['workmem']
						row.num_diskhits = table_rows_dict['num_diskhits'] + row.num_diskhits
						row.exec_time = table_rows_dict['exec_time']
						row.queue_time = table_rows_dict['queue_time']
						row.slot_count = table_rows_dict['slot_count']
						row.starttime = table_rows_dict['starttime']
						row.state = table_rows_dict['state'].strip()
						row.queue = table_rows_dict['queue']
						row.inner_bcast_count = table_rows_dict['inner_bcast_count']
						row.bcast_rows = table_rows_dict['bcast_rows']
						row.last_modified_on = datetime.datetime.utcnow()+ist_delta
				self.session.commit()
				max_lmd = self.session.query(func.max(self.RSQueryMonitor.last_modified_on)).all()[0][0]
				done_rows = self.session.query(self.RSQueryMonitor).filter(~self.RSQueryMonitor.state.in_(['Done'])).filter(self.RSQueryMonitor.last_modified_on < max_lmd).all()
				for row in range(0,len(done_rows)):
					done_rows[row].state = 'Done'
				self.session.commit()
			if key == 'rs_table_monitor':
				table_rows_list = payload[key]
				for row in table_rows_list:
					table_rows_dict = table_rows_list[row]
					if self.session.query(self.RSTableMonitor).filter(and_(self.RSTableMonitor.schemaname == table_rows_dict['schemaname'].strip(), self.RSTableMonitor.tablename == table_rows_dict['tablename'].strip())).count() == 0:
						self.session.add(self.RSTableMonitor(schemaname=table_rows_dict['schemaname'].strip(),tablename=table_rows_dict['tablename'].strip(),pct_mem_used=table_rows_dict['pct_mem_used'],
							unsorted_rows=table_rows_dict['unsorted_rows'], statistics=table_rows_dict['statistics'], is_encoded=table_rows_dict['is_encoded'],diststyle=table_rows_dict['diststyle'],
							sortkey1=table_rows_dict['sortkey1'],skew_sortkey1=table_rows_dict['skew_sortkey1'],skew_rows=table_rows_dict['skew_rows'],m1_num_scan=table_rows_dict['m1_num_scan'],
							m1_row_scan=table_rows_dict['m1_row_scan'],m1_avg_time=table_rows_dict['m1_avg_time'],w1_num_scan=table_rows_dict['w1_num_scan'],w1_row_scan=table_rows_dict['w1_row_scan'],
							w1_avg_time=table_rows_dict['w1_avg_time'],d1_num_scan=table_rows_dict['d1_num_scan'],d1_row_scan=table_rows_dict['d1_row_scan'],d1_avg_time=table_rows_dict['d1_avg_time'],
							h6_num_scan=table_rows_dict['h6_num_scan'],h6_row_scan=table_rows_dict['h6_row_scan'], h6_avg_time=table_rows_dict['h6_avg_time'],h3_num_scan=table_rows_dict['h3_num_scan'],
							h3_row_scan=table_rows_dict['h3_row_scan'],h3_avg_time=table_rows_dict['h3_avg_time'],last_modified_on=datetime.datetime.utcnow()+ist_delta))
					else:
						row = self.session.query(self.RSTableMonitor).filter(and_(self.RSTableMonitor.schemaname == table_rows_dict['schemaname'].strip(), self.RSTableMonitor.tablename == table_rows_dict['tablename'].strip())).first()
						row.pct_mem_used = table_rows_dict['pct_mem_used']
						row.unsorted_rows = table_rows_dict['unsorted_rows']
						row.statistics = table_rows_dict['statistics']
						row.is_encoded = table_rows_dict['is_encoded']
						row.diststyle = table_rows_dict['diststyle']
						row.sortkey1 = table_rows_dict['sortkey1']
						row.skew_sortkey1 = table_rows_dict['skew_sortkey1']
						row.skew_rows = table_rows_dict['skew_rows']
						row.m1_num_scan = table_rows_dict['m1_num_scan']
						row.m1_avg_time = table_rows_dict['m1_avg_time']
						row.m1_row_scan = table_rows_dict['m1_row_scan']
						row.w1_num_scan = table_rows_dict['w1_num_scan']
						row.w1_row_scan = table_rows_dict['w1_row_scan']
						row.w1_avg_time = table_rows_dict['w1_avg_time']
						row.d1_num_scan = table_rows_dict['d1_num_scan']
						row.d1_row_scan = table_rows_dict['d1_row_scan']
						row.d1_avg_time = table_rows_dict['d1_avg_time']
						row.h6_num_scan = table_rows_dict['h6_num_scan']
						row.h6_row_scan = table_rows_dict['h6_row_scan']
						row.h6_avg_time = table_rows_dict['h6_avg_time']
						row.h3_num_scan = table_rows_dict['h3_num_scan']
						row.h3_row_scan = table_rows_dict['h3_row_scan']
						row.h3_avg_time = table_rows_dict['h3_avg_time']
						row.last_modified_on = datetime.datetime.utcnow()+ist_delta
				self.session.commit()
			if key == 'rs_scan_monitor':
				table_rows_list = payload[key]
				for row in table_rows_list:
					table_rows_dict = table_rows_list[row]
					if self.session.query(self.RSQueryScan).filter(and_(self.RSQueryScan.query_id == table_rows_dict['query_id'],self.RSQueryScan.tablename == table_rows_dict['tablename'].strip(), self.RSQueryScan.queue == table_rows_dict['queue'])).count() == 0:
						self.session.add(self.RSQueryScan(query_id=table_rows_dict['query_id'],queue=table_rows_dict['queue'],tablename=table_rows_dict['tablename'].strip(),
							query_start_time=table_rows_dict['query_start_time'], range_scan_pct=table_rows_dict['range_scan_pct'], rows_scan=table_rows_dict['rows_scan'],
							avg_time=table_rows_dict['avg_time'],last_modified_on=datetime.datetime.utcnow()+ist_delta))
					else:
						row = self.session.query(self.RSQueryScan).filter(and_(self.RSQueryScan.query_id == table_rows_dict['query_id'],self.RSQueryScan.tablename == table_rows_dict['tablename'].strip(), self.RSQueryScan.queue == table_rows_dict['queue'])).first()
						row.range_scan_pct = table_rows_dict['range_scan_pct']
						row.rows_scan = table_rows_dict['rows_scan']
						row.avg_time = table_rows_dict['avg_time']
						row.last_modified_on = datetime.datetime.utcnow()+ist_delta
				self.session.commit()

	def get_query_object(self,tablename,engine):
		metadata = MetaData(bind=engine)
		rs_query_monitor = Table(tablename, metadata,
			Column('query_id', Integer(), primary_key=True),
			Column('username', String(255)),
			Column('workmem', String(255)),
			Column('num_diskhits', Integer()),
			Column('inner_bcast_count', Integer()),
			Column('bcast_rows', Integer()),
			Column('exec_time', Integer()),
			Column('slot_count', Integer()),
			Column('queue_time', Integer()),
			# a callable default is evaluated per row; a plain utcnow() call would be frozen at import time
			Column('starttime', DateTime(), default=lambda: datetime.datetime.utcnow() + ist_delta),
			Column('state', String(255)),
			Column('queue', Integer()),
			Column('last_modified_on', DateTime(), default=lambda: datetime.datetime.utcnow() + ist_delta))
		rs_query_monitor.create(checkfirst=True)
		clear_mappers()
		mapper(RSQueryMonitor, rs_query_monitor)
		return RSQueryMonitor

	def get_table_object(self,tablename,engine):
		metadata = MetaData(bind=engine)
		rs_tab_monitor = Table(tablename, metadata,
			Column('id', Integer(), primary_key=True),
			Column('schemaname', String(255), nullable=False),
			Column('tablename', String(255), nullable=False),
			Column('pct_mem_used', Float()),
			Column('unsorted_rows', Float()),
			Column('statistics', Float()),
			Column('is_encoded', String(255)),
			Column('diststyle', String(255)),
			Column('sortkey1', String(255)),
			Column('skew_sortkey1', String(255)),
			Column('skew_rows', Float()),
			Column('m1_num_scan', Float()),
			Column('m1_row_scan', Float()),
			Column('m1_avg_time', Float()),
			Column('w1_num_scan', Float()),
			Column('w1_row_scan', Float()),
			Column('w1_avg_time', Float()),
			Column('d1_num_scan', Float()),
			Column('d1_row_scan', Float()),
			Column('d1_avg_time', Float()),
			Column('h6_num_scan', Float()),
			Column('h6_row_scan', Float()),
			Column('h6_avg_time', Float()),
			Column('h3_num_scan', Float()),
			Column('h3_row_scan', Float()),
			Column('h3_avg_time', Float()),
			Column('last_modified_on', DateTime(), default=lambda: datetime.datetime.utcnow() + ist_delta))
		rs_tab_monitor.create(checkfirst=True)
		#clear_mappers()
		mapper(RSTableMonitor, rs_tab_monitor)
		return RSTableMonitor

	def get_scan_object(self,tablename,engine):
		metadata = MetaData(bind=engine)
		rs_scan_monitor = Table(tablename,metadata,
			Column('id', Integer(), primary_key=True),
			Column('query_id', Integer(),nullable=False),
			Column('queue', Integer(),nullable=False),
			Column('tablename', String(255),nullable=False),
			Column('query_start_time', DateTime(), default=lambda: datetime.datetime.utcnow() + ist_delta),
			Column('range_scan_pct', Integer()),
			Column('rows_scan', Integer()),
			Column('avg_time', Integer()),
			Column('last_modified_on', DateTime(), default=lambda: datetime.datetime.utcnow() + ist_delta))
		rs_scan_monitor.create(checkfirst=True)
		mapper(RSQueryScan,rs_scan_monitor)
		return RSQueryScan
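
Taken together, these helpers build their tables with SQLAlchemy's classical mapper() API (SQLAlchemy 1.x; removed in 2.0). A minimal sketch of how they might be wired up; the RSMonitor owner class, the SQLite URL, the value of ist_delta and the placeholder mapped classes are assumptions, not part of the snippet:

import datetime

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# plain classes for the classical mappers above (assumed; the snippet references them as globals)
class RSQueryMonitor(object): pass
class RSTableMonitor(object): pass
class RSQueryScan(object): pass

ist_delta = datetime.timedelta(hours=5, minutes=30)  # assumed IST offset used throughout

engine = create_engine('sqlite:///rs_monitor.db')  # illustrative URL
monitor = RSMonitor()                              # hypothetical owner of the methods above
monitor.session = sessionmaker(bind=engine)()
# get_query_object calls clear_mappers(), so it must run before the other two helpers
monitor.RSQueryMonitor = monitor.get_query_object('rs_query_monitor', engine)
monitor.RSTableMonitor = monitor.get_table_object('rs_table_monitor', engine)
monitor.RSQueryScan = monitor.get_scan_object('rs_scan_monitor', engine)

Because get_query_object calls clear_mappers(), calling it after the other helpers would discard their mappings; on current SQLAlchemy the declarative registry would be the idiomatic replacement.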
Exemplo n.º 42
0
        "system.cpu.soft_interrupts": cpu_stats_change["soft_interrupts"],
        "system.cpu.syscalls": cpu_stats_change["syscalls"],
        "system.load": psutil.getloadavg()[0],
        "system.disk.size.used": disk.used,
        "system.disk.size.free": disk.free,
        "system.disk.size.percent": disk.percent,
        "system.disk.read.bytes": disk_io_change["read_bytes"],
        "system.disk.read.count": disk_io_change["read_count"],
        "system.disk.read.merged_count": disk_io_change["read_merged_count"],
        "system.disk.read.time": disk_io_change["read_time"],
        "system.disk.write.bytes": disk_io_change["write_bytes"],
        "system.disk.write.count": disk_io_change["write_count"],
        "system.disk.write.merged_count": disk_io_change["write_merged_count"],
        "system.disk.write.time": disk_io_change["write_time"],
        "system.disk.busy_time": disk_io_change["busy_time"],
        "system.net.in.packets": net_io_change["packets_recv"],
        "system.net.in.bytes": net_io_change["bytes_recv"],
        "system.net.in.errors": net_io_change["errin"],
        "system.net.in.dropped": net_io_change["dropin"],
        "system.net.out.packets": net_io_change["packets_sent"],
        "system.net.out.bytes": net_io_change["bytes_sent"],
        "system.net.out.errors": net_io_change["errout"],
        "system.net.out.dropped": net_io_change["dropout"],
        "system.uptime": uptime,
    }

    for name, value in gauges.items():
        statsd.gauge(name, value)

    time.sleep(sleep_seconds)
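
The gauge fragment above reads cpu_stats_change, disk_io_change, net_io_change, disk, uptime and sleep_seconds, all defined in the elided top of the script. A minimal sketch of how those values could be produced with psutil; the helper name and the interval are assumptions:

import time

import psutil
from statsd import StatsClient

statsd = StatsClient()         # assumed local statsd agent
sleep_seconds = 10             # assumed polling interval
disk = psutil.disk_usage('/')  # provides .used / .free / .percent

def counter_delta(current, previous):
    """Per-interval change between two psutil named-tuple samples (assumed helper)."""
    return {field: getattr(current, field) - getattr(previous, field)
            for field in current._fields}

prev_cpu = psutil.cpu_stats()
prev_disk = psutil.disk_io_counters()
prev_net = psutil.net_io_counters()
time.sleep(sleep_seconds)
cpu_stats_change = counter_delta(psutil.cpu_stats(), prev_cpu)
disk_io_change = counter_delta(psutil.disk_io_counters(), prev_disk)
net_io_change = counter_delta(psutil.net_io_counters(), prev_net)
uptime = time.time() - psutil.boot_time()  # seconds since boot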
class ReplicationLagChecker(object):

    def __init__(self, args):
        """ initialize the args and setup a stats client """
        self._source_host = args.source_host
        self._target_host = args.target_host
        self._replica_set = args.replica_set
        self._user = args.user
        self._password = args.password
        self._poll_interval = args.interval
        self._lag_key = args.region + '_' + args.replica_set + '_lag'
        # We assume a local collectd installation
        self._stat_client = StatsClient()

    def setup_source_db(self):
        """ setup the source mongo connection which is a replica set """
        conn = MongoReplicaSetClient(host=self._source_host,
                                     replicaSet=self._replica_set,
                                     read_preference=ReadPreference.PRIMARY)
        conn['admin'].authenticate(self._user, self._password)
        return conn

    def setup_target_db(self):
        """ setup the target mongo connection which is a standalone client """
        conn = MongoClient(host=self._target_host)
        conn['admin'].authenticate(self._user, self._password)
        return conn

    def run(self):
        """ Check the latest oplog from source oplog collection
            and the latest oplog from target mongo connector collection
            and compute the lag """
        try:
            source_conn = self.setup_source_db()
            target_conn = self.setup_target_db()
            target_collection = 'oplog' + self._replica_set

            while True:
                try:
                    # Induce an operation on the replication test database
                    db_name = 'ReplTest_' + self._replica_set.upper()
                    source_conn[db_name]['operation'].replace_one({'replica': self._replica_set}, {
                                                                  'replica': self._replica_set, 'ts': int(time.time())}, upsert=True)

                    # Wait a bit for it to replicate
                    time.sleep(10)

                    # check latest oplog of source
                    entry = source_conn['local'][
                        'oplog.rs'].find().sort('$natural', -1).limit(1)
                    source_oplog = entry[0]['ts'].time

                    # get latest oplog from connector target oplog collection
                    entry = target_conn['__mongo_connector'][
                        target_collection].find().sort('_ts', -1).limit(1)
                    # seconds live in the upper 32 bits of the packed BSON timestamp
                    target_oplog = entry[0]['_ts'] >> 32

                    lag = source_oplog - target_oplog
                    self._stat_client.gauge(self._lag_key, lag)

                    time.sleep(self._poll_interval)
                except Exception:
                    logger.exception('Connection Failed, retrying..')
                    time.sleep(5)

        except Exception:
            logger.exception('Critical Error, bailing out..')
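
A minimal driver for the checker, assuming an argparse namespace carrying the attributes read in __init__; every default below is illustrative:

import argparse

parser = argparse.ArgumentParser(description='Monitor mongo-connector replication lag')
parser.add_argument('--source-host', dest='source_host', default='mongo-primary:27017')
parser.add_argument('--target-host', dest='target_host', default='mongo-target:27017')
parser.add_argument('--replica-set', dest='replica_set', default='rs0')
parser.add_argument('--user', default='monitor')
parser.add_argument('--password', default='secret')
parser.add_argument('--interval', type=int, default=60)
parser.add_argument('--region', default='us-east-1')

if __name__ == '__main__':
    # emits gauges under <region>_<replica_set>_lag via the local statsd agent
    checker = ReplicationLagChecker(parser.parse_args())
    checker.run()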
Exemplo n.º 44
0
class ClfTrainer:
    def __init__(self, clf_model, clf_dataset):
        self.clf_model = clf_model
        self.clf_dataset = clf_dataset
        self.statsd = StatsClient(host='10.219.30.153', port=8125)

    def __run_train__(self,
                      sess,
                      input,
                      output,
                      batch_i,
                      batch_size,
                      cost_func,
                      train_op,
                      scale_to_imagenet=False):

        total_loss = 0
        count = 0

        for batch_features, batch_labels in self.clf_dataset.get_training_batches_from_preprocessed(
                batch_i, batch_size, scale_to_imagenet):
            loss, _ = sess.run([cost_func, train_op],
                               feed_dict={
                                   input: batch_features,
                                   output: batch_labels
                               })
            total_loss = total_loss + loss
            count = count + 1

        return total_loss / count

    def __run_accuracy_in_valid_set__(self,
                                      sess,
                                      input,
                                      output,
                                      accuracy,
                                      batch_size,
                                      scale_to_imagenet=False):
        valid_features, valid_labels = self.clf_dataset.get_valid_set(
            scale_to_imagenet)

        valid_acc = 0
        for batch_valid_features, batch_valid_labels in self.clf_dataset.get_batches_from(
                valid_features, valid_labels, batch_size):
            valid_acc += sess.run(accuracy,
                                  feed_dict={
                                      input: batch_valid_features,
                                      output: batch_valid_labels
                                  })

        tmp_num = valid_features.shape[0] / batch_size
        return valid_acc / tmp_num

    def __train__(self,
                  input,
                  output,
                  cost_func,
                  train_op,
                  accuracy,
                  epochs,
                  batch_size,
                  save_model_path,
                  model_name,
                  save_every_epoch=1):
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            print('starting training ... ')
            for epoch in range(epochs):
                n_batches = self.clf_dataset.num_batch

                for batch_i in range(1, n_batches + 1):
                    loss = self.__run_train__(sess, input, output, batch_i,
                                              batch_size, cost_func, train_op,
                                              self.clf_model.scale_to_imagenet)
                    print('Epoch {:>2}, {} Batch {}: '.format(
                        epoch + 1, self.clf_dataset.name, batch_i),
                          end='')

                    metric_name = "{},project={},dataset={},model={}".format(
                        'epoch', 'deepmodels', self.clf_dataset.name,
                        model_name)
                    self.statsd.gauge(metric_name, epoch + 1)

                    metric_name = "{},project={},dataset={},model={}".format(
                        'batch', 'deepmodels', self.clf_dataset.name,
                        model_name)
                    self.statsd.gauge(metric_name, batch_i)

                    print('Avg. Loss: {} '.format(loss), end='')

                    metric_name = "{},project={},dataset={},model={}".format(
                        'loss', 'deepmodels', self.clf_dataset.name,
                        model_name)
                    self.statsd.gauge(metric_name, loss)

                    valid_acc = self.__run_accuracy_in_valid_set__(
                        sess, input, output, accuracy, batch_size,
                        self.clf_model.scale_to_imagenet)
                    print('Validation Accuracy {:.6f}'.format(valid_acc))

                    metric_name = "{},project={},dataset={},model={}".format(
                        'accuracy', 'deepmodels', self.clf_dataset.name,
                        model_name)
                    self.statsd.gauge(metric_name, valid_acc)

                if epoch % save_every_epoch == 0:
                    print('epoch: {} is saved...'.format(epoch + 1))
                    saver = tf.train.Saver()
                    saver.save(sess,
                               save_model_path,
                               global_step=epoch + 1,
                               write_meta_graph=False)

    def __get_simple_losses_and_accuracy__(self,
                                           out_layers,
                                           output,
                                           learning_rate,
                                           options=None):
        is_loss_weights_considered = False
        label_smoothings = [0 for i in range(len(out_layers))]

        if options is not None:
            if 'loss_weights' in options and \
                len(options['loss_weights']) == len(out_layers):
                is_loss_weights_considered = True

            if 'label_smoothings' in options and \
                len(options['label_smoothings']) == len(out_layers):
                label_smoothings = options['label_smoothings']

        aux_cost_sum = 0
        if is_loss_weights_considered:
            for i in range(len(out_layers) - 1):
                aux_out_layer = out_layers[i]
                aux_label_smoothing = label_smoothings[i]
                aux_cost = tf.losses.softmax_cross_entropy(
                    output,
                    aux_out_layer,
                    label_smoothing=aux_label_smoothing,
                    reduction=tf.losses.Reduction.MEAN)
                aux_cost_sum += aux_cost * options['loss_weights'][i]

        final_out_layer = out_layers[len(out_layers) - 1]
        final_label_smoothing = label_smoothings[len(out_layers) - 1]
        cost = tf.losses.softmax_cross_entropy(
            output,
            final_out_layer,
            label_smoothing=final_label_smoothing,
            reduction=tf.losses.Reduction.MEAN)

        if is_loss_weights_considered:
            cost = cost * options['loss_weights'][len(out_layers) - 1]

        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        gradients = optimizer.compute_gradients(cost + aux_cost_sum)
        train_op = optimizer.apply_gradients(gradients)

        correct_pred = tf.equal(tf.argmax(final_out_layer, 1),
                                tf.argmax(output, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        return cost, train_op, accuracy

    def __get_losses_and_accuracy__(self,
                                    model,
                                    output,
                                    out_layers,
                                    learning_rate,
                                    options=None):
        optimizer_from_paper_flag = True

        if options is None or not options.get('optimizer_from_paper'):
            optimizer_from_paper_flag = False

        if isinstance(model, AlexNet):
            return get_alexnet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \
                   self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)
        elif isinstance(model, VGG):
            return get_vgg_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \
                   self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)
        elif isinstance(model, GoogLeNet):
            return get_googlenet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \
                   self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.3, 0.3, 1.0]})
        elif isinstance(model, ResNet):
            return get_resnet_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \
                   self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, None)
        elif isinstance(model, InceptionV2):
            return get_inceptionv2_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \
                   self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.4, 1.0]})
        elif isinstance(model, InceptionV3):
            return get_inceptionv3_trainer(output, out_layers, learning_rate) if optimizer_from_paper_flag else \
                   self.__get_simple_losses_and_accuracy__(out_layers, output, learning_rate, {'loss_weights': [0.4, 1.0], 'label_smoothings': [0.1, 0.1]})
        else:
            return self.__get_simple_losses_and_accuracy__(
                out_layers, output, learning_rate, options)

    # default to use AdamOptimizer w/ softmax_cross_entropy_with_logits_v2
    def run_training(self,
                     epochs,
                     batch_size,
                     learning_rate,
                     save_model_to,
                     model_name,
                     save_every_epoch=1,
                     options=None):
        input, output = self.clf_model.set_dataset(self.clf_dataset)
        out_layers = self.clf_model.create_model(input)

        cost, train_op, accuracy = self.__get_losses_and_accuracy__(
            self.clf_model, output, out_layers, learning_rate, options)

        self.__train__(input, output, cost, train_op, accuracy, epochs,
                       batch_size, save_model_to, model_name, save_every_epoch)

    def resume_training_from_ckpt(self,
                                  epochs,
                                  batch_size,
                                  learning_rate,
                                  save_model_from,
                                  save_model_to,
                                  save_every_epoch=1,
                                  options=None):
        graph = tf.Graph()
        with graph.as_default():
            input, output = self.clf_model.set_dataset(self.clf_dataset)
            out_layers = self.clf_model.create_model(input)

            cost, train_op, accuracy = self.__get_losses_and_accuracy__(
                self.clf_model, output, out_layers, learning_rate, options)

        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())

            saver = tf.train.Saver(tf.trainable_variables())
            saver.restore(sess, save_model_from)

            print('starting training ... ')
            for epoch in range(epochs):
                n_batches = self.clf_dataset.num_batch

                for batch_i in range(1, n_batches + 1):
                    loss = self.__run_train__(sess, input, output, batch_i,
                                              batch_size, cost, train_op,
                                              self.clf_model.scale_to_imagenet)
                    print('Epoch {:>2}, {} Batch {}: '.format(
                        epoch + 1, self.clf_dataset.name, batch_i),
                          end='')
                    print('Avg. Loss: {} '.format(loss), end='')

                    valid_acc = self.__run_accuracy_in_valid_set__(
                        sess, input, output, accuracy, batch_size,
                        self.clf_model.scale_to_imagenet)
                    print('Validation Accuracy {:.6f}'.format(valid_acc))

                if epoch % save_every_epoch == 0:
                    print('epoch: {} is saved...'.format(epoch + 1))
                    saver1 = tf.train.Saver()
                    saver1.save(sess,
                                save_model_to,
                                global_step=epoch + 1,
                                write_meta_graph=False)

    def run_transfer_learning(self,
                              epochs,
                              batch_size,
                              learning_rate,
                              save_model_from,
                              save_model_to,
                              save_every_epoch=1,
                              options=None):
        graph = tf.Graph()
        with graph.as_default():
            input, output = self.clf_model.set_dataset(self.clf_dataset)
            out_layers = self.clf_model.create_model(input)

            cost, train_op, accuracy = self.__get_losses_and_accuracy__(
                self.clf_model, output, out_layers, learning_rate, options)

        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())

            var_list = []
            for var in tf.model_variables():
                if 'final' not in var.name:
                    var_list.append(var)

            saver = tf.train.Saver(var_list)
            saver.restore(sess, save_model_from)

            print('starting training ... ')
            for epoch in range(epochs):
                n_batches = self.clf_dataset.num_batch

                for batch_i in range(1, n_batches + 1):
                    loss = self.__run_train__(sess, input, output, batch_i,
                                              batch_size, cost, train_op,
                                              self.clf_model.scale_to_imagenet)
                    print('Epoch {:>2}, {} Batch {}: '.format(
                        epoch + 1, self.clf_dataset.name, batch_i),
                          end='')
                    print('Avg. Loss: {} '.format(loss), end='')

                    valid_acc = self.__run_accuracy_in_valid_set__(
                        sess, input, output, accuracy, batch_size,
                        self.clf_model.scale_to_imagenet)
                    print('Validation Accuracy {:.6f}'.format(valid_acc))

                if epoch % save_every_epoch == 0:
                    print('epoch: {} is saved...'.format(epoch + 1))
                    saver2 = tf.train.Saver()
                    saver2.save(sess,
                                save_model_to,
                                global_step=epoch + 1,
                                write_meta_graph=False)

    def run_testing(self, data, save_model_from, options=None):
        graph = tf.Graph()
        with graph.as_default():
            input, _ = self.clf_model.set_dataset(self.clf_dataset)
            out_layers = self.clf_model.create_model(input)

            final_out_layer = out_layers[len(out_layers) - 1]
            softmax_result = tf.nn.softmax(final_out_layer)

        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())

            saver = tf.train.Saver(tf.trainable_variables())
            saver.restore(sess, save_model_from)

            results = sess.run(softmax_result, feed_dict={input: data})

        return results
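
End to end, ClfTrainer expects a model exposing set_dataset/create_model/scale_to_imagenet and a dataset exposing the batch helpers used above. A hypothetical invocation; Cifar10Dataset and the paths are placeholders, not part of the snippet:

model = AlexNet()            # one of the architectures handled in __get_losses_and_accuracy__
dataset = Cifar10Dataset()   # hypothetical; must provide get_training_batches_from_preprocessed etc.

trainer = ClfTrainer(model, dataset)
trainer.run_training(epochs=10,
                     batch_size=64,
                     learning_rate=1e-4,
                     save_model_to='./ckpt/alexnet-cifar10',
                     model_name='alexnet',
                     save_every_epoch=1)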
Exemplo n.º 45
0
                    f"saving position {log_file}/{log_pos}, {time.time() - timestamp:,.3f} seconds behind"
                )
                with stats.timer('save_pos'):
                    with open(SAVE_LOC, 'w') as f:
                        json.dump(
                            {
                                "log_file": posted_log_file,
                                "log_pos": posted_log_pos
                            }, f)
                last_save = time.time()
                since_last = 0
                posted_log_file = None
                posted_log_pos = None


# in-memory queue between binlog and es. The bigger it is, the more events we
# can parse in memory while waiting for es to catch up, at the expense of heap.
buf = Queue(maxsize=INTERNAL_QUEUE_DEPTH)

reader = BinlogReader(buf)
reader.daemon = True
writer = EsPoster(buf, chunk_size=ES_CHUNK_SIZE, flush_interval=FLUSH_INTERVAL)
writer.daemon = True
reader.start()
writer.start()

# on the main thread, poll the queue size for monitoring
while True:
    stats.gauge('queue_depth', buf.qsize())
    time.sleep(1)
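
The fragment assumes a handful of module-level names (the BinlogReader/EsPoster thread classes plus the constants and stats client below) defined in the elided top of the script; representative definitions, with illustrative values:

from queue import Queue

from statsd import StatsClient

# representative values only; the real script's settings are not shown in the fragment
INTERNAL_QUEUE_DEPTH = 10000   # max parsed binlog events held in memory
ES_CHUNK_SIZE = 500            # events per Elasticsearch bulk request
FLUSH_INTERVAL = 5             # seconds between forced flushes
SAVE_LOC = '/var/run/binlog-es/position.json'  # where the replication position is persisted

stats = StatsClient(prefix='binlog_es')  # used for the save_pos timer and queue_depth gauge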
class LocalMQTTClient:
    """
    This class connects to the local broker and interacts with it as needed
    """
    def __init__(
            self, username: str=None, password: str=None
    ):
        logging.config.dictConfig(logging_config.get_logging_conf())
        self._logger = structlog.getLogger(__name__)
        self._logger.addHandler(logging.NullHandler())

        # a clean session is used here; to receive messages missed while
        # disconnected we would need a persistent session (clean_session=False)
        self._client = mqtt.Client(
            clean_session=True, userdata=None,
            protocol=mqtt.MQTTv311
        )

        self._client.on_connect = self._on_connect
        self._client.on_disconnect = self._on_disconnect
        self._client.on_message = self.on_message

        self._set_message_callbacks()

        self._client.username_pw_set(
            username=str(username), password=str(password)
        )

        # run the connect function
        self._connect()
        self._stats_client = StatsClient(host=STATSD_ADDRESS, port=STATSD_PORT)
        self._logger.info("Local MQTT Client init called")

    def _set_message_callbacks(self):
        """

        :return:
        """
        self._client.message_callback_add(
            "$SYS/broker/bytes/received", self.on_bytes_received
        )

        self._client.message_callback_add(
            "$SYS/broker/bytes/sent", self.on_bytes_sent
        )
        self._client.message_callback_add(
            "$SYS/broker/clients/connected", self.on_clients_connected
        )
        self._client.message_callback_add(
            "$SYS/broker/clients/expired", self.on_clients_expired
        )
        self._client.message_callback_add(
            "$SYS/broker/clients/disconnected", self.on_clients_disconnected
        )
        self._client.message_callback_add(
            "$SYS/broker/clients/maximum", self.on_clients_maximum
        )
        self._client.message_callback_add(
            "$SYS/broker/clients/total", self.on_clients_total
        )
        self._client.message_callback_add(
            "$SYS/broker/heap/current", self.on_heap_current_size
        )
        self._client.message_callback_add(
            "$SYS/broker/heap/maximum", self.on_heap_maximum_size
        )
        self._client.message_callback_add(
            "$SYS/broker/load/connections/1min", self.on_connections_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/connections/5min", self.on_connections_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/connections/15min", self.on_connections_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/bytes/received/1min", self.on_bytes_received_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/bytes/received/5min", self.on_bytes_received_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/bytes/received/15min",
            self.on_bytes_received_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/bytes/sent/1min", self.on_bytes_sent_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/bytes/sent/5min", self.on_bytes_sent_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/bytes/sent/15min", self.on_bytes_sent_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/messages/received/1min",
            self.on_messages_received_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/messages/received/5min",
            self.on_messages_received_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/messages/received/15min",
            self.on_messages_received_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/messages/sent/1min", self.on_messages_sent_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/messages/sent/5min", self.on_messages_sent_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/messages/sent/15min", self.on_messages_sent_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/dropped/1min",
            self.on_publish_dropped_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/dropped/5min",
            self.on_publish_dropped_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/dropped/15min",
            self.on_publish_dropped_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/received/1min",
            self.on_publish_received_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/received/5min",
            self.on_publish_received_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/received/15min",
            self.on_publish_received_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/sent/1min", self.on_publish_sent_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/sent/5min", self.on_publish_sent_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/publish/sent/15min", self.on_publish_sent_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/sockets/1min", self.on_sockets_1min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/sockets/5min", self.on_sockets_5min
        )
        self._client.message_callback_add(
            "$SYS/broker/load/sockets/15min", self.on_sockets_15min
        )
        self._client.message_callback_add(
            "$SYS/broker/messages/inflight", self.on_inflight
        )
        self._client.message_callback_add(
            "$SYS/broker/messages/received", self.on_messages_received
        )
        self._client.message_callback_add(
            "$SYS/broker/messages/sent", self.on_messages_sent
        )
        self._client.message_callback_add(
            "$SYS/broker/messages/stored", self.on_messages_stored
        )
        self._client.message_callback_add(
            "$SYS/broker/publish/messages/dropped", self.on_publish_dropped
        )
        self._client.message_callback_add(
            "$SYS/broker/publish/messages/received", self.on_publish_received
        )
        self._client.message_callback_add(
            "$SYS/broker/publish/messages/sent", self.on_publish_sent
        )
        self._client.message_callback_add(
            "$SYS/broker/retained messages/count", self.on_retain_messages_count
        )
        self._client.message_callback_add(
            "$SYS/broker/subscriptions/count", self.on_subscription_count
        )
        self._client.message_callback_add(
            "$SYS/broker/uptime", self.on_broker_uptime
        )

    # The callback for when a PUBLISH message is received from the server.
    def on_message(self, client, userdata, msg):
        self._logger.info("Received nessage", mqtt_msg=msg)

    def _connect(self) -> None:
        """
        This function calls the connect function on the client object,
        this will block till there is an ok connection
        :return: None
        """
        # checking if connection is ok (first we assume connection is not ok)
        try:
            self._logger.info(
                "Attempting to connect to local MQTT server.",
                server=LOCAL_MQTT_ADDRESS, port=LOCAL_MQTT_PORT, kepalive=60
            )

            self._client.connect_async(
                host=LOCAL_MQTT_ADDRESS, port=LOCAL_MQTT_PORT, keepalive=60
            )

        except Exception as e:
            # connect_async itself failed; log the error so the caller can retry
            self._logger.error("Unable to connect to MQTT Broker", error=e)

    def run_loop(
            self, timeout: float=.25, in_thread: bool=False, loop_var: int=1,
            forever: bool=False
    ) -> None:
        """
        This function starts the loop in a separate thread for this object.
        :param timeout: passes the timeout to the loop function
        :param in_thread: if True, timeout and loop_var are ignored and the
        loop runs in a new thread
        :param loop_var: number of times to call loop() with the given timeout
        :param forever: if True, the function blocks and all other
        parameters are ignored
        :return: None
        """
        try:
            if forever is True:
                self._client.loop_forever()
                return

            if in_thread is False:
                for i in range(0, loop_var):
                    self._client.loop(timeout=timeout)
            else:
                # in_thread is True, run the network loop in a background thread
                self._logger.info("Running MQTT loop in a separate thread")
                self._client.loop_start()
        except Exception as e:
            self._logger.error("Error starting the loop", error=e)
            raise

    def _on_connect(self, client, userdata, flags, rc):
        """
        This function is executed on the on_connect event; the argument
        descriptions are left blank because the signature is fixed by paho-mqtt
        :param client:
        :param userdata:
        :param flags:
        :param rc:
        :return:
        """
        self._logger.info("Connection to local MQTT server made")

        self._subscribe()

    def _subscribe(self) -> None:
        """
        This function subscribes to the broker's $SYS/# topic tree
        :return: None
        """
        self._logger.info("Subscribing to endpoint", ep="$SYS/#")
        self._client.subscribe("$SYS/#", qos=1)

    def _on_disconnect(self, client, userdata, rc) -> None:
        """
        This function is called on disconnect event
        :param client: the client object
        :param userdata: the data set by user on startup
        :param rc: The return error code
        :return: None
        """
        self._logger.info(
            "Local MQTT Client has disconnected from the MQTT Server",
            userdata=userdata, rc=rc, rc_string=mqtt.error_string(rc)
        )

    def on_bytes_received(self, client, userdata, msg):
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_received', float(msg.payload)
        )

    def on_bytes_sent(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_sent', float(msg.payload)
        )

    def on_clients_connected(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.clients_connected', float(msg.payload)
        )

    def on_clients_expired(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.clients_expired', float(msg.payload)
        )

    def on_clients_disconnected(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.clients_disconnected', float(msg.payload)
        )

    def on_clients_maximum(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.clients_maximum', float(msg.payload)
        )

    def on_clients_total(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.clients_total', float(msg.payload)
        )

    def on_heap_current_size(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.heap_current', float(msg.payload)
        )

    def on_heap_maximum_size(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.heap_maximum', float(msg.payload)
        )

    def on_connections_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.connections_1min', float(msg.payload)
        )

    def on_connections_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.connections_5min', float(msg.payload)
        )

    def on_connections_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.connections_15min', float(msg.payload)
        )

    def on_bytes_received_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_received_1min', float(msg.payload)
        )

    def on_bytes_received_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_received_5min', float(msg.payload)
        )

    def on_bytes_received_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_received_15min', float(msg.payload)
        )

    def on_bytes_sent_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_sent_1min', float(msg.payload)
        )

    def on_bytes_sent_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_sent_5min', float(msg.payload)
        )

    def on_bytes_sent_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.bytes_sent_15min', float(msg.payload)
        )

    def on_messages_received_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_received_1min', float(msg.payload)
        )

    def on_messages_received_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_received_5min', float(msg.payload)
        )

    def on_messages_received_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_received_15min', float(msg.payload)
        )

    def on_messages_sent_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_sent_1min', float(msg.payload)
        )

    def on_messages_sent_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_sent_5min', float(msg.payload)
        )

    def on_messages_sent_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_sent_15min', float(msg.payload)
        )

    def on_publish_dropped_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_dropped_1min', float(msg.payload)
        )

    def on_publish_dropped_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_dropped_5min', float(msg.payload)
        )

    def on_publish_dropped_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_dropped_15min', float(msg.payload)
        )

    def on_publish_received_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_received_1min', float(msg.payload)
        )

    def on_publish_received_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_received_5min', float(msg.payload)
        )

    def on_publish_received_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_received_15min', float(msg.payload)
        )

    def on_publish_sent_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_sent_1min', float(msg.payload)
        )

    def on_publish_sent_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_sent_5min', float(msg.payload)
        )

    def on_publish_sent_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_sent_15min', float(msg.payload)
        )

    def on_sockets_1min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.sockets_1min', float(msg.payload)
        )

    def on_sockets_5min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.sockets_5min', float(msg.payload)
        )

    def on_sockets_15min(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.sockets_15min', float(msg.payload)
        )

    def on_inflight(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.inflight', float(msg.payload)
        )

    def on_messages_received(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_received', float(msg.payload)
        )

    def on_messages_sent(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_sent', float(msg.payload)
        )

    def on_messages_stored(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.messages_stored', float(msg.payload)
        )

    def on_publish_dropped(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_dropped', float(msg.payload)
        )

    def on_publish_received(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_received', float(msg.payload)
        )

    def on_publish_sent(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.publish_sent', float(msg.payload)
        )

    def on_retain_messages_count(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.retain_messages_count', float(msg.payload)
        )

    def on_subscription_count(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.subscription_count', float(msg.payload)
        )

    def on_broker_uptime(self, client, userdata, msg) -> None:
        """

        :param client:
        :param userdata:
        :param msg:
        :return:
        """
        self._logger.info(
            "Received SYS message", topic=msg.topic, payload=msg.payload
        )
        self._stats_client.gauge(
            'mosquito_monitor.broker_uptime', float(msg.payload.split(b' ')[0])
        )
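
Every $SYS handler above repeats the same log-then-gauge body, so the forty-odd callbacks could be collapsed into a table plus one factory. A sketch of that refactoring under the same class interface; the subclass name is hypothetical and the topic table is abbreviated:

# topic -> metric table; only a few of the $SYS topics from _set_message_callbacks are shown
SYS_METRICS = {
    "$SYS/broker/bytes/received": "mosquito_monitor.bytes_received",
    "$SYS/broker/bytes/sent": "mosquito_monitor.bytes_sent",
    "$SYS/broker/clients/connected": "mosquito_monitor.clients_connected",
    # ... remaining topics elided; $SYS/broker/uptime still needs its own
    # handler because its payload ("<n> seconds") has to be split first
}


class LocalMQTTClientCompact(LocalMQTTClient):
    """Sketch: same behaviour as the hand-written callbacks above."""

    def _make_sys_handler(self, metric_name):
        def handler(client, userdata, msg):
            self._logger.info(
                "Received SYS message", topic=msg.topic, payload=msg.payload
            )
            self._stats_client.gauge(metric_name, float(msg.payload))
        return handler

    def _set_message_callbacks(self):
        for topic, metric in SYS_METRICS.items():
            self._client.message_callback_add(
                topic, self._make_sys_handler(metric)
            )

message_callback_add is the same paho-mqtt API the original uses, so the subscription and run_loop code would be unchanged.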