Example #1
class _Statsd(object):
    def __init__(self, config):
        if config.get('datadog', True):
            initialize(statsd_host=config['host'],
                       statsd_port=config['port'],
                       prefix=config['prefix'])
            self.datadog = True
            self._statsd = statsd
        else:
            self.datadog = False
            self._statsd = StatsClient(config['host'],
                                       config['port'],
                                       config['prefix'])

    def incr(self, metric, count=1, rate=1, **kw):
        if self.datadog:
            return self._statsd.increment(metric, value=count,
                                          sample_rate=rate, **kw)
        else:
            return self._statsd.incr(metric, count=count, rate=rate)

    def timer(self, metric, rate=1, **kw):
        if self.datadog:
            return self._statsd.timed(metric, sample_rate=rate, **kw)
        else:
            return self._statsd.timer(metric, rate=rate)
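
This wrapper hides the API differences between the Datadog client (increment/timed) and the plain statsd client (incr/timer) behind one interface. A minimal usage sketch, assuming a hypothetical config dict and a hypothetical handle_request() to measure:

config = {'datadog': False, 'host': 'localhost', 'port': 8125, 'prefix': 'myapp'}
stats = _Statsd(config)

stats.incr('requests.served')           # counter, routed to either backend
with stats.timer('requests.latency'):   # both timer() and timed() return context managers
    handle_request()                    # hypothetical unit of work being timed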
Example #2
def main():
  args = parseArguments()

  # initialize statsdclient
  global statsd_client
  statsd_client = StatsClient(host=args.server, port=args.port,
                              prefix=args.source)

  value = None
  try:
    with open(args.value, 'r') as yamlfile:
      server_state = yaml.safe_load(yamlfile)
      value = server_state['code']
  except yaml.YAMLError as ex:
    if hasattr(ex, 'problem_mark'):
      mark = ex.problem_mark
      print "YAML load error at position (%s:%s)" % (mark.line + 1,
                                                     mark.column + 1)
    sys.exit(1)

  print "%s sends metric [%s] with value [%s] to %s:%d" % (
        args.source, args.metric, value, args.server,
        args.port)

  statsd_client.gauge(args.metric, int(value))
  return 0
Example #3
class StatsDBackend(BaseBackend):

    name = 'statsd'

    def __init__(self, config):
        self.config = config
        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)
        # Sample rate read by the methods below; default to always sampling.
        self.config.setdefault('STATS_RATE', 1)

        self.statsd = StatsClient(self.config['STATSD_HOST'],
                                  self.config['STATSD_PORT'],
                                  self.config['STATSD_PREFIX'])

    def timing(self, stat_name, delta):
        return self.statsd.timing(stat_name, delta, self.config['STATS_RATE'])

    def incr(self, stat_name, count=1):
        return self.statsd.incr(stat_name, count, self.config['STATS_RATE'])

    def decr(self, stat_name, count=1):
        return self.statsd.decr(stat_name, count, self.config['STATS_RATE'])

    def gauge(self, stat_name, value, delta=False):
        return self.statsd.gauge(stat_name, value, self.config['STATS_RATE'], delta)
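
A hedged sketch of wiring this backend up, assuming BaseBackend imposes no extra constructor requirements; any mutable mapping works as the config, with missing keys falling back to the defaults set in __init__:

backend = StatsDBackend({})           # every key falls back to a default
backend.incr('jobs.processed')
backend.timing('jobs.duration', 125)  # duration in milliseconds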
Example #4
class FlaskStat(object):

    _xstat_title = None
    _xstat_host = None
    _xstat_port = None

    _stat_client = None

    def __init__(self, app=None):
        super(FlaskStat, self).__init__()

        if app:
            self.init_app(app)

    def init_app(self, app):
        """
        Bind the app
        """
        from flask import request, g
        self._xstat_title = app.config.get('XSTAT_TITLE')
        self._xstat_host = app.config.get('XSTAT_HOST')
        self._xstat_port = app.config.get('XSTAT_PORT') or constants.XSTAT_PORT
        self._stat_client = StatsClient(host=self._xstat_host, port=self._xstat_port)

        @app.before_request
        @catch_exc
        def prepare_stat():
            if not request.endpoint:
                return

            g.xstat_timers = []
            g.xstat_timers.append(
                self._stat_client.timer('.'.join([
                    self._xstat_title,
                    'endpoint',
                    request.endpoint,
                    ])
                )
            )

            g.xstat_timers.append(
                self._stat_client.timer('.'.join([
                    self._xstat_title,
                    'all',
                    ])
                )
            )

            for stat in g.xstat_timers:
                stat.start()

        @app.teardown_request
        @catch_exc
        def send_stat(exc):
            if not hasattr(g, 'xstat_timers'):
                return

            for stat in g.xstat_timers:
                stat.stop()
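
Hooking the extension into an application follows the usual Flask extension pattern. A minimal sketch (the config keys are the ones read in init_app; the values are hypothetical, and setting XSTAT_PORT explicitly sidesteps the constants.XSTAT_PORT fallback):

from flask import Flask

app = Flask(__name__)
app.config.update(XSTAT_TITLE='myapp', XSTAT_HOST='127.0.0.1', XSTAT_PORT=8125)
FlaskStat(app)  # registers the before_request/teardown_request hooks above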
Example #5
def _udp_client(prefix=None, addr=None, port=None, ipv6=False):
    if not addr:
        addr = ADDR[0]
    if not port:
        port = ADDR[1]
    sc = StatsClient(host=addr, port=port, prefix=prefix, ipv6=ipv6)
    sc._sock = mock.Mock()
    return sc
def send_stats(last_timestamp, last_message_count, json_filename):
	with open(json_filename) as data_file:
		data = json.load(data_file)

	current_timestamp = data["now"]
	current_message_count = data["messages"]

	secs = False
	msgs = False

	if last_timestamp is False:
		print("Starting up, first pass....")
	elif current_message_count < last_message_count:
		print("Looks like dump1090 restarted, message count reset (%d)" % current_message_count)
	else:
		secs = current_timestamp - last_timestamp
		msgs = current_message_count - last_message_count

		print("{0} sec\t{1} messages\t{2} messages per sec avg".format(secs, msgs, (msgs / secs)))

	last_timestamp = current_timestamp
	last_message_count = current_message_count
	threading.Timer(INTERVAL, send_stats, [last_timestamp, last_message_count, json_filename]).start()

	aircrafts_5s = []
	aircrafts_10s = []
	aircrafts_30s = []
	aircrafts_60s = []

	for aircraft in data["aircraft"]:
		if aircraft["seen"] < 5:
			aircrafts_5s.append(aircraft["hex"])
		if aircraft["seen"] < 10:
			aircrafts_10s.append(aircraft["hex"])
		if aircraft["seen"] < 30:
			aircrafts_30s.append(aircraft["hex"])
		if aircraft["seen"] < 60:
			aircrafts_60s.append(aircraft["hex"])

	print "\t5s:{0}\t10s:{1}\t30s:{2}\t60s:{3}".format(len(aircrafts_5s), len(aircrafts_10s), len(aircrafts_30s), len(aircrafts_60s))

	radio_name = sys.argv[1]

	if secs:
		client = StatsClient(STATSD_HOST)
		client.incr("radios.%s.message_rate" % radio_name, msgs)

		pipe = client.pipeline()
		c = 0
		max_msg_size = 20
		for hex in aircrafts_10s:
			pipe.set("radios.%s.aircraft" % radio_name, hex)
			c = c + 1
			if c == max_msg_size:
				pipe.send()
				c = 0
		if c != 0:
			pipe.send()
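
Because send_stats re-arms itself with threading.Timer, a single initial call starts the polling loop; the script also expects the radio name as its first CLI argument. A hypothetical entry point (INTERVAL and the JSON path are assumptions):

INTERVAL = 60  # seconds between polls (assumed)
# No previous sample on the first pass, so last_timestamp is False.
send_stats(False, 0, "/run/dump1090/aircraft.json")  # hypothetical dump1090 JSON path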
Example #7
class MapleStat(object):

    _xstat_title = None
    _xstat_host = None
    _xstat_port = None

    _stat_client = None

    def __init__(self, app=None, config=None):
        super(MapleStat, self).__init__()

        if app:
            self.init_app(app, config)

    def init_app(self, app, config):
        """
        Bind the app
        """
        self._xstat_title = config.get('XSTAT_TITLE')
        self._xstat_host = config.get('XSTAT_HOST')
        self._xstat_port = config.get('XSTAT_PORT') or constants.XSTAT_PORT
        self._stat_client = StatsClient(host=self._xstat_host, port=self._xstat_port)

        @app.before_request
        @catch_exc
        def prepare_stat(request):
            if not request.endpoint:
                return

            request.xstat_timers = []
            request.xstat_timers.append(
                self._stat_client.timer('.'.join([
                    self._xstat_title,
                    'endpoint',
                    request.endpoint,
                    ])
                )
            )

            request.xstat_timers.append(
                self._stat_client.timer('.'.join([
                    self._xstat_title,
                    'all',
                    ])
                )
            )

            for stat in request.xstat_timers:
                stat.start()

        @app.after_request
        @catch_exc
        def send_stat(request, exc):
            if not hasattr(request, 'xstat_timers'):
                return

            for stat in request.xstat_timers:
                stat.stop()
Example #8
class DjangoStat(MiddlewareMixin):
    _xstat_title = None
    _xstat_host = None
    _xstat_port = None

    _stat_client = None

    def __init__(self, *args, **kwargs):
        from django.conf import settings

        super(DjangoStat, self).__init__(*args, **kwargs)

        self._xstat_title = getattr(settings, 'XSTAT_TITLE', None)
        self._xstat_host = getattr(settings, 'XSTAT_HOST', None)
        self._xstat_port = getattr(settings, 'XSTAT_PORT', None) or constants.XSTAT_PORT

        self._stat_client = StatsClient(host=self._xstat_host, port=self._xstat_port)

    @catch_exc
    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        request.resolver_match.url_name only becomes available in process_view
        :return:
        """
        request.xstat_timers = []
        request.xstat_timers.append(
            self._stat_client.timer('.'.join([
                self._xstat_title,
                'endpoint',
                request.resolver_match.url_name,
                ])
            )
        )

        request.xstat_timers.append(
            self._stat_client.timer('.'.join([
                self._xstat_title,
                'all',
                ])
            )
        )

        for stat in request.xstat_timers:
            stat.start()

    @catch_exc
    def process_response(self, request, response):
        """
        This step runs whether or not an exception was raised
        """
        if not hasattr(request, 'xstat_timers'):
            return response

        for stat in request.xstat_timers:
            stat.stop()

        return response
Example #9
def _udp_client(prefix=None, addr=None, port=None, ipv6=False):
    if not addr:
        addr = ADDR[0]
    if not port:
        port = ADDR[1]
    sc = StatsClient(host=addr, port=port, prefix=prefix, ipv6=ipv6)
    sc._pool = fake_socket_pool()
    sc._sock = sc._pool.get()
    return sc
Example #10
def test_disabled_client():
    """ Assert that a cliend with disabled=True does not send any data to
    statsd.
    """
    sc = StatsClient(host=ADDR[0], port=ADDR[1], disable=True)
    sc._sock = mock.Mock()

    sc.incr('foo')

    eq_(sc._sock.sendto.call_count, 0)
Example #11
def time_stack_list(username, password, tenant, auth_url, heat_url, region,
                    statsd_server):
    keystone = keystone_client(username=username, password=password,
                               tenant_name=tenant, auth_url=auth_url)
    token = keystone.auth_token
    heat = heat_client('1', endpoint=heat_url, region_name=region, token=token)
    statsd = StatsClient(host=statsd_server)

    with statsd.timer('uptime.{}'.format(region)):
        list(heat.stacks.list())
Example #12
def rtl_433_probe():
    statsd = StatsClient(host=STATSD_HOST,
                         port=STATSD_PORT,
                         prefix=STATSD_PREFIX)

    while True:
        line, addr = sock.recvfrom(1024)

        try:
            line = parse_syslog(line)
            data = json.loads(line)

            label = sanitize(data["model"])
            if "channel" in data:
                label += ".CH" + str(data["channel"])

            if "battery" in data:
                if data["battery"] == "OK":
                    statsd.gauge(label + '.battery', 1)
                else:
                    statsd.gauge(label + '.battery', 0)

            if "humidity" in data:
                statsd.gauge(label + '.humidity', data["humidity"])

            statsd.gauge(label + '.temperature', data["temperature_C"])

        except KeyError:
            pass

        except ValueError:
            pass
Example #13
    class StatsdStatsLogger(BaseStatsLogger):

        def __init__(self, host='localhost', port=8125,
                     prefix='superset', statsd_client=None):
            """
            Initializes from either params or a supplied, pre-constructed statsd client.

            If statsd_client argument is given, all other arguments are ignored and the
            supplied client will be used to emit metrics.
            """
            if statsd_client:
                self.client = statsd_client
            else:
                self.client = StatsClient(host=host, port=port, prefix=prefix)

        def incr(self, key):
            self.client.incr(key)

        def decr(self, key):
            self.client.decr(key)

        def timing(self, key, value):
            self.client.timing(key, value)

        def gauge(self, key):
            # pylint: disable=no-value-for-parameter
            self.client.gauge(key)
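
The injection path described in the docstring can be exercised by handing in a pre-built client, in which case the host/port/prefix arguments are ignored. A hedged sketch with hypothetical metric names:

shared = StatsClient(host='localhost', port=8125, prefix='superset')
logger = StatsdStatsLogger(statsd_client=shared)  # host/port/prefix are ignored here
logger.incr('queries.started')
logger.timing('queries.duration', 42)  # milliseconds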
Example #14
class StaticticStatsD(object):
    """
    Send stats to statsd.
    """
    def __init__(self, hostname, host, port, prefix=None):
        self.client = StatsClient(host, port, prefix=prefix)
        self.hostname = hostname

    def incr(self, metric, value=1, prefix=None):
        """
        Increment 'metric' counter with 'value'.
        """
        if prefix is not None:
            metric = '%s.%s' % (prefix, metric)

        self.client.incr(metric, value)

        # separate metric for hostname
        if self.hostname is not None:
            metric = '%s.%s' % (self.hostname, metric)
            self.client.incr(metric, value)

    def timing(self, metric, value, prefix=None):
        """
        Send 'metric' timing.
        """
        if prefix is not None:
            metric = '%s.%s' % (prefix, metric)

        self.client.timing(metric, value)

        # separate metric for hostname
        if self.hostname is not None:
            metric = '%s.%s' % (self.hostname, metric)
            self.client.timing(metric, value)
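
When a hostname is set, each call emits the metric twice: once plain and once under the hostname, both behind the client-level prefix. A usage sketch with hypothetical values:

stats = StaticticStatsD('web-01', 'localhost', 8125, prefix='myapp')
stats.incr('requests')        # sends myapp.requests and myapp.web-01.requests
stats.timing('latency', 125)  # same doubling for timings, in milliseconds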
Example #15
	def setUp(self):

		sqsregioninfo = SQSRegionInfo(name='localhost_region', endpoint='localhost')
		self.connection = sqsregioninfo.connect(
			port=8001,
			aws_access_key_id='id',
			aws_secret_access_key='secret',
			is_secure=False
		)

		self.queue = self.connection.create_queue('test_queue')

		client = StatsClient(host='localhost', port=8125, prefix=None, maxudpsize=512)

		self.statsd = client.pipeline()
Example #16
    def __init__(self):
        self.statsd = StatsClient(
            host=STATSD_HOST,
            port=STATSD_PORT,
            prefix=STATSD_PREFIX,
            maxudpsize=512)
        self.last_vals = self.get_stats()
        sleep(5)
Example #17
    def __init__(self, config):
        self.config = config
        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.statsd = StatsClient(self.config['STATSD_HOST'],
                                  self.config['STATSD_PORT'],
                                  self.config['STATSD_PREFIX'])
Example #18
def check_nginx_status(coll_type, file, server, port, local):
    nginx_type = "nginx_" + coll_type.split('.')[0].strip()

    if nginx_type in file_seek:
        offset_values = int(file_seek[nginx_type][0])
        file_size = int(file_seek[nginx_type][1])
    else:
        offset_values = 0
        file_size = 0

    logfile = open(file, 'r')

    '''seeklines holds the data between the position where the file was last
    closed and the position where it was opened this time'''
    seeklines = seekfile(nginx_type, logfile, offset_values, file_size)
    logfile.close()

    nginx_status = {'2XX': 0, '3XX': 0, '4XX': 0, '5XX': 0}

    if seeklines == "":
        nginx_status['2XX'] = 0
        nginx_status['3XX'] = 0
        nginx_status['4XX'] = 0
        nginx_status['5XX'] = 0
    else:
        for line in seeklines:
            status_tmp = line.strip().split(' ')[6]
            if int(status_tmp[:1]) not in [2, 3, 4, 5]:
                continue
            status = status_tmp[:1] + "XX"

            if status in nginx_status:
                nginx_status[status] += 1
            else:
                nginx_status[status] = 1

    # print(nginx_status)
    local_ip = local

    if local_ip:
        graphite_ip = local_ip.replace(".", "_")

    sc = StatsClient(server, port)
    for nginx_status, status_count in nginx_status.items():
        print(nginx_status, status_count)
        sc.gauge(graphite_ip + ".nginx." + coll_type.split('.')[0].strip() + "." + nginx_status, int(status_count))
Example #19
    def __init__(self, *args, **kwargs):
        from django.conf import settings

        super(DjangoStat, self).__init__(*args, **kwargs)

        self._xstat_title = getattr(settings, 'XSTAT_TITLE', None)
        self._xstat_host = getattr(settings, 'XSTAT_HOST', None)
        self._xstat_port = getattr(settings, 'XSTAT_PORT', None) or constants.XSTAT_PORT

        self._stat_client = StatsClient(host=self._xstat_host, port=self._xstat_port)

    def __init__(self, args):
        """ Initialize the args and set up a stats client. """
        self._source_host = args.source_host
        self._target_host = args.target_host
        self._replica_set = args.replica_set
        self._user = args.user
        self._password = args.password
        self._poll_interval = args.interval
        self._lag_key = args.region + '_' + args.replica_set + '_lag'
        # We assume a local collectd installation
        self._stat_client = StatsClient()
Example #21
        def __init__(self, host='localhost', port=8125,
                     prefix='superset', statsd_client=None):
            """
            Initializes from either params or a supplied, pre-constructed statsd client.

            If statsd_client argument is given, all other arguments are ignored and the
            supplied client will be used to emit metrics.
            """
            if statsd_client:
                self.client = statsd_client
            else:
                self.client = StatsClient(host=host, port=port, prefix=prefix)
Example #22
    def __init__(self, config):
        if config.get('datadog', True):
            initialize(statsd_host=config['host'],
                       statsd_port=config['port'],
                       prefix=config['prefix'])
            self.datadog = True
            self._statsd = statsd
        else:
            self.datadog = False
            self._statsd = StatsClient(config['host'],
                                       config['port'],
                                       config['prefix'])
Example #23
def send_radio_stats(n, s):
	client = StatsClient(STATSD_HOST)
	pipe = client.pipeline()

	pipe.gauge("radios.%s.accepted" % n, s["local"]["accepted"][0])
	pipe.gauge("radios.%s.accepted_corrected" % n, s["local"]["accepted"][1])
	# If you use the "aggressive" setting in dump1090-mutability, there may
	# be a third entry in the accepted set. Maybe you want to do something with that data?
	#pipe.gauge("radios.%s.accepted_corrected_2bit" % n, s["local"]["accepted"][2])

	pipe.gauge("radios.%s.bad" % n, s["local"]["bad"])
	pipe.gauge("radios.%s.blocks_dropped" % n, s["local"]["blocks_dropped"])
	pipe.gauge("radios.%s.blocks_processed" % n, s["local"]["blocks_processed"])
	pipe.gauge("radios.%s.modeac" % n, s["local"]["modeac"])
	pipe.gauge("radios.%s.modes" % n, s["local"]["modes"])
	pipe.gauge("radios.%s.strong_signals" % n, s["local"]["strong_signals"])
	pipe.gauge("radios.%s.unknown_icao" % n, s["local"]["unknown_icao"])
	pipe.gauge("radios.%s.cpr.airborne" % n, s["cpr"]["airborne"])
	pipe.gauge("radios.%s.cpr.filtered" % n, s["cpr"]["filtered"])
	pipe.send()
	pipe.gauge("radios.%s.cpr.global_bad" % n, s["cpr"]["global_bad"])
	pipe.gauge("radios.%s.cpr.global_ok" % n, s["cpr"]["global_ok"])
	pipe.gauge("radios.%s.cpr.global_range" % n, s["cpr"]["global_range"])
	pipe.gauge("radios.%s.cpr.global_skipped" % n, s["cpr"]["global_skipped"])
	pipe.gauge("radios.%s.cpr.global_speed" % n, s["cpr"]["global_speed"])
	pipe.gauge("radios.%s.cpr.local_aircraft_relative" % n, s["cpr"]["local_aircraft_relative"])
	pipe.gauge("radios.%s.cpr.local_ok" % n, s["cpr"]["local_ok"])
	pipe.gauge("radios.%s.cpr.local_range" % n, s["cpr"]["local_range"])
	pipe.gauge("radios.%s.cpr.local_receiver_relative" % n, s["cpr"]["local_receiver_relative"])
	pipe.gauge("radios.%s.cpr.local_skipped" % n, s["cpr"]["local_skipped"])
	pipe.send()
	pipe.gauge("radios.%s.cpr.local_speed" % n, s["cpr"]["local_speed"])
	pipe.gauge("radios.%s.cpr.surface" % n, s["cpr"]["surface"])
	pipe.gauge("radios.%s.messages" % n, s["messages"])
	pipe.gauge("radios.%s.tracks_all" % n, s["tracks"]["all"])
	pipe.gauge("radios.%s.tracks_single_message" % n, s["tracks"]["single_message"])
	pipe.timing("radios.%s.cpu.background" % n, s["cpu"]["background"])
	pipe.timing("radios.%s.cpu.demodulation" % n, s["cpu"]["demod"])
	pipe.timing("radios.%s.cpu.usb" % n, s["cpu"]["reader"])
	pipe.send()
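
The interleaved pipe.send() calls flush the pipeline in batches so no single UDP datagram grows too large. The same batching idea in miniature, assuming client is a StatsClient and gauges is a list of (name, value) pairs:

pipe = client.pipeline()
for i, (name, value) in enumerate(gauges, start=1):
    pipe.gauge(name, value)
    if i % 10 == 0:
        pipe.send()  # flush a batch, keeping each datagram small
pipe.send()          # flush whatever is left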
Example #24
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is not None:
            self.init_app(app, config)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault("STATSD_HOST", "localhost")
        self.config.setdefault("STATSD_PORT", 8125)
        self.config.setdefault("STATSD_PREFIX", None)

        self.app = app

        self.statsd = StatsClient(
            host=self.config["STATSD_HOST"], port=self.config["STATSD_PORT"], prefix=self.config["STATSD_PREFIX"]
        )

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)
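
Typical use defers client creation to init_app and then proxies plain statsd calls through the extension. A minimal sketch (metric names hypothetical):

from flask import Flask

app = Flask(__name__)
statsd = StatsD(app)  # picks up the STATSD_HOST/PORT/PREFIX defaults from init_app
statsd.incr('hello.requests')
with statsd.timer('hello.latency'):
    pass  # work to be timed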
Example #25
    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.app = app

        self.statsd = StatsClient(self.config['STATSD_HOST'],
            self.config['STATSD_PORT'], self.config['STATSD_PREFIX'])
Example #26
    class StatsdStatsLogger(BaseStatsLogger):
        def __init__(self, host, port, prefix='superset'):
            self.client = StatsClient(host=host, port=port, prefix=prefix)

        def incr(self, key):
            self.client.incr(key)

        def decr(self, key):
            self.client.decr(key)

        def timing(self, key, value):
            self.client.timing(key, value)

        def gauge(self, key):
            # pylint: disable=no-value-for-parameter
            self.client.gauge(key)
Example #27
def check_fpm_slow(coll_type, file, server, port, local):
    fpm_slow_type = "fpm_slow_" + coll_type.split('.')[0].strip()

    if fpm_slow_type in file_seek:
        offset_values = int(file_seek[fpm_slow_type][0])
        file_size = int(file_seek[fpm_slow_type][1])
    else:
        offset_values = 0
        file_size = 0
    try:
        logfile = open(file, 'r')
        '''seeklines holds the data between the position where the file was last
        closed and the position where it was opened this time'''
        seeklines = seekfile(fpm_slow_type, logfile, offset_values, file_size)
        logfile.close()
    except IOError as ioerr:
        print(ioerr)
        return

    fpm_slow_status = {'slow_num' : 0}

    if seeklines == "":
        fpm_slow_status['slow_num'] = 0
    else:
        for line in seeklines:
            fpm_slow_match = re.match(r'(^\[+\d+-\w+-\d+\s+\d+:\d+:\d+\])\s(.*)',line)
            if fpm_slow_match is not None:
                fpm_slow_status['slow_num'] += 1

    # print(fpm_slow_status)
    local_ip = local

    if local_ip:
        graphite_ip = local_ip.replace(".", "_")

    sc = StatsClient(server, port)
    for fpm_status, fpm_count in fpm_slow_status.items():
        print(fpm_status, fpm_count)
        sc.gauge(graphite_ip + ".fpm_slow." + coll_type.split('.')[0].strip() + "." + fpm_status, int(fpm_count))
Example #28
    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault("STATSD_HOST", "localhost")
        self.config.setdefault("STATSD_PORT", 8125)
        self.config.setdefault("STATSD_PREFIX", None)

        self.app = app

        self.statsd = StatsClient(
            host=self.config["STATSD_HOST"], port=self.config["STATSD_PORT"], prefix=self.config["STATSD_PREFIX"]
        )
Example #29
    def __init__(self, config, name):
        super(StatisticsForStatsd, self).__init__()
        self.config = config
        if config.prefix and name:
            self.prefix = '.'.join((config.prefix, name))
        elif config.prefix:
            self.prefix = config.prefix
        elif name:
            self.prefix = name
        else:
            self.prefix = ''
        self.statsd = StatsClient(
            config.statsd_host,
            config.statsd_port,
            self.prefix
        )

def rtl_433_probe():
    statsd_host = "localhost"
    statsd_host = "127.0.0.1"
    statsd_port = 8125
    statsd_prefix = 'rtlsdr'

    statsd = StatsClient(host=statsd_host,
                         port=statsd_port,
                         prefix=statsd_prefix)

    while True:
        line = sys.stdin.readline()
        if not line:
            break
        try:
            data = json.loads(line)

            label = sanitize(data["model"])
            if "channel" in data:
                label += ".CH" + str(data["channel"])

            if "battery" in data:
                if data["battery"] == "OK":
                    statsd.gauge(label + '.battery', 1)
                else:
                    statsd.gauge(label + '.battery', 0)

            if "humidity" in data:
                statsd.gauge(label + '.humidity', data["humidity"])

            statsd.gauge(label + '.temperature', data["temperature_C"])

        except KeyError:
            pass

        except ValueError:
            pass
Example #31
from django.contrib.auth import update_session_auth_hash, login, authenticate
from django.http import HttpResponseRedirect
from django.urls import reverse
from blog.views import home
from django import forms
import boto3
import json
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
Users = get_user_model()
from django.contrib.auth.models import User
import uuid
import logging
logger = logging.getLogger(__name__)
from statsd import StatsClient
metric = StatsClient()
# from django.core.mail import EmailMessage


def register(request):
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            reg = form.save(commit=False)
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            # reg.email = username
            reg.save()
            user = authenticate(username=username, password=password)
            login(request, user)
            timer = metric.timer('registration of user')
Example #32
import settings
from statsd import StatsClient

client = StatsClient('127.0.0.1', 8125, prefix=settings.STATSD_PREFIX)

if not settings.STATSD:
    client._send = lambda data: None
Example #33
def _client(prefix=None):
    sc = StatsClient(host=ADDR[0], port=ADDR[1], prefix=prefix)
    sc._sock = mock.Mock()
    return sc
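
With the socket mocked out, a test can assert on the exact payload the client would have put on the wire, assuming the standard UDP client whose _send calls sendto. A hedged sketch built on the helper above:

sc = _client(prefix='test')
sc.incr('foo')
payload = sc._sock.sendto.call_args[0][0]  # first positional arg of the sendto() call
assert payload == b'test.foo:1|c'          # prefixed counter, count 1, no sample rate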
Example #34
# coding: utf-8

import json
import falcon
import sys
from statsd import StatsClient

from hmtlPredictor import HMTLPredictor

STATSD = StatsClient()


class AllResource(object):
    def __init__(self, model_name = "conll_full_elmo", mode = "demo"):
        self.jmd = HMTLPredictor(model_name = model_name)
        self.mode = mode
        print(f"Server loaded with model {model_name}")
        self.response = None

    def on_get(self, req, resp):
        self.response = {}
        text = req.get_param("text") #Input text
        STATSD.incr(f"huggingNLP-{self.mode}.msg")
        
        raw_format = req.get_param_as_bool("raw", required = False, blank_as_true = False) #Non-formatted output
        raw_format = False if raw_format is None else raw_format
        
        
        self.response["text"] = text
        
        
Example #35
import requests
from requests.exceptions import RequestException, HTTPError
import yaml

from . import schemas
from .servicebase import ListenerService, ServiceBase, ListenerServiceEvent, \
    SelfserveClient, TaskNotFound, lock_table
from .tcutils import createJsonArtifact, createReferenceArtifact
from .timeutils import parseDateString

from statsd import StatsClient

import logging
import re  # used by matches_pattern below
log = logging.getLogger(__name__)

statsd = StatsClient(prefix='bbb.services')

# Buildbot statuses - these must match http://mxr.mozilla.org/build/source/buildbot/master/buildbot/status/builder.py#25
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, CANCELLED = range(7)

# Where we can get the list of all the buildbot state
ALL_THE_THINGS_URL = "https://secure.pub.build.mozilla.org/builddata/reports/allthethings.json"


def matches_pattern(s, patterns):
    """Returns True if "s" matches any of the given patterns. False otherwise."""
    for pat in patterns:
        if re.match(pat, s):
            return True
    return False
from statsd import StatsClient

statsd = StatsClient(prefix="govuk.topic-taxonomy")
Example #37
def client():
    return StatsClient(host='statsd-server',
                       port=8125,
                       prefix='hello',
                       maxudpsize=512)
Example #38
    def __init__(self, host='localhost', port=8125, prefix=None):
        self.statsd = StatsClient(host, port, prefix)
Example #39
def udp_statsd_client(**client_params):
    return StatsClient(**client_params)
Example #40
from statsd import StatsClient
from prometheus_client import start_http_server  # used when metrics are enabled below

from rucio.common.config import config_get, config_get_bool, config_get_int

SERVER = config_get('monitor',
                    'carbon_server',
                    raise_exception=False,
                    default='localhost')
PORT = config_get('monitor',
                  'carbon_port',
                  raise_exception=False,
                  default=8125)
SCOPE = config_get('monitor',
                   'user_scope',
                   raise_exception=False,
                   default='rucio')
CLIENT = StatsClient(host=SERVER, port=PORT, prefix=SCOPE)

ENABLE_METRICS = config_get_bool('monitor',
                                 'enable_metrics',
                                 raise_exception=False,
                                 default=False)
if ENABLE_METRICS:
    METRICS_PORT = config_get_int('monitor',
                                  'metrics_port',
                                  raise_exception=False,
                                  default=8080)
    start_http_server(METRICS_PORT)


def record_counter(counters, delta=1):
    """
Example #41
import time  # used by time.time() below
import chardet
from statsd import StatsClient
import os
import logging
from logging.config import fileConfig
import logging.handlers
import json


class factual_pair:
    def __init__(self, original_id, duplicate_id):
        self.original_id = original_id
        self.duplicate_id = duplicate_id


statsd = StatsClient()
start = time.time()

conn = dbapi2.connect(database="socialtopias-new",
                      user="******",
                      password="******")
cur = conn.cursor()

sourcelist = dict()
comparelist1 = dict()
comparelist2 = dict()
sourcegroup = dict()
comparegroup1 = dict()
comparegroup2 = dict()
results = dict()
source = dict()
from subprocess import PIPE, Popen, STDOUT
from statsd import StatsClient
import sys

keyspace = sys.argv[1]

GRAPHITE_URL = '10.1.1.5'
GRAPHITE_PORT = 8125
GRAPHITE_PREFIX = 'cmb_cs'

statsd_client = StatsClient(GRAPHITE_URL,
                            GRAPHITE_PORT,
                            prefix=GRAPHITE_PREFIX)

GRAFANA_MAP = {
    '99%': 'percentile_99',
    '95%': 'percentile_95',
    '50%': 'percentile_50',
}

COLUMN_INDEX = {'Write_Latency': 3, 'Read_Latency': 4}

KEYSPACE_MAP = {
    'weights':
    ['weights_from_profile', 'weights_to_profile', 'potentials_by_profile']
}


def run_command(cmd_to_run, get_output=True, raise_on_error=False):
    p = Popen(cmd_to_run, stdout=PIPE, stderr=STDOUT, shell=True)
    output = []
Example #43
    def __init__(self, host, port, prefix='rook'):
        self.client = StatsClient(host=host, port=port, prefix=prefix)
Example #44
    def _get_client(self, host, port, prefix, maxudpsize):
        return StatsClient(host=host, port=port, prefix=prefix, maxudpsize=maxudpsize)
Example #45
from statsd import StatsClient
from inferno.lib.rule import chunk_json_stream
from inferno.lib.rule import InfernoRule
from inferno.lib.rule import Keyset
from infernyx.database import insert_redshift
from functools import partial
from config_infernyx import *
import datetime
import logging

log = logging.getLogger(__name__)
AUTORUN = True
statsd = StatsClient(**STATSD)


def combiner(key, value, buf, done, params):
    if not done:
        i = len(value)
        buf[key] = [a + b for a, b in zip(buf.get(key, [0] * i), value)]
    else:
        return buf.items()


def impression_stats_init(input_iter, params):
    import geoip2.database
    import re
    try:
        geoip_file = params.geoip_file
    except Exception as e:
        # print "GROOVY: %s" % e
        geoip_file = './GeoLite2-Country.mmdb'
Example #46
class StatsdMetrics(Metrics):
    def __init__(self, host='localhost', port=8125, prefix=None):
        self.statsd = StatsClient(host, port, prefix)

    def fanout_timer(self, feed_class):
        return self.statsd.timer('%s.fanout_latency' % feed_class.__name__)

    def feed_reads_timer(self, feed_class):
        return self.statsd.timer('%s.read_latency' % feed_class.__name__)

    def on_feed_read(self, feed_class, activities_count):
        self.statsd.incr('%s.reads' % feed_class.__name__, activities_count)

    def on_feed_write(self, feed_class, activities_count):
        self.statsd.incr('%s.writes' % feed_class.__name__, activities_count)

    def on_feed_remove(self, feed_class, activities_count):
        self.statsd.incr('%s.deletes' % feed_class.__name__, activities_count)

    def on_fanout(self, feed_class, operation):
        metric = (feed_class.__name__, operation.__name__)
        self.statsd.incr('%s.fanout.%s' % metric)

    def on_activity_published(self):
        self.statsd.incr('activities.published')

    def on_activity_removed(self):
        self.statsd.incr('activities.removed')
Example #47
def main():
    statsd_client = StatsClient()
    maple_gateway_statsd_client = StatReporter(
        'path1', 'path2', statsd_client, lambda name: 'cn.vimer.%s' % name)
    maple_gateway_statsd_client.report()
Example #48
def statsd_init():
    global statsd
    statsd = StatsClient(config.STATSD['host'],
                         config.STATSD['port'],
                         prefix=config.STATSD['prefix'])
Example #49
def configure(app):
    config_file = os.environ.get('GRAPHITE_API_CONFIG',
                                 '/etc/graphite-api.yaml')
    if os.path.exists(config_file):
        with open(config_file) as f:
            config = yaml.safe_load(f)
            config['path'] = config_file
    else:
        warnings.warn("Unable to find configuration file at {0}, using "
                      "default config.".format(config_file))
        config = {}

    configure_logging(config)

    for key, value in list(default_conf.items()):
        config.setdefault(key, value)

    if config['carbon'] is not None:
        # The carbon section has a bunch of values; keep the default ones if
        # they're not provided in an overridden config.
        for key, value in list(default_conf['carbon'].items()):
            config['carbon'].setdefault(key, value)

    app.statsd = None
    if 'statsd' in config:
        try:
            from statsd import StatsClient
        except ImportError:
            warnings.warn("'statsd' is provided in the configuration but "
                          "the statsd client is not installed. Please `pip "
                          "install statsd`.")
        else:
            c = config['statsd']
            app.statsd = StatsClient(c['host'], c.get('port', 8125))

    app.cache = None
    if 'cache' in config:
        try:
            from flask.ext.cache import Cache
        except ImportError:
            warnings.warn("'cache' is provided in the configuration but "
                          "Flask-Cache is not installed. Please `pip install "
                          "Flask-Cache`.")
        else:
            cache_conf = {
                'CACHE_DEFAULT_TIMEOUT': 60,
                'CACHE_KEY_PREFIX': 'graphite-api:'
            }
            for key, value in config['cache'].items():
                cache_conf['CACHE_{0}'.format(key.upper())] = value
            app.cache = Cache(app, config=cache_conf)

    loaded_config = {'functions': {}}
    for functions in config['functions']:
        loaded_config['functions'].update(load_by_path(functions))

    if config['carbon'] is not None:
        if 'hashing_keyfunc' in config['carbon']:
            config['carbon']['hashing_keyfunc'] = load_by_path(
                config['carbon']['hashing_keyfunc'])
        else:
            config['carbon']['hashing_keyfunc'] = lambda x: x
    loaded_config['carbon'] = config['carbon']

    finders = []
    for finder in config['finders']:
        finders.append(load_by_path(finder)(config))
    loaded_config['store'] = Store(finders)
    loaded_config['searcher'] = IndexSearcher(config['search_index'])
    app.config['GRAPHITE'] = loaded_config
    app.config['TIME_ZONE'] = config['time_zone']
    logger.info("configured timezone", timezone=app.config['TIME_ZONE'])

    if 'sentry_dsn' in config:
        try:
            from raven.contrib.flask import Sentry
        except ImportError:
            warnings.warn("'sentry_dsn' is provided in the configuration but "
                          "the sentry client is not installed. Please `pip "
                          "install raven[flask]`.")
        else:
            Sentry(app, dsn=config['sentry_dsn'])

    app.wsgi_app = TrailingSlash(
        CORS(app.wsgi_app, config.get('allowed_origins')))
Example #50
WEIGHT_PAGE = 0.9
with open(CONFIG_FILE, 'r') as f:
    data = json.load(f)
    REDIS_SERVER_HOST = data['redis']['redisServerHost']
    REDIS_SERVER_PORT = int(data['redis']['redisServerPort'])
    REDIS_NEWS_EXPIRE_IN_SECONDS = int(data['redis']['newsExpireInSeconds'])
    NEWS_LIMIT = int(data['news']['newsLimit'])
    NEWS_PER_PAGE_SIZE = int(data['news']['newsPageSize'])
    NEWS_DB_COLLECTION = data['mongoDb']['newsMongoDbCollection']
    CLICKS_DB_COLLECTION = data['mongoDb']['clicksMongoDbCollection']
    LOG_CLICKS_TASK_QUEUE_URL = data['queue']['logClicksTaskQueueUrl']
    LOG_CLICKS_TASK_QUEUE_NAME = data['queue']['logClicksTaskQueueName']

redis_client = redis.StrictRedis(REDIS_SERVER_HOST, REDIS_SERVER_PORT)
CloudAMQPClient = CloudAMQPClient(LOG_CLICKS_TASK_QUEUE_URL, LOG_CLICKS_TASK_QUEUE_NAME)
statsd = StatsClient()
news_timer = statsd.timer('news')

def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) 

def getNewssummaryForuser(userId, pageNum):
    news_timer.start()
    print "get news for user: %s @ page %s" % (userId, pageNum)
    statsd.incr("user.news")
    pageNum = int(pageNum)
    startIndex = (pageNum - 1) * NEWS_PER_PAGE_SIZE
    endIndex = pageNum * NEWS_PER_PAGE_SIZE

    sliced_news = []
    #get preference for user
Example #51
class Worker(multiprocessing.Process):
    def __init__(self,
                 worker_id,
                 manager,
                 redis_connection_params,
                 sleep_time=0.1):
        self.manager = manager

        self.statsd_client = StatsClient(host=settings.STATSD_HOST,
                                         port=settings.STATSD_PORT,
                                         prefix=settings.STATSD_PREFIX)

        self.redis_connection_params = {
            k: v
            for k, v in redis_connection_params.items()
            if k in ('host', 'db', 'password', 'port')
        }

        self.worker_id = None
        self.continue_working = True
        self.sleep_time = sleep_time
        self.child_pid = None
        self.current_job_id = None
        self.status = {
            'jobs_count': 0,
            'cancelled_jobs_count': 0,
            'done_jobs_count': 0,
            'updated_at': time.time(),
            'started_at': time.time()
        }

        super(Worker, self).__init__(name="Worker")

    def set_title(self, title=None):
        base_title = "redash worker:%s" % self.worker_id
        if title:
            full_title = "%s - %s" % (base_title, title)
        else:
            full_title = base_title

        setproctitle.setproctitle(full_title)

    def run(self):
        self.worker_id = os.getpid()
        self.status['id'] = self.worker_id
        self.name = "Worker:%d" % self.worker_id
        self.manager.redis_connection.sadd('workers', self._key)
        self._save_status()
        self.set_title()

        logging.info("[%s] started.", self.name)

        signal.signal(signal.SIGINT, self._stop)
        signal.signal(signal.SIGTERM, self._stop)

        self._wait_for_jobs()

    def _stop(self, signum, frame):
        self.continue_working = False
        if self.current_job_id:
            job = Job.load(self.manager.redis_connection, self.current_job_id)
            if job:
                job.cancel()

    def _wait_for_jobs(self):
        while self.continue_working:
            job_id = self.manager.queue.pop()
            if job_id:
                self._update_status('jobs_count')
                logging.info("[%s] Processing %s", self.name, job_id)
                self._fork_and_process(job_id)
                if self.child_pid == 0:
                    return
            else:
                time.sleep(self.sleep_time)

    def _update_status(self, counter):
        self.status['updated_at'] = time.time()
        self.status[counter] += 1
        self._save_status()

    @property
    def _key(self):
        return 'worker:%s' % self.worker_id

    def _save_status(self):
        self.manager.redis_connection.hmset(self._key, self.status)

    def _fork_and_process(self, job_id):
        self.current_job_id = job_id
        self.child_pid = os.fork()
        if self.child_pid == 0:
            self.set_title("processing %s" % job_id)
            self._process(job_id)
        else:
            logging.info("[%s] Waiting for pid: %d", self.name, self.child_pid)

            try:
                _, status = os.waitpid(self.child_pid, 0)
            except OSError:
                logging.info("[%s] OSError while waiting for child to finish",
                             self.name)
                # setting status to >0, so the job cleanup is triggered
                status = 1

            self._update_status('done_jobs_count')

            job = Job.load(self.manager.redis_connection, job_id)
            if status > 0 and not job.is_finished():
                self._update_status('cancelled_jobs_count')
                logging.info(
                    "[%s] process interrupted and job %s hasn't finished; registering interruption in job",
                    self.name, job_id)
                job.done(None, "Interrupted/Cancelled while running.")

            job.expire(settings.JOB_EXPIRY_TIME)

            logging.info("[%s] Finished Processing %s (pid: %d status: %d)",
                         self.name, job_id, self.child_pid, status)

            self.child_pid = None
            self.current_job_id = None

    def _process(self, job_id):
        redis_connection = redis.StrictRedis(**self.redis_connection_params)
        job = Job.load(redis_connection, job_id)
        if job.is_finished():
            logging.warning("[%s][%s] tried to process finished job.",
                            self.name, job)
            return

        pid = os.getpid()
        job.processing(pid)

        logging.info("[%s][%s] running query; query_hash=%s", self.name,
                     job.id, job.query_hash)
        start_time = time.time()
        self.set_title('running query; job_id={0}; query_hash={1}'.format(
            job_id, job.query_hash))

        logging.info("[%s][%s] Loading query runner (%s, %s)...", self.name,
                     job.id, job.data_source_name, job.data_source_type)

        query_runner = get_query_runner(job.data_source_type,
                                        job.data_source_options)

        if getattr(query_runner, 'annotate_query', True):
            annotated_query = "/* Pid: %s, Job Id: %s, Query hash: %s, Priority: %s */ %s" % \
                              (pid, job.id, job.query_hash, job.priority, job.query)
        else:
            annotated_query = job.query

        # TODO: here's the part that needs to be forked, not all of the worker process...
        with self.statsd_client.timer(
                'worker_{}.query_runner.{}.{}.run_time'.format(
                    self.worker_id, job.data_source_type,
                    job.data_source_name)):
            data, error = query_runner(annotated_query)

        run_time = time.time() - start_time
        logging.info("[%s][%s] query finished... data length=%s, error=%s",
                     self.name, job.id, data and len(data), error)

        # Adam
        #if error != 'None':
        if error:
            logging.info(
                "[%s][%s] query failed: hash: %s, wasted time: %i, error: %s",
                self.name, job.id, job.query_hash, run_time, error)
        else:
            logging.info("[%s][%s] query succeeded: hash: %s, used time: %i",
                         self.name, job.id, job.query_hash, run_time)

        # TODO: it is possible that storing the data will fail, and we will need to retry
        # while we already marked the job as done
        query_result_id = None
        if not error:
            self.set_title("storing results %s" % job_id)
            query_result_id = self.manager.store_query_result(
                job.data_source_id, job.query, data, run_time,
                datetime.datetime.utcnow())

        self.set_title("marking job as done %s" % job_id)
        job.done(query_result_id, error)
Example #52
def process_event(
    event: dict,
    render: Render,
    thread_store: ThreadStore,
    logger: Logger,
    retry_delay_seconds: int,
    stats: StatsClient,
    mail,
) -> int:
    """Reliably send emails for the provided event.

    Attempts to send all emails with "full context". If rendering fails or some
    emails can't be sent to some recipients, then they're retried with a
    "minimal context" to ensure that users still receive a notification.

    Note that it's still possible for a user to not get an email for an
    event if there's an issue with rendering or sending a "minimal context"
    email. However, due to the deliberately simple nature of these emails,
    the risk should be minimal.


    Returns the number of emails successfully sent.
    """
    timestamp = event["timestamp"]
    full_context = event.get("context")
    recipient_filter_list = None
    if not full_context:
        stats.incr(STAT_FAILED_TO_RENDER_FULL_CONTEXT_EVENT)
        logger.warning(
            'Phabricator event didn\'t have "full" context. '
            "Falling back to sending a simpler, more resilient email."
        )
        successful_full_email_count = 0
    else:
        process_full_result = process_emails_full(
            timestamp,
            event["isSecure"],
            full_context,
            render,
            thread_store,
            stats,
            logger,
            retry_delay_seconds,
            mail,
        )
        successful_full_email_count = process_full_result.successfully_sent_email_count
        if process_full_result.state == ProcessEventState.SUCCESS:
            return successful_full_email_count
        elif process_full_result.state == ProcessEventState.FAILED_TO_RENDER:
            stats.incr(STAT_FAILED_TO_RENDER_FULL_CONTEXT_EVENT)
            logger.warning(
                "Failed to render emails for a Phabricator event with full "
                "context. Falling back to sending a simpler, more resilient email."
            )
        else:
            stats.incr(
                STAT_FAILED_TO_SEND_FULL_CONTEXT_MAIL,
                count=len(process_full_result.failed_to_send_recipients),
            )
            logger.warning(
                "Failed to send at least one email with full context. Falling "
                "back to sending a simpler, more resilient email for the "
                "affected recipient(s)."
            )
            recipient_filter_list = process_full_result.failed_to_send_recipients

    # If we've reached this point, we've failed to render or send emails with
    # full context.
    process_minimal_result = process_events_minimal(
        timestamp,
        event["minimalContext"],
        render,
        thread_store,
        stats,
        logger,
        retry_delay_seconds,
        recipient_filter_list,
        mail,
    )

    if process_minimal_result.state == ProcessEventState.FAILED_TO_RENDER:
        stats.incr(STAT_FAILED_TO_RENDER_MINIMAL_CONTEXT_EVENT)
        logger.error(
            "Failed to render emails for a Phabricator event with minimal "
            "context. Skipping these emails."
        )
    elif process_minimal_result.state == ProcessEventState.FAILED_TO_SEND:
        stats.incr(
            STAT_FAILED_TO_SEND_MINIMAL_CONTEXT_MAIL,
            count=len(process_minimal_result.failed_to_send_recipients),
        )
        logger.error(
            "Failed to send at least one email with minimal context. Skipping "
            "these emails."
        )
    return (
        successful_full_email_count
        + process_minimal_result.successfully_sent_email_count
    )
Example #53
    def __init__(self, host, port, prefix='superset'):
        self.client = StatsClient(host=host, port=port, prefix=prefix)
Example #54
def run_with_config(sync, config):
    """
    Execute the cartography.sync.Sync.run method with parameters built from the given configuration object.

    This function will create a Neo4j driver object from the given Neo4j configuration options (URI, auth, etc.) and
    will choose a sensible update tag if one is not specified in the given configuration.

    :type sync: cartography.sync.Sync
    :param sync: A sync task to run.
    :type config: cartography.config.Config
    :param config: The configuration to use to run the sync task.
    """
    # Initialize statsd client if enabled
    if config.statsd_enabled:
        cartography.util.stats_client = StatsClient(
            host=config.statsd_host,
            port=config.statsd_port,
            prefix=config.statsd_prefix,
        )

    neo4j_auth = None
    if config.neo4j_user or config.neo4j_password:
        neo4j_auth = (config.neo4j_user, config.neo4j_password)
    try:
        neo4j_driver = GraphDatabase.driver(
            config.neo4j_uri,
            auth=neo4j_auth,
            max_connection_lifetime=config.neo4j_max_connection_lifetime,
        )
    except neobolt.exceptions.ServiceUnavailable as e:
        logger.debug("Error occurred during Neo4j connect.", exc_info=True)
        logger.error(
            (
                "Unable to connect to Neo4j using the provided URI '%s', an error occurred: '%s'. Make sure the Neo4j "
                "server is running and accessible from your network."
            ),
            config.neo4j_uri,
            e,
        )
        return 1
    except neobolt.exceptions.AuthError as e:
        logger.debug("Error occurred during Neo4j auth.", exc_info=True)
        if not neo4j_auth:
            logger.error(
                (
                    "Unable to auth to Neo4j, an error occurred: '%s'. cartography attempted to connect to Neo4j "
                    "without any auth. Check your Neo4j server settings to see if auth is required and, if it is, "
                    "provide cartography with a valid username and password."
                ),
                e,
            )
        else:
            logger.error(
                (
                    "Unable to auth to Neo4j, an error occurred: '%s'. cartography attempted to connect to Neo4j with "
                    "a username and password. Check your Neo4j server settings to see if the username and password "
                    "provided to cartography are valid credentials."
                ),
                e,
            )
        return 1
    default_update_tag = int(time.time())
    if not config.update_tag:
        config.update_tag = default_update_tag
    return sync.run(neo4j_driver, config)
Example #55
    redis_url = urlparse.urlparse(settings.REDIS_URL)
    if redis_url.path:
        redis_db = redis_url.path[1:]  # strip the leading '/'
    else:
        redis_db = 0

    r = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_url.password)

    return r


setup_logging()
redis_connection = create_redis_connection()
mail = Mail()
mail.init_mail(settings.all_settings())
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)

import_query_runners(settings.QUERY_RUNNERS)

from redash.version_check import reset_new_version_status
reset_new_version_status()


class SlugConverter(BaseConverter):
    def to_python(self, value):
        # This is an ugly workaround for when we enable multi-org and some files are being called by the index rule:
        if value in ('google_login.png', 'favicon.ico', 'robots.txt', 'views'):
            raise ValidationError()

        return value
Example #56
    @classmethod
    def gauge(cls, stat, value, rate=1, delta=False):
        pass

    @classmethod
    def timing(cls, stat, dt):
        pass


Stats = DummyStatsLogger

if conf.getboolean('scheduler', 'statsd_on'):
    from statsd import StatsClient
    statsd = StatsClient(host=conf.get('scheduler', 'statsd_host'),
                         port=conf.getint('scheduler', 'statsd_port'),
                         prefix=conf.get('scheduler', 'statsd_prefix'))
    Stats = statsd
else:
    Stats = DummyStatsLogger

HEADER = """\
  ____________       _____________
 ____    |__( )_________  __/__  /________      __
____  /| |_  /__  ___/_  /_ __  /_  __ \_ | /| / /
___  ___ |  / _  /   _  __/ _  / / /_/ /_ |/ |/ /
 _/_/  |_/_/  /_/    /_/    /_/  \____/____/|__/
 """

BASE_LOG_URL = '/admin/airflow/log'
LOGGING_LEVEL = logging.INFO
Example #57
def init_statsd(prefix=None, host=None, port=8125):
    statsd = StatsClient(host, port, prefix=prefix)
    return statsd
Example #58
    def client(self):
        if self._client is None:
            self._client = StatsClient(**self.config)
        return self._client
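
The property creates the client lazily on first access, so merely constructing the owning object never opens a socket. A sketch of the surrounding class this implies, assuming config is a dict of StatsClient keyword arguments:

class LazyStats(object):
    def __init__(self, **config):
        self.config = config  # kwargs forwarded to StatsClient
        self._client = None   # not created until first use

    @property
    def client(self):
        if self._client is None:
            self._client = StatsClient(**self.config)
        return self._client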
Example #59

def read_cpu_temperature():
    try:
        region0_temperature = subprocess.check_output(
            ["cat", "/etc/armbianmonitor/datasources/soctemp"])
        return float(region0_temperature)
    except Exception:
        return float(-1)


if __name__ == '__main__':
    start = timer()

    bus = UART_Adapter('/dev/ttyUSB0')
    stats = StatsClient('statsd', 8125, 'readtemp')
    try:
        with open('/boot/id.txt', 'r') as f:
            sensor_id = f.readline().strip()
    except Exception:
        sensor_id = 'unknown'

    stats.gauge('online.%s' % sensor_id, 1)
    led_toggle(1)
    for rom in get_roms():
        read_temperature(rom)
    led_toggle(0)

    elapsed_time = timer() - start
    stats.timing('runtime.%s.elapsed' % sensor_id, int(1000 * elapsed_time))
Example #60
class MetricProcessor():
    BOLTS = ['mfp', 'reporter']
    SPOUTS = ['transaction']
    COMPONENTS = set(BOLTS + SPOUTS)
    DATA_POINTS = [
        'component_id', 'arrival_count', 'arrival_rate', 'executors',
        'population', 'sojourn_time'
    ]

    def __init__(self, metric_dict):
        self.metric_dict = metric_dict
        self.meta = Meta(metric_dict['meta'])
        self.metrics = Metrics(metric_dict['metrics'])
        self.logger = MetricLogger("metrics_4.csv", self.DATA_POINTS)
        self.statsd = StatsClient()
        self.api = ApiClient()

    @property
    def component_id(self):
        return self.meta['srcComponentId']

    @property
    def is_spout(self):
        return self.component_id in self.SPOUTS

    @property
    def is_bolt(self):
        return self.component_id in self.BOLTS

    @property
    @handleMissingMetric
    def arrival_rate(self):
        return self.metrics['__receive']['arrival_rate_secs']

    @property
    @handleMissingMetric
    def sojourn_time(self):
        return self.metrics['__receive']['sojourn_time_ms']

    @property
    @handleMissingMetric
    def population(self):
        return self.metrics['__receive']['population']

    @property
    @handleMissingMetric
    def execute_latency(self):
        latency_dict = self.metrics['__execute-latency']
        return list(latency_dict.values())[0]

    @property
    def processing_rate(self):
        if self.execute_latency <= 0:
            return -1
        return 1000.0 / self.execute_latency

    @property
    @handleMissingMetric
    def process_latency(self):
        return self.metrics['__process-latency']

    @property
    @handleMissingMetric
    def dropped_messages(self):
        return self.metrics['__receive']['dropped_messages']

    @property
    @handleMissingMetric
    def overflow(self):
        return self.metrics['__receive']['overflow']

    @property
    @handleMissingMetric
    def backpressure(self):
        return self.metrics['__skipped-backpressure-ms']

    @property
    @handleMissingMetric
    def executors(self):
        if not hasattr(self, "_executors"):
            topology_id = self.api.get_summary().topology_id
            self._executors = self.api.get_component(self.component_id,
                                                     topology_id).executors
        return self._executors

    @property
    @handleMissingMetric
    def current_arrival_count(self):
        return int(self.metrics['arrival_count'])

    @property
    def arrival_count(self):
        return _arrival_count_

    def process(self):
        self.update_global_arrival_count()

        if not self.should_process():
            return

        self.collect_executor_data()

        self.log_to_file()
        self.track_metric_status()
        # self.log_to_statsd()

        print "\n\n", self

    def track_metric_status(self):
        status = Status(component=self.component_id,
                        arrival_rate=self.arrival_count,
                        population=self.population,
                        executors=self.executors,
                        processing_rate=self.processing_rate)
        status_store.push(status)

    def update_global_arrival_count(self):
        if self.current_arrival_count >= 0:
            global _arrival_count_
            _arrival_count_ = self.current_arrival_count

    def should_process(self):
        if self.is_spout:
            return self.should_process_spout()
        elif self.is_bolt:
            return self.should_process_bolt()
        return False

    def should_process_spout(self):
        return int(self.arrival_count) > 0

    def should_process_bolt(self):
        return int(self.processing_rate) > 0

    def log_to_statsd(self):
        for point in self.DATA_POINTS[1:]:
            self.statsd.gauge('%s.arrival_rate' % self.component_id,
                              getattr(self, point))

    def log_to_file(self):
        log_data = [getattr(self, point) for point in self.DATA_POINTS]
        self.logger.log(log_data)

    def collect_executor_data(self):
        topology_id = self.api.get_summary().topology_id
        self.executor_data = self.api.get_topology(topology_id)

    def __str__(self):
        return "%s. arrival_count: %s, arrival_rate: %s, sojourn_time: %s, population: %s, dropped_messages: %s, execute_latency: %s, processing_rate: %s, executors: %s" % (
                self.component_id, \
                self.arrival_count, \
                self.arrival_rate, \
                self.sojourn_time, \
                self.population, \
                self.dropped_messages, \
                self.execute_latency, \
                self.processing_rate, \
                self.executors)