Example #1
class StaticticStatsD(object):
    """
    Send stats to statsd.
    """
    def __init__(self, hostname, host, port, prefix=None):
        self.client = StatsClient(host, port, prefix=prefix)
        self.hostname = hostname

    def incr(self, metric, value=1, prefix=None):
        """
        Increment 'metric' counter with 'value'.
        """
        if prefix is not None:
            metric = '%s.%s' % (prefix, metric)

        self.client.incr(metric, value)

        # separate metric for hostname
        if self.hostname is not None:
            metric = '%s.%s' % (self.hostname, metric)
            self.client.incr(metric, value)

    def timing(self, metric, value, prefix=None):
        """
        Send 'metric' timing.
        """
        if prefix is not None:
            metric = '%s.%s' % (prefix, metric)

        self.client.timing(metric, value)

        # separate metric for hostname
        if self.hostname is not None:
            metric = '%s.%s' % (self.hostname, metric)
            self.client.timing(metric, value)
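
A hypothetical usage sketch for the class above; the hostname, address, and metric names are illustrative assumptions, not part of the original example:

stats = StaticticStatsD(hostname='web01', host='127.0.0.1', port=8125)
stats.incr('requests', prefix='web')            # sends web.requests, then web01.web.requests
stats.timing('request_time', 42, prefix='web')  # same per-host doubling for the timing metric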
Example #2
    class StatsdStatsLogger(BaseStatsLogger):

        def __init__(self, host='localhost', port=8125,
                     prefix='superset', statsd_client=None):
            """
            Initializes from either params or a supplied, pre-constructed statsd client.

            If statsd_client argument is given, all other arguments are ignored and the
            supplied client will be used to emit metrics.
            """
            if statsd_client:
                self.client = statsd_client
            else:
                self.client = StatsClient(host=host, port=port, prefix=prefix)

        def incr(self, key):
            self.client.incr(key)

        def decr(self, key):
            self.client.decr(key)

        def timing(self, key, value):
            self.client.timing(key, value)

        def gauge(self, key):
            # pylint: disable=no-value-for-parameter
            self.client.gauge(key)
Example #3
class StatsdWrapper:
    """Simple wrapper around the statsd client."""
    statsd = None

    def __init__(self, host, port, prefix):
        if host:
            self.statsd = StatsClient(
                host=host,
                port=port,
                prefix=prefix,
            )

    def incr(self, *args):
        if self.statsd:
            self.statsd.incr(*args)

    def decr(self, *args):
        if self.statsd:
            self.statsd.decr(*args)

    def gauge(self, *args):
        if self.statsd:
            self.statsd.gauge(*args)

    def timing(self, *args):
        if self.statsd:
            self.statsd.timing(*args)

    def timer(self, *args):
        if self.statsd:
            self.statsd.timer(*args)

    def set(self, *args):
        if self.statsd:
            self.statsd.set(*args)
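
A short hypothetical usage sketch: because the wrapper only builds a StatsClient when host is truthy, an empty host turns every call into a silent no-op, which is convenient for tests or environments without statsd:

metrics = StatsdWrapper(host='', port=8125, prefix='myapp')
metrics.incr('jobs.started')          # dropped: no client was created

metrics = StatsdWrapper(host='127.0.0.1', port=8125, prefix='myapp')
metrics.timing('jobs.duration', 123)  # sent to statsd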
Example #4
    class StatsdStatsLogger(BaseStatsLogger):
        def __init__(  # pylint: disable=super-init-not-called
            self,
            host: str = "localhost",
            port: int = 8125,
            prefix: str = "superset",
            statsd_client: Optional[StatsClient] = None,
        ) -> None:
            """
            Initializes from either params or a supplied, pre-constructed statsd client.

            If statsd_client argument is given, all other arguments are ignored and the
            supplied client will be used to emit metrics.
            """
            if statsd_client:
                self.client = statsd_client
            else:
                self.client = StatsClient(host=host, port=port, prefix=prefix)

        def incr(self, key: str) -> None:
            self.client.incr(key)

        def decr(self, key: str) -> None:
            self.client.decr(key)

        def timing(self, key: str, value: float) -> None:
            self.client.timing(key, value)

        def gauge(self, key: str, value: float) -> None:
            self.client.gauge(key, value)
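
A brief usage sketch for the typed logger above, showing both construction paths; hosts and keys are assumptions:

# Construct from parameters...
logger = StatsdStatsLogger(host='127.0.0.1', port=8125, prefix='superset')
logger.incr('queries')

# ...or inject a pre-constructed client (all other arguments are then ignored).
shared = StatsClient(host='127.0.0.1', port=8125, prefix='superset')
logger = StatsdStatsLogger(statsd_client=shared)
logger.timing('query_time_ms', 12.5)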
Example #5
    class StatsdStatsLogger(BaseStatsLogger):
        def __init__(self,
                     host="localhost",
                     port=8125,
                     prefix="myapp",
                     statsd_client=None):
            """
            Initializes from either params or a supplied, pre-constructed statsd client.

            If statsd_client argument is given, all other arguments are ignored and the
            supplied client will be used to emit metrics.
            """
            if statsd_client:
                self.client = statsd_client
            else:
                self.client = StatsClient(host=host, port=port, prefix=prefix)

        def incr(self, key):
            self.client.incr(key)

        def decr(self, key):
            self.client.decr(key)

        def timing(self, key, value):
            self.client.timing(key, value)

        def gauge(self, key):
            # pylint: disable=no-value-for-parameter
            self.client.gauge(key)
Example #6
 def _evaluate(experiment: EvExperiment, dao: Dao, statsd: StatsClient):
     try:
         with statsd.timer('timing.evaluation'):
             _logger.info(f'Evaluating experiment [{experiment.id}]')
             _logger.debug(
                 f'Loading goals for experiment [{experiment.id}]')
             with statsd.timer('timing.query'):
                 goals = dao.get_agg_goals(experiment).sort_values(
                     ['exp_variant_id', 'goal'])
                 _logger.info(
                     f'Retrieved {len(goals)} goals in experiment [{experiment.id}]'
                 )
             with statsd.timer('timing.stats'):
                 evaluation = experiment.evaluate_agg(goals)
                 statsd.incr('evaluations')
             _logger.info((
                 f'Evaluation of experiment [{experiment.id}] finished with evaluation'
                 f' of {evaluation.metrics.metric_id.nunique()} '
                 f'metrics and {evaluation.checks.check_id.nunique()} checks'
             ))
         return Result.from_evaluation(experiment, evaluation)
     except Exception as e:
         _logger.error(
             f'Cannot evaluate experiment [{experiment.id}] because of {e}')
         _logger.exception(e)
         statsd.incr('errors.experiment')
         raise HTTPException(
             status_code=500,
             detail=
             f'Cannot evaluate experiment [{experiment.id}] because of {e}',
         )
Example #7
def send_stats(last_timestamp, last_message_count, json_filename):
	with open(json_filename) as data_file:
		data = json.load(data_file)

	current_timestamp = data["now"]
	current_message_count = data["messages"]

	secs = False
	msgs = False

	if last_timestamp is False:
		print("Starting up, first pass....")
	elif current_message_count < last_message_count:
		print("Looks like dump1090 restarted, message count reset (%d)" % current_message_count)
	else:
		secs = current_timestamp - last_timestamp
		msgs = current_message_count - last_message_count

		print("{0} sec\t{1} messages\t{2} messages per sec avg".format(secs, msgs, (msgs / secs)))

	last_timestamp = current_timestamp
	last_message_count = current_message_count
	threading.Timer(INTERVAL, send_stats, [last_timestamp, last_message_count, json_filename]).start()

	aircrafts_5s = []
	aircrafts_10s = []
	aircrafts_30s = []
	aircrafts_60s = []

	for aircraft in data["aircraft"]:
		if aircraft["seen"] < 5:
			aircrafts_5s.append(aircraft["hex"])
		if aircraft["seen"] < 10:
			aircrafts_10s.append(aircraft["hex"])
		if aircraft["seen"] < 30:
			aircrafts_30s.append(aircraft["hex"])
		if aircraft["seen"] < 60:
			aircrafts_60s.append(aircraft["hex"])

	print "\t5s:{0}\t10s:{1}\t30s:{2}\t60s:{3}".format(len(aircrafts_5s), len(aircrafts_10s), len(aircrafts_30s), len(aircrafts_60s))

	radio_name = sys.argv[1]

	if secs:
		client = StatsClient(STATSD_HOST)
		client.incr("radios.%s.message_rate" % radio_name, msgs)

		pipe = client.pipeline()
		c = 0
		max_msg_size = 20
		for hex in aircrafts_10s:
			pipe.set("radios.%s.aircraft" % radio_name, hex)
			c = c + 1
			if c == max_msg_size:
				pipe.send()
				c = 0
		if c:  # flush any stats still buffered in the pipeline
			pipe.send()
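
The manual chunking above can also be expressed with the pipeline as a context manager, which buffers stats and flushes them on exit in as few UDP packets as maxudpsize allows; a minimal sketch with an assumed host and metric names:

from statsd import StatsClient

client = StatsClient('127.0.0.1')
with client.pipeline() as pipe:
    for hex_code in ['abc123', 'def456']:
        pipe.set('radios.radio1.aircraft', hex_code)
# leaving the with-block flushes any buffered stats automatically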
Example #8
def test_disabled_client():
    """ Assert that a cliend with disabled=True does not send any data to
    statsd.
    """
    sc = StatsClient(host=ADDR[0], port=ADDR[1], disable=True)
    sc._sock = mock.Mock()

    sc.incr('foo')

    eq_(sc._sock.call_count, 0)
Example #9
def _increment_metric(statsd_metric):
    """
    Send messages to statsd; this is similar to:
    echo "airflow.operator_successes_PythonOperator:1|c" | nc -u -w0 127.0.0.1 8125
    """
    statsd = StatsClient(host="127.0.0.1", port=8125, prefix="airflow")
    statsd.incr(statsd_metric)
    # Avoid race conditions in our testing. After sending the data to
    # statsd, we should allow time for statsd exporter to collect
    # and serve new values
    sleep(0.5)
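
For reference, the same counter update can be produced without the client library by writing the statsd wire format from the docstring directly to UDP; a minimal sketch:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"airflow.operator_successes_PythonOperator:1|c",
            ("127.0.0.1", 8125))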
Example #10
class StatisticsForStatsd(RequiredConfig):
    """This class is a wrapper around statsd adding a simple configman
    compatible interface and stats naming scheme.  Code using this class
    will distrubute `incr` calls with names associated with them.  When
    ever an `incr` call is encountered, the name will be paired with the
    name of the statsd names and the increment action fired off.

    This class will only send stats `incr` calls for names that appear in
    the configuration parameter `active_counters_list`.  This enables counters
    to be turned on and off at configuration time."""

    required_config = Namespace()
    required_config.add_option('statsd_host',
                               doc='the hostname of statsd',
                               default='')
    required_config.add_option('statsd_port',
                               doc='the port number for statsd',
                               default=8125)
    required_config.add_option(
        'prefix',
        doc='a string to be used as the prefix for statsd names',
        default='')
    required_config.add_option(
        'active_counters_list',
        default='',
        #default='restarts, jobs, criticals, errors, mdsw_failures',
        doc='a comma delimited list of counters',
        from_string_converter=str_to_list)

    def __init__(self, config, name):
        super(StatisticsForStatsd, self).__init__()
        self.config = config
        if config.prefix and name:
            self.prefix = '.'.join((config.prefix, name))
        elif config.prefix:
            self.prefix = config.prefix
        elif name:
            self.prefix = name
        else:
            self.prefix = ''
        self.statsd = StatsClient(config.statsd_host, config.statsd_port,
                                  self.prefix)

    def incr(self, name):
        if (self.config.statsd_host
                and name in self.config.active_counters_list):
            if self.prefix and name:
                name = '.'.join((self.prefix, name))
            elif self.prefix:
                name = self.prefix
            elif not name:
                name = 'unknown'
            self.statsd.incr(name)
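
A rough usage sketch with a stand-in config object; the real class expects a configman-produced configuration, so the SimpleNamespace here is only illustrative:

from types import SimpleNamespace

config = SimpleNamespace(
    statsd_host='127.0.0.1',
    statsd_port=8125,
    prefix='processor',
    active_counters_list=['restarts', 'errors'],
)
stats = StatisticsForStatsd(config, 'crash_reports')
stats.incr('errors')    # sent as processor.crash_reports.errors
stats.incr('warnings')  # dropped: not in active_counters_list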
Example #11
    class StatsdStatsLogger(BaseStatsLogger):
        def __init__(self, host, port, prefix='superset'):
            self.client = StatsClient(host=host, port=port, prefix=prefix)

        def incr(self, key):
            self.client.incr(key)

        def decr(self, key):
            self.client.decr(key)

        def gauge(self, key):
            # pylint: disable=no-value-for-parameter
            self.client.gauge(key)
Example #12
    class StatsdStatsLogger(BaseStatsLogger):
        def __init__(self, host, port, prefix='superset'):
            self.client = StatsClient(host=host, port=port, prefix=prefix)

        def incr(self, key):
            self.client.incr(key)

        def decr(self, key):
            self.client.decr(key)

        def gauge(self, key):
            # pylint: disable=no-value-for-parameter
            self.client.gauge(key)
Example #13
    class StatsdStatsLogger(BaseStatsLogger):
        def __init__(self, host, port, prefix='superset'):
            self.client = StatsClient(
                  host=host,
                  port=port,
                  prefix=prefix)

        def incr(self, key):
            self.client.incr(key)

        def decr(self, key):
            self.client.decr(key)

        def gauge(self, key):
            self.client.gauge(key)
Example #14
 async def evaluate_experiment(
         experiment: Experiment,
         evaluation_pool: ThreadPoolExecutor = Depends(get_executor_pool),
         dao: Dao = Depends(get_dao),
         statsd: StatsClient = Depends(get_statsd),
 ):
     """
     Evaluates single `Experiment`.
     """
     _logger.info(f'Calling evaluate with {experiment.json()}')
     statsd.incr('requests.evaluate')
     loop = asyncio.get_event_loop()
     return await loop.run_in_executor(evaluation_pool, _evaluate,
                                       experiment.to_experiment(statsd),
                                       dao, statsd)
Example #15
class _Statsd(object):
    def __init__(self, config):
        if config.get('datadog', True):
            initialize(statsd_host=config['host'],
                       statsd_port=config['port'],
                       prefix=config['prefix'])
            self.datadog = True
            self._statsd = statsd
        else:
            self.datadog = False
            self._statsd = StatsClient(config['host'],
                                       config['port'],
                                       config['prefix'])

    def incr(self, metric, count=1, rate=1, **kw):
        if self.datadog:
            return self._statsd.increment(metric, value=count,
                                          sample_rate=rate, **kw)
        else:
            return self._statsd.incr(metric, count=count, rate=rate)

    def timer(self, metric, rate=1, **kw):
        if self.datadog:
            return self._statsd.timed(metric, sample_rate=rate, **kw)
        else:
            return self._statsd.timer(metric, rate=rate)
Example #16
class _Statsd(object):
    def __init__(self, config):
        if config.get('datadog', True):
            initialize(statsd_host=config['host'],
                       statsd_port=config['port'],
                       prefix=config['prefix'])
            self.datadog = True
            self._statsd = statsd
        else:
            self.datadog = False
            self._statsd = StatsClient(config['host'], config['port'],
                                       config['prefix'])

    def incr(self, metric, count=1, rate=1, **kw):
        if self.datadog:
            return self._statsd.increment(metric,
                                          value=count,
                                          sample_rate=rate,
                                          **kw)
        else:
            return self._statsd.incr(metric, count=count, rate=rate)

    def timer(self, metric, rate=1, **kw):
        if self.datadog:
            return self._statsd.timed(metric, sample_rate=rate, **kw)
        else:
            return self._statsd.timer(metric, rate=rate)
Example #17
class StatsDBackend(BaseBackend):

    name = 'statsd'

    def __init__(self, config):
        self.config = config
        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)
        # STATS_RATE is read by every method below; give it a default so a
        # bare config does not raise KeyError.
        self.config.setdefault('STATS_RATE', 1)

        self.statsd = StatsClient(self.config['STATSD_HOST'],
                                  self.config['STATSD_PORT'],
                                  self.config['STATSD_PREFIX'])

    def timing(self, stat_name, delta):
        return self.statsd.timing(stat_name, delta, self.config['STATS_RATE'])

    def incr(self, stat_name, count=1):
        return self.statsd.incr(stat_name, count, self.config['STATS_RATE'])

    def decr(self, stat_name, count=1):
        return self.statsd.decr(stat_name, count, self.config['STATS_RATE'])

    def gauge(self, stat_name, value, delta=False):
        return self.statsd.gauge(stat_name, value, self.config['STATS_RATE'], delta)
Example #18
def _send_emails(
    mail,
    stats: StatsClient,
    logger: Logger,
    emails: List[OutgoingEmail],
    retry_delay_seconds: int,
) -> List[str]:
    """Attempt to send emails, retrying if necessary.

    Returns the list of recipient email addresses to whom an email failed to
    be sent.
    """

    failed_recipients = []
    for email in emails:
        while True:
            result = mail.send(email)
            if result.status == SendEmailState.TEMPORARY_FAILURE:
                stats.incr(STAT_FAILED_TO_SEND_MAIL_TEMPORARY)
                logger.warning(
                    'Encountered temporary failure while sending email: "{}"'.format(
                        result.reason_text
                    )
                )

                # "Temporary failures" can be anything from a transient network glitch
                # to something as serious and long-lived as Amazon pausing our
                # ability to send emails.
                logger.warning("Sleeping for {} seconds".format(retry_delay_seconds))
                time.sleep(retry_delay_seconds)
                continue  # retry sending this email
            elif result.status == SendEmailState.PERMANENT_FAILURE:
                logger.warning(render_exception(result.exception))
                sentry_sdk.capture_exception(result.exception)
                failed_recipients.append(email.to)
            break

    return failed_recipients
Example #19
class StatsdClient():
    def __init__(self):
        self.statsd_client = None

    def init_app(self, app, *args, **kwargs):
        app.statsd_client = self
        self.active = app.config.get('STATSD_ENABLED')
        self.namespace = "{}.notifications.{}.".format(
            app.config.get('NOTIFY_ENVIRONMENT'),
            app.config.get('NOTIFY_APP_NAME'))

        if self.active:
            self.statsd_client = StatsClient(
                app.config.get('STATSD_HOST'),
                app.config.get('STATSD_PORT'),
                prefix=app.config.get('STATSD_PREFIX'))

    def format_stat_name(self, stat):
        return self.namespace + stat

    def incr(self, stat, count=1, rate=1):
        if self.active:
            self.statsd_client.incr(self.format_stat_name(stat), count, rate)

    def gauge(self, stat, count):
        if self.active:
            self.statsd_client.gauge(self.format_stat_name(stat), count)

    def timing(self, stat, delta, rate=1):
        if self.active:
            self.statsd_client.timing(self.format_stat_name(stat), delta, rate)

    def timing_with_dates(self, stat, start, end, rate=1):
        if self.active:
            # 'start' is expected to be the later of the two timestamps,
            # so the computed delta comes out positive.
            delta = (start - end).total_seconds()
            self.statsd_client.timing(self.format_stat_name(stat), delta, rate)
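
A hypothetical Flask wiring for the client above; the config keys mirror the ones read in init_app:

from flask import Flask

app = Flask(__name__)
app.config.update(
    STATSD_ENABLED=True,
    NOTIFY_ENVIRONMENT='dev',
    NOTIFY_APP_NAME='api',
    STATSD_HOST='127.0.0.1',
    STATSD_PORT=8125,
    STATSD_PREFIX=None,
)
statsd_client = StatsdClient()
statsd_client.init_app(app)
statsd_client.incr('emails.sent')  # emitted as dev.notifications.api.emails.sent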
Example #20
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.app = app

        self.statsd = StatsClient(self.config['STATSD_HOST'],
                                  self.config['STATSD_PORT'],
                                  self.config['STATSD_PREFIX'])

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)

    def set(self, *args, **kwargs):
        return self.statsd.set(*args, **kwargs)
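
A hedged usage sketch for the Flask-style extension above; the route and metric name are assumptions:

from flask import Flask

app = Flask(__name__)
app.config['STATSD_HOST'] = '127.0.0.1'
statsd = StatsD(app)

@app.route('/')
def index():
    statsd.incr('views.index')
    return 'ok'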
Example #21
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.app = app

        self.statsd = StatsClient(self.config['STATSD_HOST'],
            self.config['STATSD_PORT'], self.config['STATSD_PREFIX'])

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)

    def set(self, *args, **kwargs):
        return self.statsd.set(*args, **kwargs)
Example #22
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None
        if app is not None:
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault("STATSD_HOST", "localhost")
        self.config.setdefault("STATSD_PORT", 8125)
        self.config.setdefault("STATSD_PREFIX", None)

        self.app = app

        self.statsd = StatsClient(
            host=self.config["STATSD_HOST"], port=self.config["STATSD_PORT"], prefix=self.config["STATSD_PREFIX"]
        )

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)
Example #23

def chunker(seq, size):
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))


if __name__ == "__main__":
    statsd_client = StatsClient(host="localhost",
                                port=8125,
                                prefix="hyrule",
                                maxudpsize=512,
                                ipv6=False)
    connection_attempts = read_auth_log_file_last_24_hours()
    locations = combine_duplicated_regions_with_different_lat_long(
        filter_empty_locations(get_location_from_ips(connection_attempts)))

    for location in locations:
        pprint(location)
        try:
            statsd_client.incr(
                "ssh_attempts",
                tags={
                    "country": location.get("country"),
                    "region": location.get("regionName"),
                    "latitude": location.get("lat"),
                    "longitude": location.get("lon")
                },
            )
        except UnicodeEncodeError as error:
            pprint(f"Could not ascii encode {json.dumps(location)}")
Example #24
#!/usr/bin/python

from statsd import StatsClient
import os

statsd = StatsClient(host='metrics.ccs.neu.edu')

if os.getenv('PAM_TYPE') == 'open_session':
  statsd.incr('ccs.linux.102.logins')
else:
  statsd.decr('ccs.linux.102.logins')

Example #25
    conn = util.opendb()
    c = conn.cursor()

    util.create_schema(c)
    auth = util.authinfo(c)
    
    (q,option) = get_option(c,q)
    last_q = q.split(' ')[-1]
    if q.startswith('_'):   # option
        process_option(c,q)
    elif q.startswith('+'): # add bookmark
        add_bookmark(c,q)
    elif last_q.startswith('#') and (':' not in q): # tag expansion
        pbsearch_tag(c,'',last_q[1:])
    else:
        pbsearch_sql(c,option,q)
    
    util.closedb(conn)

if __name__ == '__main__':
    try:
        statsd = StatsClient(host='g.jmjeong.com',
                             port=8125,
                             prefix='jmjeong.alfred.bookmark')

        with statsd.timer('main'):
            statsd.incr('launch')
            main()
    except:
        main()
Example #26
class StatsD(object):
    def __init__(self, app=None, config=None):
        self.config = None
        self.statsd = None

        if app is not None:
            self.init_app(app, config=config)
        else:
            self.app = None

    def init_app(self, app, config=None):
        if config is not None:
            self.config = config
        elif self.config is None:
            self.config = app.config

        self.config.setdefault('STATSD_HOST', 'localhost')
        self.config.setdefault('STATSD_PORT', 8125)
        self.config.setdefault('STATSD_PREFIX', None)

        self.app = app

        self.statsd = StatsClient(self.config['STATSD_HOST'],
            self.config['STATSD_PORT'], self.config['STATSD_PREFIX'])

        self.use_ms = self.config.get('STATSD_USEMS', True)

        # Configure any of our middleware
        self.setup_middleware()

    def timer(self, *args, **kwargs):
        return self.statsd.timer(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.statsd.timing(*args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.statsd.incr(*args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.statsd.decr(*args, **kwargs)

    def gauge(self, *args, **kwargs):
        return self.statsd.gauge(*args, **kwargs)

    def setup_middleware(self):
        """Helper to configure/setup any Flask-StatsD middleware"""
        # Configure response time middleware (if desired)
        self.config.setdefault('STATSD_CONFIGURE_MIDDLEWARE', True)
        self.config.setdefault('STATSD_RESPONSE_METRIC_NAME', 'response.time')
        self.config.setdefault('STATSD_NUMBER_OF_REQUESTS_METRIC_NAME', 'request.api')
        self.config.setdefault('STATSD_RESPONSE_SAMPLE_RATE', 1)
        self.config.setdefault('STATSD_RESPONSE_AUTO_TAG', True)
        self.config.setdefault('STATSD_RESPONSE_ENDPOINT_TAG_FORMAT', 'endpoint_{0}')
        self.config.setdefault('STATSD_RESPONSE_METHOD_TAG_FORMAT', 'method_{0}')
        if self.config['STATSD_CONFIGURE_MIDDLEWARE']:
            self.app.before_request(self.before_request)
            self.app.after_request(self.after_request)

    def before_request(self):
        """
        statsd middleware handler for before each request
        """
        # Set the request start time
        g.flask_statsd_start_time = time.time()
        g.flask_statsd_request_tags = []

        # Add some default request tags
        if self.config['STATSD_RESPONSE_AUTO_TAG']:
            self.add_request_tags([
                # Endpoint tag
                self.config['STATSD_RESPONSE_ENDPOINT_TAG_FORMAT'].format(str(request.endpoint).lower()),
                # Method tag
                self.config['STATSD_RESPONSE_METHOD_TAG_FORMAT'].format(request.method.lower()),
            ])

        # Count requests per endpoint and method (statsd derives the per-second rate)
        metric = '.'.join([self.config['STATSD_NUMBER_OF_REQUESTS_METRIC_NAME'],
                           str(request.endpoint).lower(), request.method.lower()])
        self.statsd.incr(metric, 1)


    def after_request(self, response):
        """
        statsd middleware handler for after each request

        :param response: the response to be sent to the client
        :type response: ``flask.Response``
        :rtype: ``flask.Response``
        """
        # Return early if we don't have the start time
        if not hasattr(g, 'flask_statsd_start_time'):
            return response

        # Get the response time for this request
        elapsed = time.time() - g.flask_statsd_start_time
        # Convert the elapsed time to milliseconds if they want them
        if self.use_ms:
            elapsed = int(round(1000 * elapsed))

        # Add some additional response tags
        if self.config['STATSD_RESPONSE_AUTO_TAG']:
            self.add_request_tags(['status_code_%s' % (response.status_code, )])

        metric = self.config['STATSD_RESPONSE_METRIC_NAME']
        tags = self.get_request_tags()
        if tags:
            metric = ".".join([metric] + tags)

        # Emit our timing metric
        self.statsd.timing(metric,
                           elapsed, rate=self.config['STATSD_RESPONSE_SAMPLE_RATE'])

        # We ALWAYS have to return the original response
        return response
    
    def get_request_tags(self):
        """
        Get the current list of tags set for this request

        :rtype: list
        """
        return getattr(g, 'flask_statsd_request_tags', [])

    def add_request_tags(self, tags):
        """
        Add the provided list of tags to the tags stored for this request

        :param tags: tags to add to this requests tags
        :type tags: list
        :rtype: list
        """
        # Get the current list of tags to append to
        # DEV: We use this method since ``self.get_request_tags`` will ensure that we get a list back
        current_tags = self.get_request_tags()

        # Append our new tags, and return the new full list of tags for this request
        g.flask_statsd_request_tags = current_tags + tags
        return g.flask_statsd_request_tags
Example #27
 async def readiness_liveness_probe(statsd: StatsClient = Depends(get_statsd)):
     statsd.incr("requests.health")
     return {"message": "ep-stats-api is ready"}
Example #28
class Server():

	def __init__(self, args):
		# Setup logging - Generate a default rotating file log handler and stream handler
		logFileName = 'connector-statsd.log'
		fhFormatter = logging.Formatter('%(asctime)-25s %(levelname)-7s %(message)s')
		sh = logging.StreamHandler()
		sh.setFormatter(fhFormatter)
		
		self.logger = logging.getLogger("server")
		self.logger.addHandler(sh)
		self.logger.setLevel(logging.DEBUG)
		
		
		self.port = int(os.getenv('VCAP_APP_PORT', '9666'))
		self.host = str(os.getenv('VCAP_APP_HOST', 'localhost'))

		if args.bluemix == True:
			self.options = ibmiotf.application.ParseConfigFromBluemixVCAP()
		else:
			if args.token is not None:
				self.options = {'auth-token': args.token, 'auth-key': args.key}
			else:
				self.options = ibmiotf.application.ParseConfigFile(args.config)
		
		# Bottle
		self._app = Bottle()
		self._route()
		
		# Init IOTF client
		self.client = ibmiotf.application.Client(self.options, logHandlers=[sh])
	
		# Init statsd client
		if args.statsd:
			self.statsdHost = args.statsd
		else: 
			self.statsdHost = "localhost"
		
		self.statsd = StatsClient(self.statsdHost, prefix=self.client.orgId)
		
	
	def _route(self):
		self._app.route('/', method="GET", callback=self._status)
	
	
	def myEventCallback(self, evt):
		try:
			flatData = flattenDict(evt.data, join=lambda a,b:a+'.'+b)
			
			self.logger.debug("%-30s%s" % (evt.device, evt.event + ": " + json.dumps(flatData)))
			
			eventNamespace = evt.deviceType +  "." + evt.deviceId + "." + evt.event
			
			self.statsd.incr("events.meta." + eventNamespace)
			for datapoint in flatData:
				eventDataNamespace = "events.data." + eventNamespace + "." + datapoint[0]
				# Pass through numeric data
				# Convert boolean datapoints to numeric 0|1 representation
				# Throw away everything else (e.g. String data)
				if isinstance(datapoint[1], bool):
					if datapoint[1] == True:
						self.statsd.gauge(eventDataNamespace, 1)
					else:
						self.statsd.gauge(eventDataNamespace, 0)
				elif isinstance(datapoint[1], Number):
					self.statsd.gauge(eventDataNamespace, datapoint[1])
		except Exception as e:
			self.logger.critical("%-30s%s" % (evt.device, evt.event + ": Exception processing event - " + str(e)))
			#self.logger.critical(json.dumps(evt.data))

	def start(self):
		self.client.connect()
		self.client.deviceEventCallback = self.myEventCallback
		self.client.subscribeToDeviceEvents()
		self.logger.info("Serving at %s:%s" % (self.host, self.port))
		self._app.run(host=self.host, port=self.port)
	
	def stop(self):
		self.client.disconnect()
		
	def _status(self):
		return template('status', env_options=os.environ)
Example #29
class DefaultJob(Job):
    # Config keys
    IS_STATSD_ENABLED = 'is_statsd_enabled'
    JOB_IDENTIFIER = 'identifier'

    """
    Default job that expects a task, and optional publisher
    If configured job will emit success/fail metric counter through statsd where prefix will be
    amundsen.databuilder.job.[identifier] .
    Note that job.identifier is part of metrics prefix and choose unique & readable identifier for the job.

    To configure statsd itself, use environment variable: https://statsd.readthedocs.io/en/v3.2.1/configure.html
    """
    def __init__(self,
                 conf,
                 task,
                 publisher=NoopPublisher()):
        # type: (ConfigTree, Task, Publisher) -> None
        self.task = task
        self.conf = conf
        self.publisher = publisher
        self.scoped_conf = Scoped.get_scoped_conf(self.conf,
                                                  self.get_scope())
        if self.scoped_conf.get_bool(DefaultJob.IS_STATSD_ENABLED, False):
            prefix = 'amundsen.databuilder.job.{}'.format(self.scoped_conf.get_string(DefaultJob.JOB_IDENTIFIER))
            LOGGER.info('Setting statsd for job metrics with prefix: {}'.format(prefix))
            self.statsd = StatsClient(prefix=prefix)
        else:
            self.statsd = None

    def init(self, conf):
        # type: (ConfigTree) -> None
        pass

    def _init(self):
        # type: () -> None
        self.task.init(self.conf)

    def launch(self):
        # type: () -> None
        """
        Launch a job by initializing job, run task and publish
        :return:
        """

        logging.info('Launching a job')
        #  Using nested try/finally to make sure the task gets closed as soon as
        #  possible, as well as to guarantee all the closeables get closed.
        try:
            is_success = True
            self._init()
            try:
                self.task.run()
            finally:
                self.task.close()

            self.publisher.init(Scoped.get_scoped_conf(self.conf, self.publisher.get_scope()))
            Job.closer.register(self.publisher.close)
            self.publisher.publish()
        except Exception as e:
            is_success = False
            raise e
        finally:
            # TODO: If more metrics are needed on different construct, such as task, consider abstracting this out
            if self.statsd:
                if is_success:
                    LOGGER.info('Publishing job metrics for success')
                    self.statsd.incr('success')
                else:
                    LOGGER.info('Publishing job metrics for failure')
                    self.statsd.incr('fail')

            Job.closer.close()

        logging.info('Job completed')
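
A rough wiring sketch, assuming the job's config scope resolves to 'job' and that my_task stands in for a constructed databuilder Task:

from pyhocon import ConfigFactory

conf = ConfigFactory.from_dict({
    'job': {
        'is_statsd_enabled': True,
        'identifier': 'nightly_index',  # becomes part of the metric prefix
    },
})
job = DefaultJob(conf=conf, task=my_task)
job.launch()  # emits amundsen.databuilder.job.nightly_index.success or .fail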
Example #30
    c = conn.cursor()

    util.create_schema(c)
    auth = util.authinfo(c)

    (q, option) = get_option(c, q)
    last_q = q.split(' ')[-1]
    if q.startswith('_'):  # option
        process_option(c, q)
    elif q.startswith('+'):  # add bookmark
        add_bookmark(c, q)
    elif last_q.startswith('#') and (':' not in q):  # tag expansion
        pbsearch_tag(c, '', last_q[1:])
    else:
        pbsearch_sql(c, option, q)

    util.closedb(conn)


if __name__ == '__main__':
    try:
        statsd = StatsClient(host='g.jmjeong.com',
                             port=8125,
                             prefix='jmjeong.alfred.bookmark')

        with statsd.timer('main'):
            statsd.incr('launch')
            main()
    except:
        main()
Example #31
from statsd import StatsClient
from datetime import datetime
from time import sleep

statsd_client = StatsClient(host='metrics')

print('incrementing by 1')                # print start statement
for x in range(100, 1000, 100):           # start of loop
    statsd_client.incr('sd_incr_single')  # increment by the default of 1
Example #32
import time
from statsd import StatsClient

statsd = StatsClient(host='localhost.vagrant',
                     port=8125,
                     prefix=None,
                     maxudpsize=512)


while True:
  statsd.incr('foo')
  time.sleep(1)
Example #33
class StatsdMonitor(object):
    def __init__(self, broker, interval=1):
        # self.interval = interval
        self.state = app.events.State()
        self.statsd_conn = StatsClient(host='localhost', port=8125)
        self.broker_conn = BrokerConnection(broker)
        self.timers_list = []

    # monitor the task and status of worker with functions
    def run_loop(self):
        while True:
            try:
                with self.broker_conn as conn:
                    recv = EventReceiver(conn,
                                         handlers={
                                             'task-sent':
                                             self.on_task_sent,
                                             'task-failed':
                                             self.on_task_failed,
                                             'task-retried':
                                             self.on_task_retried,
                                             'task-started':
                                             self.on_task_started,
                                             'task-succeeded':
                                             self.on_task_succeeded,
                                             'task-received':
                                             self.on_task_received,
                                             'task-rejected':
                                             self.on_task_rejected,
                                             'task-revoked':
                                             self.on_task_revoked,
                                             'worker-online':
                                             self.on_worker_online,
                                             'worker-heartbeat':
                                             self.on_worker_heartbeat,
                                             'worker-offline':
                                             self.on_worker_offline,
                                         })
                    recv.capture(limit=None, timeout=None, wakeup=True)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                raise
            # time.sleep(self.interval)

    # all about the tasks

    def on_task_sent(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        self.statsd_conn.incr('tasks.sent')

    def on_task_received(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        self.statsd_conn.incr('tasks.received')

    def on_task_started(self, event):
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.info('Task {}[{}] started'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.started')
        mark = 'task.{}.recorder'.format(task.uuid)
        self.timer_start(mark)

    def on_task_succeeded(self, event):
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.info('Task {}[{}] succeeded'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.succeeded')
        mark = 'task.{}.recorder'.format(task.uuid)
        self.timer_stop(mark)

    def on_task_failed(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.warning('Task {}[{}] failed'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.failed')

    def on_task_retried(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])
        logger.warning('Task {}[{}] retried'.format(task.name, task.uuid))
        self.statsd_conn.incr('tasks.retried')

    def on_task_rejected(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])

    def on_task_revoked(self, event):  # TODO
        self.state.event(event)
        task = self.state.tasks.get(event['uuid'])

    # all about the status of the workers

    def on_worker_online(self, event):  # TODO
        self.state.event(event)
        worker = self.state.workers.get(event['hostname'])
        mark = 'worker.{}.recorder'.format(worker.hostname)
        self.timer_start(mark)

    def on_worker_heartbeat(self, event):
        self.state.event(event)
        worker = self.state.workers.get(event['hostname'])
        key_pro = 'worker.{}.processed'.format(worker.hostname)
        key_act = 'worker.{}.active'.format(worker.hostname)
        if worker.processed is None: worker.processed = 0
        if worker.active is None: worker.active = 0
        self.statsd_conn.gauge(key_pro, worker.processed)
        self.statsd_conn.gauge(key_act, worker.active)

    def on_worker_offline(self, event):  # TODO
        self.state.event(event)
        worker = self.state.workers.get(event['hostname'])
        mark = 'worker.{}.recorder'.format(worker.hostname)
        self.timer_stop(mark)

    # statsd timer record start
    def timer_start(self, mark):
        timer = self.statsd_conn.timer(mark)
        timer.start()
        self.timers_list.append(timer)

    # statsd timer record stop
    def timer_stop(self, mark):
        for timer in self.timers_list:
            if timer.stat == mark:
                timer.stop()
                self.timers_list.remove(timer)
Example #34
class Client(object):

    def __init__(self, server, zero_fill=True, **kw):
        self.server = server.rstrip('/')
        self.session = requests.session()
        # getting monolith info
        info = self.session.get(server).json
        if callable(info):
            info = info()

        self.es = self.server + info['es_endpoint']
        self.fields = info['fields']
        self.zero_fill = zero_fill

        # statsd settings
        statsd_host = kw.get('statsd.host', 'localhost')
        statsd_port = int(kw.get('statsd.port', 8125))
        statsd_prefix = kw.get('statsd.prefix', 'monolith.client')
        self.statsd = StatsClient(host=statsd_host, port=statsd_port,
                                  prefix=statsd_prefix)

    def __call__(self, field, start, end, interval=DAY, strict_range=False,
                 **terms):

        if isinstance(interval, basestring):
            interval = _str2interval[interval.encode()]

        if isinstance(start, basestring):
            start = datetime.datetime.strptime(start.encode(),
                                               '%Y-%m-%d').toordinal()
            start = datetime.date.fromordinal(start)
            end = datetime.datetime.strptime(end.encode(),
                                             '%Y-%m-%d').toordinal()
            end = datetime.date.fromordinal(end)

        if interval == DAY:
            drange = util.iterdays(start, end)
        elif interval == WEEK:
            drange = util.iterweeks(start, end)
        elif interval == MONTH:
            drange = util.itermonths(start, end)
        else:
            drange = util.iteryears(start, end)

        # building the query
        start_date_str = start.strftime('%Y-%m-%d')
        end_date_str = end.strftime('%Y-%m-%d')

        if isinstance(interval, int):
            interval = _interval2str[interval]

        # XXX we'll see later if we want to provide a
        # nicer query interface

        # we need a facet query
        if strict_range:
            greater = "gt"
            lower = "lt"
        else:
            greater = "gte"
            lower = "lte"

        query = {
            "query": {
                "match_all": {},
            },
            "size": 0,  # we aren't interested in the hits
            "facets": {
                "histo1": {
                    "date_histogram": {
                        "value_field": field,
                        "interval": interval,
                        "key_field": "date",
                    },
                    "facet_filter": {
                        "range": {
                            "date": {
                                greater: start_date_str,
                                lower: end_date_str,
                            }
                        }
                    }
                }
            }
        }

        if len(terms) > 0:
            term = {}

            for key, value in terms.items():
                term[key] = value

            range_ = query['facets']['histo1']['facet_filter']['range']
            filter_ = {'and': [{'term': term},
                               {'range': range_}]}
            query['facets']['histo1']['facet_filter'] = filter_

        with self.statsd.timer('elasticsearch-query'):
            res = self.session.post(self.es, data=json.dumps(query))
            if res.status_code != 200:
                raise ValueError(res.content)

            # getting the JSON content
            res = res.json
            if callable(res):
                res = res()

        # statsd calls
        self.statsd.incr('elasticsearch-call')

        if not isinstance(res, dict):
            raise ValueError(res)

        if 'errors' in res:
            raise ValueError(res['errors'][0]['description'])

        dates = set()

        for entry in res['facets']['histo1']['entries']:
            time_ = entry['time'] / 1000.0
            date_ = datetime.datetime.utcfromtimestamp(time_).date()
            if 'total' in entry:
                count = entry['total']
            else:
                count = entry['count']

            if date_ not in dates:
                dates.add(date_)

            yield {'count': count, 'date': date_}

        if self.zero_fill:
            # yielding zeros
            for date_ in drange:
                if strict_range and date_ in (start, end):
                    continue
                if date_ not in dates:
                    yield {'count': 0, 'date': date_}
Example #35
class StatsdMetrics(Metrics):

    def __init__(self, host='localhost', port=8125, prefix=None):
        self.statsd = StatsClient(host, port, prefix)

    def fanout_timer(self, feed_class):
        return self.statsd.timer('%s.fanout_latency' % feed_class.__name__)

    def feed_reads_timer(self, feed_class):
        return self.statsd.timer('%s.read_latency' % feed_class.__name__)

    def on_feed_read(self, feed_class, activities_count):
        self.statsd.incr('%s.reads' % feed_class.__name__, activities_count)

    def on_feed_write(self, feed_class, activities_count):
        self.statsd.incr('%s.writes' % feed_class.__name__, activities_count)

    def on_feed_remove(self, feed_class, activities_count):
        self.statsd.incr('%s.deletes' % feed_class.__name__, activities_count)

    def on_fanout(self, feed_class, operation, activities_count=1):
        metric = (feed_class.__name__, operation.__name__)
        self.statsd.incr('%s.fanout.%s' % metric, activities_count)

    def on_activity_published(self):
        self.statsd.incr('activities.published')

    def on_activity_removed(self):
        self.statsd.incr('activities.removed')
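
A minimal sketch of driving the metrics shim above, assuming the Metrics base class from the surrounding project is importable; FakeFeed is a stand-in for a real feed class, whose __name__ becomes the metric prefix:

metrics = StatsdMetrics(host='127.0.0.1', port=8125, prefix='stream')

class FakeFeed:
    pass

with metrics.fanout_timer(FakeFeed):
    pass  # timed work; records stream.FakeFeed.fanout_latency
metrics.on_feed_read(FakeFeed, activities_count=10)  # stream.FakeFeed.reads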
Example #36
class AMQPConnection:
    """
    This class is inspired from the following pika sample:
    http://pika.readthedocs.io/en/0.11.0/examples/tornado_consumer.html

    If the channel is closed, it will indicate a problem with one of the
    commands that were issued and that should surface in the output as well.

    """
    def __init__(self, config=AmqpConfig):
        """
        Create a new instance of the AMQPConnection class, passing in the AMQPConfig
        class to connect to RabbitMQ.
        :param config:
        """

        self._config = config
        self._connection = None
        self._channel = None
        self._isStarted = Future()
        self._channelClosed = Future()
        self._connectionClosed = Future()
        self.logger = logging.getLogger("tornado.application")
        self.statsdClient = StatsClient(MonitoringConfig.metrics_host,
                                        MonitoringConfig.metrics_port,
                                        prefix=MonitoringConfig.metrics_prefix)

    @gen.coroutine
    def connect(self, ioloop):
        """
        This method connects to RabbitMQ, returning the state.
        When the connection is established, the on_connection_open method
        will be invoked by pika.
        This method waits for the connection to be open

        :param ioloop: the ioloop to be used by the tornadoConnection
        :return: True if the connection is successful
        """
        self.logger.info(
            "pid:{} AMQP connecting to: exchange:{} host:{} port: {}".format(
                os.getpid(), self._config.exchange, self._config.host,
                self._config.port))
        credentials = pika.PlainCredentials(self._config.user,
                                            self._config.password)

        pika.TornadoConnection(
            pika.ConnectionParameters(host=self._config.host,
                                      port=self._config.port,
                                      credentials=credentials),
            self._on_connection_open,
            on_open_error_callback=self._on_connection_open_error,
            on_close_callback=self._on_connection_closed,
            custom_ioloop=ioloop)

        res = yield self._isStarted
        return res

    @gen.coroutine
    def disconnect(self):
        """
        This method closes the channel and the connection to RabbitMQ.
        :return:
        """
        res = yield self._isStarted
        if not res:
            return

        self._channelClosed = Future()
        self._connectionClosed = Future()
        self._channel.close()
        yield self._channelClosed
        yield self._connectionClosed

    @gen.coroutine
    def declare_queue(self, name):
        """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
        command. This method wait for the queue to be declared successfully

        :param str|unicode name: The name of the queue to declare.

        """
        future_result = Future()

        def on_queue_ready(method_frame):
            self.logger.info("pid:{} Queue {} has been declared".format(
                os.getpid(), name))

            future_result.set_result(True)

        self.logger.info("pid:{} Queue declare:{}".format(os.getpid(), name))
        self._channel.queue_declare(on_queue_ready,
                                    queue=name,
                                    durable=True,
                                    exclusive=False,
                                    auto_delete=False)
        res = yield future_result
        return res

    @gen.coroutine
    def subscribe(self, routing_key, queue_name, handler):
        """
        This method subscribe to a routing_key, binding the routing_key to the given
        queue name.
        The input handler will be called when a message will be received
        :param routing_key: a string describing the routing_key
        :param queue_name: a string describing the queue_name
        :param handler: the message handler
        :return:
        """
        self.logger.info('Subscribe to routing_key: %s %s', routing_key,
                         handler)

        # declare queue
        yield self.declare_queue(queue_name)

        #  bind it
        bind_ok = Future()

        def on_bind_ok(unused_frame):
            bind_ok.set_result(True)

        self._channel.queue_bind(on_bind_ok, queue_name, self._config.exchange,
                                 routing_key)
        yield bind_ok

        # consume it
        self._channel.basic_consume(handler, queue_name)

    def publish(self, routing_key, msg):
        """publish a message to RabbitMQ, check for delivery confirmations in the
        _on_delivery_confirmations method.

        """
        self._channel.basic_publish(
            exchange=self._config.exchange,
            routing_key=routing_key,
            body=msg,
            properties=pika.BasicProperties(delivery_mode=2,
                                            # make message persistent
                                            ),
            mandatory=True)

    def _on_exchange_declare_ok(self, unused_frame):
        """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
        command.

        :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame

        """
        self.logger.info(
            "pid:{} Exchange is declared:{} host:{} port:{}".format(
                os.getpid(), self._config.exchange, self._config.host,
                self._config.port))
        self._isStarted.set_result(True)

    def _on_connection_closed(self, connection, reply_code, reply_text):
        """This method is invoked by pika when the connection to RabbitMQ is
        closed.

        :param pika.connection.Connection connection: The closed connection obj
        :param int reply_code: The server provided reply_code if given
        :param str reply_text: The server provided reply_text if given

        """
        self.logger.info(
            "pid:{} AMQP is disconnected from exchange:{} host:{} port:{} connexion:{}"
            .format(os.getpid(), self._config.exchange, self._config.host,
                    self._config.port, connection))
        self._connection = None
        self._channel = None
        self._connectionClosed.set_result(True)

    def _on_connection_open(self, connection):
        """This method is called by pika once the connection to RabbitMQ has
        been established. It passes the handle to the connection object in
        case we need it, but in this case, we'll just mark it unused.

        :type connection: pika.TornadoConnection

        """
        self._connection = connection
        connection.channel(on_open_callback=self._on_channel_open)

        self.logger.info(
            "pid:{} AMQP is connected exchange:{} host:{} port:{} connexion:{}"
            .format(os.getpid(), self._config.exchange, self._config.host,
                    self._config.port, connection))

    def _on_connection_open_error(self, unused_connection, msg):
        """This method is called by pika in case of connection errors to
        RabbitMQ. It passes the handle to the connection object in
        case we need it, but in this case, we'll just mark it unused.

        :type unused_connection: pika.TornadoConnection

        """
        self.logger.error("on_open_error callback: {}".format(msg))
        self._isStarted.set_result(False)

    def _on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened.
        The channel object is passed in so we can make use of it.

        Since the channel is now open, we'll declare the exchange to use.

        :param pika.channel.Channel channel: The channel object

        """
        self._channel = channel
        channel.add_on_close_callback(self._on_channel_closed)
        channel.exchange_declare(self._on_exchange_declare_ok,
                                 exchange=self._config.exchange,
                                 durable=True,
                                 exchange_type='topic')
        self.logger.info("channel open {}".format(channel))
        # Enabled delivery confirmations
        self._channel.confirm_delivery(self._on_delivery_confirmation)

        self._channel.add_on_return_callback(self._on_return_message_callback)

    def _on_channel_closed(self, channel, reply_code, reply_text):
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
        Channels are usually closed if you attempt to do something that
        violates the protocol, such as re-declare an exchange or queue with
        different parameters. In this case, we'll close the connection
        to shutdown the object.

        :param pika.channel.Channel channel: The closed channel
        :param int reply_code: The numeric reason the channel was closed
        :param str reply_text: The text reason the channel was closed

        """
        self.logger.info('Channel was closed: (%s) %s', reply_code, reply_text)
        self._channel = None
        self._channelClosed.set_result(True)
        self._connection.close()

    def _on_return_message_callback(self, channel, method, properties, body):
        """
        This method is called when a message published with mandatory=True
        could not be routed to any queue.
        :param channel: channel used for sending the message
        :param method: Basic.Return method frame
        :param properties: message properties
        :param body: message body
        :return:
        """
        self.logger.error(
            "message has been returned by the rabbitmq server: {}".format(
                body))
        self.statsdClient.incr('amqp.output_return', count=1)

    def _on_delivery_confirmation(self, method_frame):
        """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
        command, passing in either a Basic.Ack or Basic.Nack frame with
        the delivery tag of the message that was published. The delivery tag
        is an integer counter indicating the message number that was sent
        on the channel via Basic.Publish. Here we simply record the
        outcome in statsd.

        :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame

        """
        confirmation_type = method_frame.method.NAME.split('.')[1].lower()
        if confirmation_type == 'ack':
            self.statsdClient.incr('amqp.output_delivered', count=1)
        else:
            self.logger.error(
                "delivery_confirmation failed {}".format(method_frame))
            self.statsdClient.incr('amqp.output_failure', count=1)
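
For reference, a hedged sketch of a publish helper these callbacks support (a hypothetical method, not part of the original class; it assumes pika is imported). Publishing with mandatory=True sends unroutable messages back through _on_return_message_callback, and confirm_delivery routes acks/nacks to _on_delivery_confirmation.

    def publish(self, routing_key, body):
        """Hypothetical publish helper for illustration only."""
        self._channel.basic_publish(
            exchange=self._config.exchange,
            routing_key=routing_key,
            body=body,
            properties=pika.BasicProperties(delivery_mode=2),  # persistent
            mandatory=True)  # unroutable messages come back via _on_return_message_callback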
Example #37
def process_event(
    event: dict,
    render: Render,
    thread_store: ThreadStore,
    logger: Logger,
    retry_delay_seconds: int,
    stats: StatsClient,
    mail,
) -> int:
    """Reliably send emails for the provided event.

    Attempts to send all emails with "full context". If rendering fails or some
    emails can't be sent to some recipients, then they're retried with a
    "minimal context" to ensure that users still receive a notification.

    Note that it's still possible for a user to not get an email for an
    event if there's an issue with rendering or sending a "minimal context"
    email. However, due to the deliberately simple nature of these emails,
    the risk should be minimal.

    Returns the number of emails successfully sent.
    """
    """
    timestamp = event["timestamp"]
    full_context = event.get("context")
    recipient_filter_list = None
    if not full_context:
        stats.incr(STAT_FAILED_TO_RENDER_FULL_CONTEXT_EVENT)
        logger.warning(
            'Phabricator event didn\'t have "full" context. '
            "Falling back to sending a simpler, more resilient email."
        )
        successful_full_email_count = 0
    else:
        process_full_result = process_emails_full(
            timestamp,
            event["isSecure"],
            full_context,
            render,
            thread_store,
            stats,
            logger,
            retry_delay_seconds,
            mail,
        )
        successful_full_email_count = process_full_result.successfully_sent_email_count
        if process_full_result.state == ProcessEventState.SUCCESS:
            return successful_full_email_count
        elif process_full_result.state == ProcessEventState.FAILED_TO_RENDER:
            stats.incr(STAT_FAILED_TO_RENDER_FULL_CONTEXT_EVENT)
            logger.warning(
                "Failed to render emails for a Phabricator event with full "
                "context. Falling back to sending a simpler, more resilient email."
            )
        else:
            stats.incr(
                STAT_FAILED_TO_SEND_FULL_CONTEXT_MAIL,
                count=len(process_full_result.failed_to_send_recipients),
            )
            logger.warning(
                "Failed to send at least one email with full context. Falling "
                "back to sending a simpler, more resilient email for the "
                "affected recipient(s)."
            )
            recipient_filter_list = process_full_result.failed_to_send_recipients

    # If we've reached this point, we've failed to render or send emails with
    # full context.
    process_minimal_result = process_events_minimal(
        timestamp,
        event["minimalContext"],
        render,
        thread_store,
        stats,
        logger,
        retry_delay_seconds,
        recipient_filter_list,
        mail,
    )

    if process_minimal_result.state == ProcessEventState.FAILED_TO_RENDER:
        stats.incr(STAT_FAILED_TO_RENDER_MINIMAL_CONTEXT_EVENT)
        logger.error(
            "Failed to render emails for a Phabricator event with minimal "
            "context. Skipping these emails."
        )
    elif process_minimal_result.state == ProcessEventState.FAILED_TO_SEND:
        stats.incr(
            STAT_FAILED_TO_SEND_MINIMAL_CONTEXT_MAIL,
            count=len(process_minimal_result.failed_to_send_recipients),
        )
        logger.error(
            "Failed to send at least one email with minimal context. Skipping "
            "these emails."
        )
    return (
        successful_full_email_count
        + process_minimal_result.successfully_sent_email_count
    )
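
The STAT_* names, ProcessEventState, and the process_emails_* helpers are defined elsewhere in the module. A hedged sketch of how the counters might be declared as dotted statsd keys (the exact values are assumptions, not from the original source):

STAT_FAILED_TO_RENDER_FULL_CONTEXT_EVENT = "event.full_context.render_failure"
STAT_FAILED_TO_SEND_FULL_CONTEXT_MAIL = "mail.full_context.send_failure"
STAT_FAILED_TO_RENDER_MINIMAL_CONTEXT_EVENT = "event.minimal_context.render_failure"
STAT_FAILED_TO_SEND_MINIMAL_CONTEXT_MAIL = "mail.minimal_context.send_failure"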
Example #38
class Client(object):

    def __init__(self, server, index='time_*', zero_fill=True, **kw):
        self.server = server
        self.session = requests.session()
        self.es = urljoin(self.server, index + '/_search')
        self.zero_fill = zero_fill

        # statsd settings
        statsd_host = kw.get('statsd.host', 'localhost')
        statsd_port = int(kw.get('statsd.port', 8125))
        statsd_prefix = kw.get('statsd.prefix', 'monolith.client')
        self.statsd = StatsClient(host=statsd_host, port=statsd_port,
                                  prefix=statsd_prefix)

    def raw(self, query):
        with self.statsd.timer('elasticsearch-query'):
            res = self.session.get(self.es, data=json.dumps(query))
            if res.status_code != 200:
                raise ValueError(res.content)

            # Get the JSON content; `json` was a property on older versions
            # of requests and is a method on newer ones.
            res = res.json
            if callable(res):
                res = res()

        self.statsd.incr('elasticsearch-call')

        if not isinstance(res, dict):
            raise ValueError(res)

        if 'errors' in res:
            raise ValueError(res['errors'][0]['description'])

        return res

    def __call__(self, field, start, end, interval=DAY, strict_range=False,
                 **terms):

        if isinstance(interval, str):
            interval = _str2interval[interval]

        if isinstance(start, str):
            start = datetime.datetime.strptime(start, '%Y-%m-%d').date()
        elif isinstance(start, datetime.datetime):
            start = start.date()

        if isinstance(end, str):
            end = datetime.datetime.strptime(end, '%Y-%m-%d').date()
        elif isinstance(end, datetime.datetime):
            end = end.date()

        if interval == DAY:
            drange = util.iterdays(start, end)
        elif interval == WEEK:
            drange = util.iterweeks(start, end)
        elif interval == MONTH:
            drange = util.itermonths(start, end)
        else:
            drange = util.iteryears(start, end)

        # building the query
        start_date_str = start.strftime('%Y-%m-%d')
        end_date_str = end.strftime('%Y-%m-%d')

        if isinstance(interval, int):
            interval = _interval2str[interval]

        # XXX we'll see later if we want to provide a
        # nicer query interface

        # we need a facet query
        if strict_range:
            greater = "gt"
            lower = "lt"
        else:
            greater = "gte"
            lower = "lte"

        query = {
            "query": {
                "match_all": {},
            },
            "size": 0,  # we aren't interested in the hits
            "facets": {
                "histo1": {
                    "date_histogram": {
                        "value_field": field,
                        "interval": interval,
                        "key_field": "date",
                    },
                    "facet_filter": {
                        "range": {
                            "date": {
                                greater: start_date_str,
                                lower: end_date_str,
                            }
                        }
                    }
                }
            }
        }

        if terms:

            range_ = query['facets']['histo1']['facet_filter']['range']

            query['facets']['histo1']['facet_filter'] = {
                'and': ([{'term': {k: v}} for k, v in terms.items()] +
                        [{'range': range_}])}

        res = self.raw(query)
        counts = {}

        for entry in res['facets']['histo1']['entries']:
            time_ = entry['time'] / 1000.0
            date_ = datetime.datetime.utcfromtimestamp(time_).date()
            if 'total' in entry:
                count = entry['total']
            else:
                count = entry['count']
            counts[date_] = count

        for date_ in drange:
            if strict_range and date_ in (start, end):
                continue

            if date_ in counts:
                yield {'count': counts[date_], 'date': date_}
            elif self.zero_fill:
                yield {'count': None, 'date': date_}
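
A hedged usage sketch for the client above (the server URL and field name are made up; DAY is the interval constant from the surrounding module):

client = Client('http://localhost:9200', index='time_*')
for point in client('downloads', '2013-01-01', '2013-02-01', interval=DAY):
    print(point['date'], point['count'])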
Example #39
class Server():
    def __init__(self, args):
        # Set up logging - attach a stream handler (logFileName is defined
        # for a rotating file handler that this example never creates)
        logFileName = 'connector-statsd.log'
        fhFormatter = logging.Formatter(
            '%(asctime)-25s %(levelname)-7s %(message)s')
        sh = logging.StreamHandler()
        sh.setFormatter(fhFormatter)

        self.logger = logging.getLogger("server")
        self.logger.addHandler(sh)
        self.logger.setLevel(logging.DEBUG)

        self.port = int(os.getenv('VCAP_APP_PORT', '9666'))
        self.host = str(os.getenv('VCAP_APP_HOST', 'localhost'))

        if args.bluemix:
            self.options = ibmiotf.application.ParseConfigFromBluemixVCAP()
        else:
            if args.token is not None:
                self.options = {'auth-token': args.token, 'auth-key': args.key}
            else:
                self.options = ibmiotf.application.ParseConfigFile(args.config)

        # Bottle
        self._app = Bottle()
        self._route()

        # Init IOTF client
        self.client = ibmiotf.application.Client(self.options,
                                                 logHandlers=[sh])

        # Init statsd client
        if args.statsd:
            self.statsdHost = args.statsd
        else:
            self.statsdHost = "localhost"

        self.statsd = StatsClient(self.statsdHost, prefix=self.client.orgId)

    def _route(self):
        self._app.route('/', method="GET", callback=self._status)

    def myEventCallback(self, evt):
        try:
            flatData = flattenDict(evt.data, join=lambda a, b: a + '.' + b)

            self.logger.debug(
                "%-30s%s" %
                (evt.device, evt.event + ": " + json.dumps(flatData)))

            eventNamespace = evt.deviceType + "." + evt.deviceId + "." + evt.event

            self.statsd.incr("events.meta." + eventNamespace)
            for datapoint in flatData:
                eventDataNamespace = "events.data." + eventNamespace + "." + datapoint[
                    0]
                # Pass through numeric data
                # Convert boolean datapoints to a numeric 0|1 representation
                # Throw away everything else (e.g. String data)
                if isinstance(datapoint[1], bool):
                    self.statsd.gauge(eventDataNamespace, 1 if datapoint[1] else 0)
                elif isinstance(datapoint[1], Number):
                    self.statsd.gauge(eventDataNamespace, datapoint[1])
        except Exception as e:
            self.logger.critical("%-30s%s" %
                                 (evt.device, evt.event +
                                  ": Exception processing event - " + str(e)))
            #self.logger.critical(json.dumps(evt.data))

    def start(self):
        self.client.connect()
        self.client.deviceEventCallback = self.myEventCallback
        self.client.subscribeToDeviceEvents()
        self.logger.info("Serving at %s:%s" % (self.host, self.port))
        self._app.run(host=self.host, port=self.port)

    def stop(self):
        self.client.disconnect()

    def _status(self):
        return template('status', env_options=os.environ)
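
The flattenDict helper used in myEventCallback isn't shown. Judging by the datapoint[0]/datapoint[1] indexing, it flattens nested dicts into dotted keys and returns (key, value) pairs; a minimal stand-in under that assumption:

def flattenDict(d, join, prefix=''):
    # Recursively flatten a nested dict into (key, value) pairs,
    # combining key segments with the supplied join function.
    items = []
    for k, v in d.items():
        key = join(prefix, k) if prefix else k
        if isinstance(v, dict):
            items.extend(flattenDict(v, join, key))
        else:
            items.append((key, v))
    return items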
Example #40
class StatisticsForStatsd(RequiredConfig):
    """This class is a wrapper around statsd adding a simple configman
    compatible interface and stats naming scheme.  Code using this class
    will distrubute `incr` calls with names associated with them.  When
    ever an `incr` call is encountered, the name will be paired with the
    name of the statsd names and the increment action fired off.

    This class will only send stats `incr` calls for names that appear in
    the configuration parameter `active_counters_list`.  This enables counters
    to be turned on and off at configuration time."""

    required_config = Namespace()
    required_config.add_option(
        'statsd_host',
        doc='the hostname of statsd',
        default=''
    )
    required_config.add_option(
        'statsd_port',
        doc='the port number for statsd',
        default=''
    )
    required_config.add_option(
        'prefix',
        doc='a string to be used as the prefix for statsd names',
        default=''
    )
    required_config.add_option(
        'active_counters_list',
        default='',
        #default='restarts, jobs, criticals, errors, mdsw_failures',
        doc='a comma delimited list of counters',
        from_string_converter=str_to_list
    )

    def __init__(self, config, name):
        super(StatisticsForStatsd, self).__init__()
        self.config = config
        if config.prefix and name:
            self.prefix = '.'.join((config.prefix, name))
        elif config.prefix:
            self.prefix = config.prefix
        elif name:
            self.prefix = name
        else:
            self.prefix = ''
        self.statsd = StatsClient(
            config.statsd_host,
            config.statsd_port,
            self.prefix
        )

    def incr(self, name):
        if (
            self.config.statsd_host
            and name in self.config.active_counters_list
        ):
            if self.prefix and name:
                name = '.'.join((self.prefix, name))
            elif self.prefix:
                name = self.prefix
            elif not name:
                name = 'unknown'
            self.statsd.incr(name)
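
A hedged usage sketch; SimpleNamespace stands in for the configman-built configuration object, and all values are assumptions:

from types import SimpleNamespace

config = SimpleNamespace(
    statsd_host='localhost',
    statsd_port=8125,
    prefix='processor',
    active_counters_list=['restarts', 'jobs'],
)
stats = StatisticsForStatsd(config, 'worker1')
stats.incr('jobs')       # counted: 'jobs' is in active_counters_list
stats.incr('criticals')  # dropped: not in active_counters_list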
Example #41
from statsd import StatsClient
from datetime import datetime
from time import sleep

statsd_client = StatsClient(host='metrics')

print('incrementing by "n"')                # print start statement
for x in range(100, 1000, 100):             # start of loop
    statsd_client.incr('sd_incr_count', x)  # increment by x
    print('increment by {}'.format(x))      # print increment log
Example #42
from evdev import InputDevice, categorize, ecodes
import secrets  # local config module providing GRAPHITE_URL (shadows the stdlib 'secrets')
from statsd import StatsClient

c = StatsClient(host=secrets.GRAPHITE_URL, port=8126, prefix=None)

dev = InputDevice('/dev/input/event0')

for event in dev.read_loop():
  if event.type == ecodes.EV_KEY:
    if event.value == 1:  # key-down; the original octal literal '01' is a syntax error in Python 3
      c.incr('keystroke', count=1, rate=1)
Example #43
class StatsdMetrics(Metrics):
    def __init__(self, host='localhost', port=8125, prefix=None):
        self.statsd = StatsClient(host, port, prefix)

    def fanout_timer(self, feed_class):
        return self.statsd.timer('%s.fanout_latency' % feed_class.__name__)

    def feed_reads_timer(self, feed_class):
        return self.statsd.timer('%s.read_latency' % feed_class.__name__)

    def on_feed_read(self, feed_class, activities_count):
        self.statsd.incr('%s.reads' % feed_class.__name__, activities_count)

    def on_feed_write(self, feed_class, activities_count):
        self.statsd.incr('%s.writes' % feed_class.__name__, activities_count)

    def on_feed_remove(self, feed_class, activities_count):
        self.statsd.incr('%s.deletes' % feed_class.__name__, activities_count)

    def on_fanout(self, feed_class, operation, activities_count=1):
        metric = (feed_class.__name__, operation.__name__)
        self.statsd.incr('%s.fanout.%s' % metric, activities_count)

    def on_activity_published(self):
        self.statsd.incr('activities.published')

    def on_activity_removed(self):
        self.statsd.incr('activities.removed')
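
A hedged usage sketch (UserFeed is a made-up stand-in; statsd timers double as context managers):

class UserFeed:
    pass

metrics = StatsdMetrics(host='localhost', port=8125, prefix='feeds')
with metrics.fanout_timer(UserFeed):
    pass  # the timed fan-out work would happen here
metrics.on_feed_write(UserFeed, 3)  # increments feeds.UserFeed.writes by 3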
Example #44
class Monitor:
    def __init__(self, cliParam):
        ## worker pool used to collect network data
        self.pool = Pool(processes=3)
        self.cliParam = cliParam
        ## init the influxdb statsd client
        influxdb_config = cliParam["influxdb_config"]
        self.sdc = StatsClient(host=check_format(
            influxdb_config.get('host', '')),
                               port=influxdb_config.getint('port', 8120),
                               prefix="icmpping",
                               maxudpsize=512,
                               ipv6=False)
        self.tmpdata = {}

    def start(self):
        cliParam = self.cliParam  # local alias; the original referenced an undefined global
        fping_cmd = '%s %s' % (cliParam["fping_cmd"], " ".join(
            cliParam["ip_list"]))
        retry = 0
        while retry < cliParam["fping_retry_max"]:
            runtime = str(datetime.datetime.now())
            status, out = subprocess.getstatusoutput(fping_cmd)  # the original used the Python 2 only 'commands' module
            if status == 0 or status == 256:
                break
            else:
                logger.error("status:%s,info:%s" % (status, out))
                logger.warn('fping retry ..')
                time.sleep(cliParam["fping_retry_interval"])
                retry += 1
        if retry >= cliParam["fping_retry_max"]:
            logger.error('fping retry count >= retry_max')
            sys.exit()

        list_mtr_ips = []
        for info in out.split('\n'):
            #print info
            cur_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                     time.localtime(time.time()))
            target = info.split(':')[0].strip()
            if self.tmpdata.get(target) is None:
                self.tmpdata[target] = {}
            self.tmpdata['time'] = int(time.time() * 1000)
            self.tmpdata[target]['hostname'] = check_format(
                cliParam["targets_info"].get(target))
            try:
                self.tmpdata[target]['loss'] = int(
                    info.split('=')[1].split('/')[2].split('%')[0])
            except Exception as e:
                logger.error("failed to parse loss from: %s" % info)
                logger.error(e)
            if self.tmpdata[target].get('send_count') is None:
                self.tmpdata[target]['send_count'] = 0
            if self.tmpdata[target].get('warn_startTime') is None:
                self.tmpdata[target]['warn_startTime'] = ''
            if self.tmpdata[target]['loss'] < 100:
                self.tmpdata[target]['max'] = Decimal(
                    info.split('/')[-1]).quantize(Decimal('0.00'))
                self.tmpdata[target]['avg'] = Decimal(
                    info.split('/')[-2]).quantize(Decimal('0.00'))
                self.tmpdata[target]['min'] = Decimal(
                    info.split('/')[-3].split('=')[-1]).quantize(
                        Decimal('0.00'))
            # write the data to influxdb
            retry = 0
            while retry < cliParam["retry_max"]:
                try:
                    s_host = cliParam["my_name"]
                    s_ip = cliParam["my_public_ip"].replace('.', '_')
                    d_host = self.tmpdata[target]['hostname']
                    d_ip = target.replace('.', '_')
                    loss = int(info.split('=')[1].split('/')[0]) - int(
                        info.split('=')[1].split('/')[1])
                    data = '.'.join([s_host, s_ip, d_host, d_ip])
                    if self.tmpdata[target]['loss'] >= cliParam[
                            "send_data_loss_min"]:
                        self.sdc.incr('.'.join(['loss', data]), loss)
                        if self.tmpdata[target]['loss'] < 100:
                            self.sdc.timing('.'.join(['delay.max', data]),
                                            self.tmpdata[target]['max'])
                            self.sdc.timing('.'.join(['delay.min', data]),
                                            self.tmpdata[target]['min'])
                            self.sdc.timing('.'.join(['delay.avg', data]),
                                            self.tmpdata[target]['avg'])
                        logger.info({
                            "s_host": s_host,
                            "s_ip": s_ip,
                            "d_host": d_host,
                            "d_ip": d_ip,
                            "loss": loss,
                            "data": data
                        })
                        break
                    else:
                        logger.info({
                            "s_host": s_host,
                            "s_ip": s_ip,
                            "d_host": d_host,
                            "d_ip": d_ip,
                            "loss": loss,
                            "data": data,
                            "info": "not sending data to influxdb when cur_loss:%s < send_data_loss_min:%s"
                            % (loss, cliParam["send_data_loss_min"])
                        })
                        break
                except Exception as e:
                    logger.error("get fping info err : %s" % e)
                    time.sleep(cliParam["retry_interval"])
                    retry += 1

            # collect network data
            if self.tmpdata[target]['loss'] >= cliParam["warn_loss"]\
                    and self.tmpdata[target]['send_count'] < cliParam["collect_max"] \
                    and cliParam["mtr_status"] == 'true':
                ## defer the mtr collection
                list_mtr_ips.append(target)
            if self.tmpdata[target]['loss'] < cliParam["warn_loss"]:
                self.tmpdata[target]['send_count'] = 0
                self.tmpdata[target]['warn_startTime'] = ''
            if retry >= cliParam["retry_max"]:
                logger.error('write influxdb retry count >= retry_max')
                sys.exit()
        if list_mtr_ips:
            for target in list_mtr_ips:
                collectParam = {
                    "target": target,
                    "mtr_cmd": cliParam["mtr_cmd"],
                    "ping_cmd": cliParam["ping_cmd"],
                    "fping_cmd": cliParam["fping_cmd"],
                    "my_name": cliParam["my_name"],
                    "my_public_ip": cliParam["my_public_ip"],
                    "mtr_logfile": cliParam["mtr_logfile"],
                    "loss_value": self.tmpdata[target]['loss'],
                    "runtime": runtime,
                    "send_mail_status": cliParam["send_mail_status"],
                    "target_hostname": cliParam["targets_info"].get(target)
                }
                if cliParam.get("admin_list", "") != "":
                    collectParam["admin_list"] = cliParam["admin_list"]
                if self.tmpdata[target]['send_count'] < cliParam["collect_max"]:
                    self.pool.apply_async(collect_network_data, [collectParam])
                    self.tmpdata[target]['send_count'] += 1
                    logger.warn(self.tmpdata[target]['send_count'])
                else:
                    if self.tmpdata[target]['warn_startTime'] == '':
                        self.tmpdata[target]['warn_startTime'] = cur_time

    def run(self):
        while True:
            try:
                self.start()
            except Exception as e:
                logger.error(e)
                sys.exit(2)
Example #45
from flask import Flask, render_template, g, request, redirect
import requests
import json
from collections import OrderedDict
import datetime
import pymongo
from statsd import StatsClient

statsd = StatsClient(host='127.0.0.1',
                     port=8125)

statsd.incr('start')


app = Flask(__name__)

conn = pymongo.MongoClient()
db = conn.blog

@app.route("/")
@statsd.timer('index')
def index():
    statsd.incr('index_pageview')
    articles = db.articles.find().sort('created_at', pymongo.DESCENDING).limit(10)
    ret = []
    for article in articles:
        user = db.users.find_one({'user_id': article['user_id']})
        article['user'] = user
        ret.append(article)
    latest_users = db.users.find().sort('created_at', pymongo.DESCENDING).limit(10)
    return render_template('index.html', articles=ret, latest_users=latest_users)
Example #46
async def readiness_liveness_probe(statsd: StatsClient = Depends(get_statsd)):
    statsd.incr('requests.health')
    return {'message': 'ep-stats-api is ready'}
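
The get_statsd dependency isn't shown; a minimal sketch, assuming a module-level client (host, port, and prefix are assumptions):

from statsd import StatsClient

_statsd = StatsClient(host='localhost', port=8125, prefix='ep-stats-api')

def get_statsd() -> StatsClient:
    # FastAPI dependency provider returning the shared statsd client.
    return _statsd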