async def sign(context, path, signing_formats):
    """Call the appropriate signing function per format, for a single file.

    Args:
        context (Context): the signing context
        path (str): the source file to sign
        signing_formats (list): the formats to sign with

    Returns:
        list: the list of paths generated. This will be a list of one, unless
            there are detached sigfiles.

    """
    # We use 8135 by default, as datadog's default of 8125 conflicts
    # with collectd. Bug 1493265
    initialize(statsd_host=context.config.get('datadog_host', 'localhost'),
               statsd_port=context.config.get('datadog_port', 8135))

    output = path
    # Loop through the formats and sign one by one.
    for fmt in signing_formats:
        signing_func = _get_signing_function_from_format(fmt)
        log.info("sign(): Signing {} with {}...".format(output, fmt))
        metric_tags = [
            'format:{}'.format(fmt), 'host:{}'.format(platform.node()),
            'app:signingscript'
        ]
        with statsd.timed('signingfunc.time', tags=metric_tags):
            output = await signing_func(context, output, fmt)
    # We want to return a list
    if not isinstance(output, (tuple, list)):
        output = [output]
    return output
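
The `sign` coroutine above fetches the signer through a helper, `_get_signing_function_from_format`, that is not shown; Example no. 2 below suggests it is just a dictionary lookup with a 'default' fallback. A minimal sketch under that assumption (the stub signer and mapping entries are illustrative placeholders, not the real signingscript table):

# Hypothetical signer stub; real signingscript defines many signing coroutines.
async def sign_file(context, path, fmt):
    return path

FORMAT_TO_SIGNING_FUNCTION = {
    'default': sign_file,
}

def _get_signing_function_from_format(fmt):
    # Unknown formats fall back to the 'default' signer.
    return FORMAT_TO_SIGNING_FUNCTION.get(fmt,
                                          FORMAT_TO_SIGNING_FUNCTION['default'])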
Example no. 2
async def sign(context, path, signing_formats):
    """Call the appropriate signing function per format, for a single file.

    Args:
        context (Context): the signing context
        path (str): the source file to sign
        signing_formats (list): the formats to sign with

    Returns:
        list: the list of paths generated. This will be a list of one, unless
            there are detached sigfiles.

    """
    output = path
    # Loop through the formats and sign one by one.
    for fmt in signing_formats:
        signing_func = FORMAT_TO_SIGNING_FUNCTION.get(
            fmt, FORMAT_TO_SIGNING_FUNCTION['default'])
        log.info("sign(): Signing {} with {}...".format(output, fmt))
        metric_tags = [
            'format:{}'.format(fmt), 'host:{}'.format(platform.node()),
            'app:signingscript'
        ]
        with statsd.timed('signingfunc.time', tags=metric_tags):
            output = await signing_func(context, output, fmt)
    # We want to return a list
    if not isinstance(output, (tuple, list)):
        output = [output]
    return output
Example no. 3
        def wrapper(*args, **kwargs):

            tags = {'operation': attr}
            if 'index' in kwargs:
                tags['index'] = kwargs['index']

            with statsd.timed('elasticsearch',
                              tags=self.__prepare_tags(tags),
                              use_ms=True):
                return es_attr(*args, **kwargs)
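
Example no. 3 is a fragment: `wrapper` closes over `attr` and `es_attr`, which points to a `__getattr__`-style proxy around an Elasticsearch client. A self-contained sketch of that pattern, with assumed names (`InstrumentedES`, the tag-merging logic):

from datadog import statsd


class InstrumentedES:
    """Proxy that times every method call on a wrapped Elasticsearch client."""

    def __init__(self, es_client, base_tags=None):
        self._es = es_client
        self._base_tags = base_tags or []

    def __prepare_tags(self, tags):
        # Merge static base tags with per-call tags such as operation/index.
        return self._base_tags + ['{}:{}'.format(k, v) for k, v in tags.items()]

    def __getattr__(self, attr):
        es_attr = getattr(self._es, attr)
        if not callable(es_attr):
            return es_attr

        def wrapper(*args, **kwargs):
            tags = {'operation': attr}
            if 'index' in kwargs:
                tags['index'] = kwargs['index']
            with statsd.timed('elasticsearch',
                              tags=self.__prepare_tags(tags),
                              use_ms=True):
                return es_attr(*args, **kwargs)

        return wrapper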
Example no. 4
    def run_and_notify(self, send_to_s3_override=False):
        try:
            slack.send("{} started".format(self.s3_key), channel_name="sky-py")
            with statsd.timed(self.statsd_time_name):
                self.run(send_to_s3_override=send_to_s3_override)
            statsd.increment(self.statsd_time_name, tags=['success'])
            slack.send("{} finished".format(self.s3_key),
                       channel_name="sky-py")
        except Exception:
            statsd.increment(self.statsd_time_name, tags=['error'])
            slack.send("{} failed".format(self.s3_key), channel_name="sky-py")
            sentry.handleException(sys.exc_info())
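
Example no. 4 uses `statsd.timed` as a context manager. The same object also works as a decorator, which is convenient when an entire function body is the span to measure (the metric name and tag below are made up for illustration):

from datadog import statsd


@statsd.timed('job.run.time', tags=['job:example'])
def run_job():
    ...  # the whole call is timed and reported when it returns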
Example no. 5
    async def stream_worker_task(self, worker_id: int) -> None:
        if self._redis_stream is None or self._redis_cache is None:
            raise RuntimeError("redis clients are not ready")

        # NOTE(sileht): This task must never fail, we don't want to write code to
        # reap/clean/respawn them
        stream_processor = StreamProcessor(self._redis_stream,
                                           self._redis_cache)
        stream_selector = StreamSelector(self._redis_stream, worker_id,
                                         self.worker_count)

        while not self._stopping.is_set():
            try:
                stream_name = await stream_selector.next_stream()
                if stream_name:
                    LOG.debug("worker %s take stream: %s", worker_id,
                              stream_name)
                    try:
                        with statsd.timed("engine.stream.consume.time"):
                            await stream_processor.consume(stream_name)
                    finally:
                        LOG.debug(
                            "worker %s release stream: %s",
                            worker_id,
                            stream_name,
                        )
                else:
                    LOG.debug("worker %s has nothing to do, sleeping a bit",
                              worker_id)
                    await self._sleep_or_stop()
            except asyncio.CancelledError:
                LOG.debug("worker %s killed", worker_id)
                return
            except aredis.exceptions.ConnectionError:
                statsd.increment("redis.client.connection.errors")
                LOG.warning("worker lost Redis connection",
                            worker_id,
                            exc_info=True)
                await self._sleep_or_stop()
            except Exception:
                LOG.error("worker %s fail, sleeping a bit",
                          worker_id,
                          exc_info=True)
                await self._sleep_or_stop()

        LOG.debug("worker %s exited", worker_id)
Example no. 6
    async def stream_worker_task(self, worker_id: int) -> None:
        # NOTE(sileht): This task must never fail, we don't want to write code to
        # reap/clean/respawn them
        stream_processor = StreamProcessor(self._redis)
        stream_selector = StreamSelector(self._redis, worker_id,
                                         self.worker_count)

        while not self._stopping.is_set():
            try:
                stream_name = await stream_selector.next_stream()
                if stream_name:
                    LOG.debug("worker %s take stream: %s", worker_id,
                              stream_name)
                    try:
                        with statsd.timed("engine.stream.consume.time"):
                            await stream_processor.consume(stream_name)
                    finally:
                        LOG.debug(
                            "worker %s release stream: %s",
                            worker_id,
                            stream_name,
                        )
                else:
                    LOG.debug("worker %s has nothing to do, sleeping a bit",
                              worker_id)
                    await self._sleep_or_stop()
            except asyncio.CancelledError:
                # NOTE(sileht): We don't wait for the thread and just return, the thread
                # will be killed when the program exits.
                LOG.debug("worker %s killed", worker_id)
                return
            except Exception:
                LOG.error("worker %s fail, sleeping a bit",
                          worker_id,
                          exc_info=True)
                await self._sleep_or_stop()

        stream_processor.close()
        LOG.debug("worker %s exited", worker_id)
Example no. 7
    async def request(self, method, url, *args, **kwargs):
        reply = None
        try:
            with statsd.timed("http.client.request.time",
                              tags=[f"hostname:{self.base_url.host}"]):
                reply = await super().request(method, url, *args, **kwargs)
        except http.HTTPClientSideError as e:
            if e.status_code == 403:
                _check_rate_limit(e.response)
            raise
        finally:
            if reply is None:
                status_code = "error"
            else:
                status_code = reply.status_code
            statsd.increment(
                "http.client.requests",
                tags=[
                    f"hostname:{self.base_url.host}",
                    f"status_code:{status_code}",
                ],
            )
            self._requests.append((method, url))
        return reply
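
Example no. 7 delegates 403 handling to a `_check_rate_limit` helper that is not included. A hedged sketch of what such a helper typically does with GitHub-style rate-limit headers (the exception class and metric name are assumptions, not the project's real ones):

from datadog import statsd


class RateLimited(Exception):
    """Assumed exception type for an exhausted quota."""


def _check_rate_limit(response):
    # GitHub signals an exhausted quota with a 403 plus X-RateLimit-* headers.
    remaining = response.headers.get("X-RateLimit-Remaining")
    if remaining is not None and int(remaining) == 0:
        statsd.increment("http.client.rate_limited")
        raise RateLimited(response.headers.get("X-RateLimit-Reset"))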
Example no. 8

from datadog import initialize, statsd
import time
import random

options = {
    'statsd_host': '127.0.0.1',
    'statsd_port': 8125
}

initialize(**options)

statsd.increment('example_metric.increment', tags=["environment:dev"])
statsd.decrement('example_metric.decrement', tags=["environment:dev"])
statsd.gauge('example_metric.gauge', 40, tags=["environment:dev"])
statsd.set('example_metric.set', 40, tags=["environment:dev"])
statsd.histogram('example_metric.histogram', random.randint(0, 20), tags=["environment:dev"])

with statsd.timed('example_metric.timer', tags=["environment:dev"]):
    # do something to be measured
    time.sleep(random.randint(0, 10))

statsd.distribution('example_metric.distribution', random.randint(0, 20), tags=["environment:dev"])
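
The `statsd.timed` block above is equivalent to timing the work yourself and submitting the duration with `statsd.timing`; the manual form helps when the span's start and end live in different scopes. Continuing the script above (`timed` without `use_ms=True` reports seconds, so seconds are passed here too):

start = time.monotonic()
time.sleep(random.randint(0, 10))  # the work being measured
statsd.timing('example_metric.timer', time.monotonic() - start,
              tags=["environment:dev"])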
Example no. 9
    def timed(self, *args, **kwargs):
        return ddstatsd.timed(*args, **kwargs)
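
Example no. 9 presumably comes from a thin facade over dogstatsd so the rest of a codebase never imports datadog directly. A sketch of that pattern with assumed names (note the delegating method must return the timer so callers can use it as a context manager or decorator):

from datadog import statsd as ddstatsd


class Metrics:
    """Thin wrapper around dogstatsd (class name assumed)."""

    def increment(self, *args, **kwargs):
        ddstatsd.increment(*args, **kwargs)

    def timed(self, *args, **kwargs):
        # Return the timer object; without the return the caller gets None.
        return ddstatsd.timed(*args, **kwargs)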