Example #1
    def _get_send_response_timer(self, tag):
        if tag not in self._send_response_timers:
            if self._metrics_registry:
                self._send_response_timers[tag] = TimerWrapper(
                    self._metrics_registry.timer(
                        'interconnect_send_response_time',
                        tags=['message_type={}'.format(tag)]))
            else:
                self._send_response_timers[tag] = TimerWrapper()
        return self._send_response_timers[tag]
Example #2
    def _get_dispatch_timer(self, tag):
        if tag not in self._dispatch_timers:
            if self._metrics_registry:
                self._dispatch_timers[tag] = TimerWrapper(
                    self._metrics_registry.timer(
                        'dispatch_execution_time',
                        tags=['handler={}'.format(tag)]))
            else:
                self._dispatch_timers[tag] = TimerWrapper()
        return self._dispatch_timers[tag]
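Examples #1 and #2 share one pattern: a timer is created lazily, cached per tag, and when no metrics registry is configured the code falls back to a bare TimerWrapper() so callers never branch on whether metrics are enabled. A minimal sketch of that null-object fallback (TimerWrapper here is an illustrative stub, not the project's actual class; it assumes only that a real backing timer's time() returns a context object with a stop() method):

class _NoOpTimerContext:
    """Does nothing, so timing calls are safe when metrics are disabled."""

    def stop(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.stop()


class TimerWrapper:
    def __init__(self, timer=None):
        self._timer = timer

    def time(self):
        # Delegate to the real timer when one was supplied; otherwise hand
        # back a no-op context so both `with wrapper.time():` and
        # `ctx = wrapper.time(); ...; ctx.stop()` keep working unchanged.
        if self._timer is not None:
            return self._timer.time()
        return _NoOpTimerContext()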
Example #3
    def __init__(self, max_workers=None, name='',
                 trace=None, metrics_registry=None):
        if trace is None:
            self._trace = 'SAWTOOTH_TRACE_LOGGING' in os.environ
        else:
            self._trace = trace

        self._name = name
        if name == '':
            self._name = 'Instrumented'

        LOGGER.debug('Creating thread pool executor %s', self._name)

        self._workers_in_use = atomic.Counter()

        self._max_workers = max_workers
        if self._max_workers is None:
            # This is the same default as ThreadPoolExecutor, but we want to
            # know how many workers there are for logging
            self._max_workers = multiprocessing.cpu_count() * 5
        super().__init__(max_workers)

        if metrics_registry:
            # Tracks how many workers are already in use
            self._workers_already_in_use_gauge = GaugeWrapper(
                metrics_registry.gauge(
                    '{}-threadpool.workers_already_in_use'.format(self._name)))
            # Tracks how long tasks take to run
            self._task_run_timer = TimerWrapper(
                metrics_registry.timer(
                    '{}-threadpool.task_run_time'.format(self._name)))
            # Tracks how long tasks wait in the queue
            self._task_time_in_queue_timer = TimerWrapper(
                metrics_registry.timer(
                    '{}-threadpool.task_time_in_queue'.format(self._name)))
        else:
            self._workers_already_in_use_gauge = GaugeWrapper()
            self._task_run_timer = TimerWrapper()
            self._task_time_in_queue_timer = TimerWrapper()
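The same null-object idea covers gauges: Example #3 falls back to a bare GaugeWrapper() when no registry is passed. A hypothetical stub showing the shape, assuming the registry gauge exposes a set_value() method; the project's real wrapper may differ:

class GaugeWrapper:
    def __init__(self, gauge=None):
        self._gauge = gauge

    def set_value(self, value):
        # Forward the reading when a registry gauge is present;
        # otherwise drop it silently.
        if self._gauge is not None:
            self._gauge.set_value(value)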
Example #4
    def __init__(self,
                 max_workers=None,
                 name='',
                 trace=None,
                 metrics_registry=None):
        if trace is None:
            self._trace = 'SAWTOOTH_TRACE_LOGGING' in os.environ
        else:
            self._trace = trace

        self._name = name
        if name == '':
            self._name = 'Instrumented'

        LOGGER.debug('Creating thread pool executor %s', self._name)

        self._workers_in_use = atomic.Counter()

        self._max_workers = max_workers
        if self._max_workers is None:
            # This is the same default as ThreadPoolExecutor, but we want to
            # know how many workers there are for logging
            self._max_workers = multiprocessing.cpu_count() * 5
        super().__init__(max_workers)

        if metrics_registry:
            # Tracks how many workers are already in use
            self._workers_already_in_use_gauge = GaugeWrapper(
                metrics_registry.gauge(
                    '{}-threadpool.workers_already_in_use'.format(self._name)))
            # Tracks how long tasks take to run
            self._task_run_timer = TimerWrapper(
                metrics_registry.timer('{}-threadpool.task_run_time'.format(
                    self._name)))
            # Tracks how long tasks wait in the queue
            self._task_time_in_queue_timer = TimerWrapper(
                metrics_registry.timer(
                    '{}-threadpool.task_time_in_queue'.format(self._name)))
        else:
            self._workers_already_in_use_gauge = GaugeWrapper()
            self._task_run_timer = TimerWrapper()
            self._task_time_in_queue_timer = TimerWrapper()
Example #5
    def __init__(self, max_workers=None, name='',
                 trace=None, metrics_registry=None):
        if trace is None:
            self._trace = 'BGX_TRACE_LOGGING' in os.environ
        else:
            self._trace = trace

        self._name = name
        if name == '':
            self._name = 'Instrumented'

        LOGGER.debug('Creating thread pool executor %s', self._name)

        self._old_workers_in_use = atomic.Counter()

        self._max_workers = max_workers
        if self._max_workers is None:
            # This is the same default as ThreadPoolExecutor, but we want to
            # know how many workers there are for logging
            self._max_workers = multiprocessing.cpu_count() * 5
        super().__init__(max_workers)
        if metrics_registry:
            # Tracks how long tasks wait in the queue
            self._task_time_in_queue_timer = TimerWrapper(
                metrics_registry.timer(
                    'threadpool.InstrumentedThreadPoolExecutor'
                    '.task_time_in_queue',
                    tags=['name={}'.format(self._name)]))
            # Tracks how long tasks take to run
            self._task_run_timer = TimerWrapper(
                metrics_registry.timer(
                    'threadpool.InstrumentedThreadPoolExecutor.task_run_time',
                    tags=['name={}'.format(self._name)]))
            # Tracks how long each worker is occupied by a task (this fork
            # measures occupancy with a timer rather than a gauge)
            self._workers_in_use = TimerWrapper(
                metrics_registry.timer(
                    'threadpool.InstrumentedThreadPoolExecutor.workers_in_use',
                    tags=['name={}'.format(self._name)]))
        else:
            self._task_time_in_queue_timer = TimerWrapper()
            self._task_run_timer = TimerWrapper()
            self._workers_in_use = TimerWrapper()
Example #6
class InstrumentedThreadPoolExecutor(ThreadPoolExecutor):
    def __init__(self, max_workers=None, name='',
                 trace=None, metrics_registry=None):
        if trace is None:
            self._trace = 'BGX_TRACE_LOGGING' in os.environ
        else:
            self._trace = trace

        self._name = name
        if name == '':
            self._name = 'Instrumented'

        LOGGER.debug('Creating thread pool executor %s', self._name)

        self._old_workers_in_use = atomic.Counter()

        self._max_workers = max_workers
        if self._max_workers is None:
            # This is the same default as ThreadPoolExecutor, but we want to
            # know how many workers there are for logging
            self._max_workers = multiprocessing.cpu_count() * 5
        super().__init__(max_workers)
        if metrics_registry:
            # Tracks how long tasks wait in the queue
            self._task_time_in_queue_timer = TimerWrapper(
                metrics_registry.timer(
                    'threadpool.InstrumentedThreadPoolExecutor'
                    '.task_time_in_queue',
                    tags=['name={}'.format(self._name)]))
            # Tracks how long tasks take to run
            self._task_run_timer = TimerWrapper(
                metrics_registry.timer(
                    'threadpool.InstrumentedThreadPoolExecutor.task_run_time',
                    tags=['name={}'.format(self._name)]))
            # Tracks how long each worker is occupied by a task (this fork
            # measures occupancy with a timer rather than a gauge)
            self._workers_in_use = TimerWrapper(
                metrics_registry.timer(
                    'threadpool.InstrumentedThreadPoolExecutor.workers_in_use',
                    tags=['name={}'.format(self._name)]))
        else:
            self._task_time_in_queue_timer = TimerWrapper()
            self._task_run_timer = TimerWrapper()
            self._workers_in_use = TimerWrapper()

    def submit(self, fn, *args, **kwargs):
        submitted_time = time.time()
        time_in_queue_ctx = self._task_time_in_queue_timer.time()

        try:
            task_name = fn.__qualname__
        except AttributeError:
            task_name = str(fn)

        if self._trace:
            task_details = '{}[{},{}]'.format(fn, args, kwargs)
        else:
            task_details = task_name

        def wrapper():
            time_in_queue_ctx.stop()
            start_time = time.time()
            time_in_use = self._workers_in_use.time()
            workers_already_in_use = self._old_workers_in_use.get_and_inc()
            time_in_queue = (start_time - submitted_time) * 1000.0

            if self._trace:
                LOGGER.debug(
                    '(%s) Task \'%s\' in queue for %0.3f ms',
                    self._name, task_name, time_in_queue)
                LOGGER.debug(
                    '(%s) Workers already in use %s/%s',
                    self._name, workers_already_in_use, self._max_workers)
                LOGGER.debug(
                    '(%s) Executing task %s', self._name, task_details)

            with self._task_run_timer.time():
                return_value = None
                try:
                    return_value = fn(*args, **kwargs)
                # pylint: disable=broad-except
                except Exception:
                    LOGGER.exception(
                        '(%s) Unhandled exception during execution of task %s',
                        self._name,
                        task_details)

                time_in_use.stop()
                end_time = time.time()
                run_time = (end_time - start_time) * 1000.0
                self._old_workers_in_use.dec()

                if self._trace:
                    LOGGER.debug(
                        '(%s) Finished task %s', self._name, task_details)

                    LOGGER.debug(
                        '(%s) Task \'%s\' took %0.3f ms',
                        self._name,
                        task_name,
                        run_time)

                return return_value

        return super().submit(wrapper)
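A hypothetical usage sketch for the class above, assuming it is importable as written. Trace logging is keyed off the environment variable, so it has to be set before the executor is constructed:

import logging
import os

logging.basicConfig(level=logging.DEBUG)
os.environ['BGX_TRACE_LOGGING'] = '1'  # must be set before __init__ runs

pool = InstrumentedThreadPoolExecutor(max_workers=4, name='Demo')
future = pool.submit(pow, 2, 10)
print(future.result())  # 1024
pool.shutdown(wait=True)

Note one design consequence: wrapper() logs and swallows task exceptions, so a failed task's future resolves to None instead of raising from result().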
Example #7
class InstrumentedThreadPoolExecutor(ThreadPoolExecutor):
    def __init__(self,
                 max_workers=None,
                 name='',
                 trace=None,
                 metrics_registry=None):
        if trace is None:
            self._trace = 'SAWTOOTH_TRACE_LOGGING' in os.environ
        else:
            self._trace = trace

        self._name = name
        if name == '':
            self._name = 'Instrumented'

        LOGGER.debug('Creating thread pool executor %s', self._name)

        self._workers_in_use = atomic.Counter()

        self._max_workers = max_workers
        if self._max_workers is None:
            # This is the same default as ThreadPoolExecutor, but we want to
            # know how many workers there are for logging
            self._max_workers = multiprocessing.cpu_count() * 5
        super().__init__(max_workers)

        if metrics_registry:
            # Tracks how many workers are already in use
            self._workers_already_in_use_gauge = GaugeWrapper(
                metrics_registry.gauge(
                    '{}-threadpool.workers_already_in_use'.format(self._name)))
            # Tracks how long tasks take to run
            self._task_run_timer = TimerWrapper(
                metrics_registry.timer('{}-threadpool.task_run_time'.format(
                    self._name)))
            # Tracks how long tasks wait in the queue
            self._task_time_in_queue_timer = TimerWrapper(
                metrics_registry.timer(
                    '{}-threadpool.task_time_in_queue'.format(self._name)))
        else:
            self._workers_already_in_use_gauge = GaugeWrapper()
            self._task_run_timer = TimerWrapper()
            self._task_time_in_queue_timer = TimerWrapper()

    def submit(self, fn, *args, **kwargs):
        time_in_queue_ctx = self._task_time_in_queue_timer.time()

        try:
            task_name = fn.__qualname__
        except AttributeError:
            task_name = str(fn)

        if self._trace:
            task_details = '{}[{},{}]'.format(fn, args, kwargs)
        else:
            task_details = task_name

        def wrapper():
            time_in_queue_ctx.stop()

            self._workers_already_in_use_gauge.set_value(
                self._workers_in_use.get_and_inc())

            if self._trace:
                LOGGER.debug('(%s) Executing task %s', self._name,
                             task_details)

            with self._task_run_timer.time():
                return_value = None
                try:
                    return_value = fn(*args, **kwargs)
                # pylint: disable=broad-except
                except Exception:
                    LOGGER.exception(
                        '(%s) Unhandled exception during execution of task %s',
                        self._name, task_details)

                self._workers_in_use.dec()

                if self._trace:
                    LOGGER.debug('(%s) Finished task %s', self._name,
                                 task_details)

                return return_value

        return super().submit(wrapper)
Example #8
class InstrumentedThreadPoolExecutor(ThreadPoolExecutor):
    def __init__(self, max_workers=None, name='',
                 trace=None, metrics_registry=None):
        if trace is None:
            self._trace = 'SAWTOOTH_TRACE_LOGGING' in os.environ
        else:
            self._trace = trace

        self._name = name
        if name == '':
            self._name = 'Instrumented'

        LOGGER.debug('Creating thread pool executor %s', self._name)

        self._workers_in_use = atomic.Counter()

        self._max_workers = max_workers
        if self._max_workers is None:
            # This is the same default as ThreadPoolExecutor, but we want to
            # know how many workers there are for logging
            self._max_workers = multiprocessing.cpu_count() * 5
        super().__init__(max_workers)

        if metrics_registry:
            # Tracks how many workers are already in use
            self._workers_already_in_use_gauge = GaugeWrapper(
                metrics_registry.gauge(
                    '{}-threadpool.workers_already_in_use'.format(self._name)))
            # Tracks how long tasks take to run
            self._task_run_timer = TimerWrapper(
                metrics_registry.timer(
                    '{}-threadpool.task_run_time'.format(self._name)))
            # Tracks how long tasks wait in the queue
            self._task_time_in_queue_timer = TimerWrapper(
                metrics_registry.timer(
                    '{}-threadpool.task_time_in_queue'.format(self._name)))
        else:
            self._workers_already_in_use_gauge = GaugeWrapper()
            self._task_run_timer = TimerWrapper()
            self._task_time_in_queue_timer = TimerWrapper()

    def submit(self, fn, *args, **kwargs):
        time_in_queue_ctx = self._task_time_in_queue_timer.time()

        try:
            task_name = fn.__qualname__
        except AttributeError:
            task_name = str(fn)

        if self._trace:
            task_details = '{}[{},{}]'.format(fn, args, kwargs)
        else:
            task_details = task_name

        def wrapper():
            time_in_queue_ctx.stop()

            self._workers_already_in_use_gauge.set_value(
                self._workers_in_use.get_and_inc())

            if self._trace:
                LOGGER.debug(
                    '(%s) Executing task %s', self._name, task_details)

            with self._task_run_timer.time():
                return_value = None
                try:
                    return_value = fn(*args, **kwargs)
                # pylint: disable=broad-except
                except Exception:
                    LOGGER.exception(
                        '(%s) Unhandled exception during execution of task %s',
                        self._name,
                        task_details)

                self._workers_in_use.dec()

                if self._trace:
                    LOGGER.debug(
                        '(%s) Finished task %s', self._name, task_details)

                return return_value

        return super().submit(wrapper)
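All the variants above lean on atomic.Counter for the live worker count. A thread-safe stand-in, assuming only the two operations the examples call, get_and_inc() (return the old value, then increment) and dec():

import threading


class Counter:
    """Illustrative stand-in for the atomic.Counter the examples import."""

    def __init__(self, value=0):
        self._value = value
        self._lock = threading.Lock()

    def get_and_inc(self):
        # Return the value *before* incrementing, so the gauge reports how
        # many workers were already busy when this task started running.
        with self._lock:
            value = self._value
            self._value += 1
            return value

    def dec(self):
        with self._lock:
            self._value -= 1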