# Example 1
    def call_dispatch_function(wf_reporter, *args, **kwargs):
        """Invoke the wrapped function while reporting Wavefront metrics.

        Reports invocation count, error count, and duration (in
        milliseconds) under the ``dispatch.function.wf.`` prefix, then
        flushes the registry through *wf_reporter*.

        ``func``, ``reg`` and ``delta`` come from the enclosing scope
        (not visible in this view).

        :param wf_reporter: Wavefront reporter used to flush metrics.
        """
        METRICS_PREFIX = "dispatch.function.wf."
        # Register duration metrics
        dispatch_function_duration_gauge = reg.gauge(METRICS_PREFIX +
                                                     "duration")
        # Register invocations metrics
        dispatch_function_invocations_counter = delta.delta_counter(
            reg, METRICS_PREFIX + "invocations")
        dispatch_function_invocations_counter.inc()
        # Register errors metrics (fixed misspelled local "erros")
        dispatch_errors_count = delta.delta_counter(reg,
                                                    METRICS_PREFIX + "errors")
        time_start = datetime.now()
        try:
            return func(*args, **kwargs)
        except BaseException:
            # Count every failure (BaseException matches the original
            # bare ``except``, so KeyboardInterrupt/SystemExit are still
            # counted) and re-raise unchanged.
            dispatch_errors_count.inc()
            raise
        finally:
            # Duration and flush happen on both success and failure.
            time_taken = datetime.now() - time_start
            dispatch_function_duration_gauge.set_value(
                time_taken.total_seconds() * 1000)
            wf_reporter.report_now(registry=reg)
# Example 2
 def call_lambda_with_standard_metrics(wf_reporter, *args, **kwargs):
     """Invoke the wrapped AWS Lambda handler while reporting metrics.

     Reports cold-start count, invocation count, error count, and
     duration (in milliseconds) under the ``aws.lambda.wf.`` prefix,
     then flushes the registry through *wf_reporter*.

     ``func``, ``reg``, ``delta`` and ``is_cold_start`` come from the
     enclosing/module scope (not visible in this view).

     :param wf_reporter: Wavefront reporter used to flush metrics.
     """
     METRIC_PREFIX = "aws.lambda.wf."
     # Register cold start counter
     aws_cold_starts_counter = delta.delta_counter(
         reg, METRIC_PREFIX + "coldstarts")
     # ``is_cold_start`` is a module-level flag: True only for the first
     # invocation in this process, then cleared below.
     global is_cold_start
     if is_cold_start:
         # Set cold start counter.
         aws_cold_starts_counter.inc()
         is_cold_start = False
     # Set invocations counter
     aws_lambda_invocations_counter = delta.delta_counter(
         reg, METRIC_PREFIX + "invocations")
     aws_lambda_invocations_counter.inc()
     # Register duration gauge.
     aws_lambda_duration_gauge = reg.gauge(METRIC_PREFIX + "duration")
     # Register error counter.
     aws_lambda_errors_counter = delta.delta_counter(
         reg, METRIC_PREFIX + "errors")
     time_start = datetime.now()
     try:
         result = func(*args, **kwargs)
         return result
     except:
         # Bare except: every failure (including BaseException) is
         # counted and re-raised.
         # Set error counter
         aws_lambda_errors_counter.inc()
         raise
     finally:
         # Duration + flush run on both success and failure paths.
         time_taken = datetime.now() - time_start
         # Set duration gauge
         aws_lambda_duration_gauge.set_value(time_taken.total_seconds() *
                                             1000)
         wf_reporter.report_now(registry=reg)
    def test_delta_counter(self):
        """Delta counters are created once per name and reused."""
        registry = TaggedRegistry()
        first = delta.delta_counter(registry, "foo")
        assert isinstance(first, delta.DeltaCounter)

        # Requesting the same name again must return the counter that
        # was registered the first time.
        assert first == delta.delta_counter(registry, "foo")
        assert delta.is_delta_counter(
            delta.DeltaCounter.DELTA_PREFIX + "foo", registry)

        # A different name yields a distinct counter object.
        assert first != delta.delta_counter(registry, "foobar")
    def test_delta_counter(self):
        """Test Delta Counter."""
        registry = tagged_registry.TaggedRegistry()
        first = delta.delta_counter(registry, 'foo')
        assert isinstance(first, delta.DeltaCounter)

        # The same name must map back to the previously registered
        # counter, and the prefixed name must be recognized as a delta.
        assert first == delta.delta_counter(registry, 'foo')
        assert delta.is_delta_counter(
            delta.DeltaCounter.DELTA_PREFIX + 'foo', registry)

        # A new name creates an independent counter.
        assert first != delta.delta_counter(registry, 'foobar')
# Example 5
def report_metrics(host, server, token):
    """Exercise every pyformance metric type and flush both reporters.

    :param host: Wavefront proxy host.
    :param server: Wavefront server URL for direct ingestion.
    :param token: API token for direct ingestion.
    """
    reg = TaggedRegistry()

    proxy_reporter = WavefrontProxyReporter(
        host=host, port=2878, registry=reg,
        source="wavefront-pyformance-example",
        tags={"key1": "val1", "key2": "val2"},
        prefix="python.proxy.")
    direct_reporter = WavefrontDirectReporter(
        server=server, token=token, registry=reg,
        source="wavefront-pyformance-exmaple",
        tags={"key1": "val1", "key2": "val2"},
        prefix="python.direct.")

    # counter
    reg.counter("foo_count", tags={"counter_key": "counter_val"}).inc()

    # delta counter, incremented twice
    delta_count = delta.delta_counter(
        reg, "foo_delta_count", tags={"delta_key": "delta_val"})
    delta_count.inc()
    delta_count.inc()

    # gauge
    reg.gauge("foo_gauge", tags={"gauge_key": "gauge_val"}).set_value(2)

    # meter
    reg.meter("foo_meter", tags={"meter_key": "meter_val"}).mark()

    # timer wrapping a 3-second sleep
    timing = reg.timer("foo_timer", tags={"timer_key": "timer_val"}).time()
    time.sleep(3)
    timing.stop()

    # histogram
    hist = reg.histogram("foo_histogram", tags={"hist_key": "hist_val"})
    hist.add(1.0)
    hist.add(1.5)

    # Flush and shut down both reporters.
    direct_reporter.report_now()
    direct_reporter.stop()
    proxy_reporter.report_now()
    proxy_reporter.stop()
    def set_metrics_reporter(self, wavefront_reporter):
        """Set Wavefront Reporter for internal metrics.

        :param wavefront_reporter: Wavefront Reporter
        """
        self._metrics_reporter = wavefront_reporter
        registry = wavefront_reporter.registry
        self._registry = registry
        # SDK version gauge plus queue-depth gauges.
        registry.gauge('version', self.CustomGauge(
            lambda: get_sem_ver('wavefront-opentracing-sdk-python')))
        registry.gauge("reporter.queue.size",
                       self.CustomGauge(self._span_buffer.qsize))
        registry.gauge("reporter.queue.remaining_capacity",
                       self.CustomGauge(self._get_span_buffer_remain_size))
        # Delta counters for span traffic accounting.
        self.span_received = delta.delta_counter(
            registry, "reporter.spans.received")
        self.spans_dropped = delta.delta_counter(
            registry, "reporter.spans.dropped")
        self.report_errors = delta.delta_counter(
            registry, "reporter.spans.errors")
# Example 7
def report_metrics(host, server, token):
    """Metrics Reporting Function Example."""
    reg = tagged_registry.TaggedRegistry()

    proxy = wavefront_reporter.WavefrontProxyReporter(
        host=host, port=2878, registry=reg,
        source='wavefront-pyformance-example',
        tags={'key1': 'val1', 'key2': 'val2'},
        prefix='python.proxy.').report_minute_distribution()
    direct = wavefront_reporter.WavefrontDirectReporter(
        server=server, token=token, registry=reg,
        source='wavefront-pyformance-exmaple',
        tags={'key1': 'val1', 'key2': 'val2'},
        prefix='python.direct.').report_minute_distribution()

    # counter
    reg.counter('foo_count', tags={'counter_key': 'counter_val'}).inc()

    # delta counter, incremented twice
    delta_count = delta.delta_counter(reg, 'foo_delta_count',
                                      tags={'delta_key': 'delta_val'})
    delta_count.inc()
    delta_count.inc()

    # gauge
    reg.gauge('foo_gauge', tags={'gauge_key': 'gauge_val'}).set_value(2)

    # meter
    reg.meter('foo_meter', tags={'meter_key': 'meter_val'}).mark()

    # timer wrapping a 3-second sleep
    timing = reg.timer('foo_timer', tags={'timer_key': 'timer_val'}).time()
    time.sleep(3)
    timing.stop()

    # plain histogram
    hist = reg.histogram('foo_histogram', tags={'hist_key': 'hist_val'})
    hist.add(1.0)
    hist.add(1.5)

    # Wavefront Histogram
    wf_hist = wavefront_histogram.wavefront_histogram(reg, 'wf_histogram')
    wf_hist.add(1.0)
    wf_hist.add(2.0)

    # Flush and shut down both reporters.
    direct.report_now()
    direct.stop()
    proxy.report_now()
    proxy.stop()
def lambda_handler(event, context):
    """AWS Lambda entry point demonstrating Wavefront metric types."""
    # Get registry to report metrics.
    registry = wavefront_lambda.get_registry()

    # Delta counter
    delta.delta_counter(registry, "deltaCounter").inc()

    # Plain counter
    registry.counter("counter").inc()

    # Gauge
    registry.gauge("gaugeValue").set_value(5.5)
# Example 9
    def __init__(self, tracer, operation_name, context, start_time, parents,
                 follows, tags):
        """Construct Wavefront Span.

        :param tracer: Tracer that create this span
        :type tracer: wavefront_opentracing_python_sdk.WavefrontTracer
        :param operation_name: Operation Name
        :type operation_name: str
        :param context: Span Context
        :type context: wavefront_opentracing_python_sdk.WavefrontSpanContext
        :param start_time: an explicit Span start time as a unix timestamp per
            :meth:`time.time()`
        :type start_time: float
        :param parents: List of UUIDs of parents span
        :type parents: list of uuid.UUID
        :param follows: List of UUIDs of follows span
        :type follows: list of uuid.UUID
        :param tags: Tags of the span
        :type tags: list of tuple
        """
        super().__init__(tracer=tracer, context=context)
        self._context = context
        self.operation_name = operation_name
        self.start_time = start_time
        self.duration_time = 0
        self.parents = parents
        self.follows = follows
        # Lifecycle/sampling state, updated elsewhere in the class.
        self._finished = False
        self._is_error = False
        self._force_sampling = None
        # Lock created before any set_tag call below — presumably
        # set_tag acquires it; confirm in the class body.
        self.update_lock = threading.Lock()
        self.tags = []
        self.logs = []
        # Only (key, value) tuples are applied; other entries in *tags*
        # are silently skipped.
        for tag in tags:
            if isinstance(tag, tuple):
                self.set_tag(tag[0], tag[1])
        # Guarantee a "component" tag exists, defaulting to "none".
        if opentracing.ext.tags.COMPONENT not in dict(self.tags):
            self.set_tag(opentracing.ext.tags.COMPONENT, "none")
        # Internal drop counter, only when the tracer has an internal
        # reporter configured; otherwise None.
        self._spans_discarded = (None if tracer.wf_internal_reporter is None
                                 else delta.delta_counter(
                                     tracer.wf_internal_reporter.registry,
                                     "spans.discarded"))
# Example 10
def report_metrics(proxy_reporter, direct_reporter):
    """Metrics Reporting Function Example."""
    # counter (``reg`` is the module-level registry)
    reg.counter('foo_count', tags={'counter_key': 'counter_val'}).inc()

    # delta counter, incremented twice
    delta_count = delta.delta_counter(reg, 'foo_delta_count',
                                      tags={'delta_key': 'delta_val'})
    delta_count.inc()
    delta_count.inc()

    # gauge
    reg.gauge('foo_gauge', tags={'gauge_key': 'gauge_val'}).set_value(2)

    # meter
    reg.meter('foo_meter', tags={'meter_key': 'meter_val'}).mark()

    # timer wrapping a 3-second sleep
    timing = reg.timer('foo_timer', tags={'timer_key': 'timer_val'}).time()
    time.sleep(3)
    timing.stop()

    # plain histogram
    hist = reg.histogram('foo_histogram', tags={'hist_key': 'hist_val'})
    hist.add(1.0)
    hist.add(1.5)

    # Wavefront Histogram
    wf_hist = wavefront_histogram.wavefront_histogram(reg, 'wf_histogram')
    wf_hist.add(1.0)
    wf_hist.add(2.0)

    # Flush and shut down both reporters.
    direct_reporter.report_now()
    direct_reporter.stop()
    proxy_reporter.report_now()
    proxy_reporter.stop()
    def _after_request_fn(self, response=None, error=None):
        """Record response metrics and close tracing after a Flask request.

        Closes the active tracing scope, decrements in-flight gauges, and
        emits cumulative/aggregated counters plus latency histograms for
        the completed request.

        :param response: Flask response object; may be ``None`` when the
            request failed before a response was produced.
        :param error: exception raised while handling the request, if any.
        """
        request = stack.top.request

        # the pop call can fail if the request is interrupted by a
        # `before_request` method so we need a default
        scope = self._current_scopes.pop(request, None)
        if scope is not None:
            if response is not None:
                scope.span.set_tag('http.status_code', response.status_code)
            # Bug fix: guard ``response`` before reading ``status_code`` —
            # on the error path ``response`` is None and the original
            # comparison raised AttributeError.
            if error is not None or (response is not None and
                                     400 <= response.status_code <= 599):
                scope.span.set_tag('error', 'true')
                scope.span.log_kv({
                    'event': tags.ERROR,
                    'error.object': error,
                })
            scope.close()

        # Normalize the matched route into a dotted metric-name component.
        operation_name = request.endpoint
        entity_name = (request.url_rule.rule or operation_name). \
            replace('-', '_').replace('/', '.').replace('{', '_'). \
            replace('}', '_').replace('<', '').replace('>', '').lstrip('.')

        # Close out the in-flight gauges opened by the request hook.
        self.update_gauge(registry=self.reg,
                          key=self.get_metric_name(entity_name, request) +
                          ".inflight",
                          tags=self.get_tags_map(func_name=operation_name),
                          val=-1)
        self.update_gauge(registry=self.reg,
                          key="total_requests.inflight",
                          tags=self.get_tags_map(cluster=self.CLUSTER,
                                                 service=self.SERVICE,
                                                 shard=self.SHARD),
                          val=-1)

        response_metric_key = self.get_metric_name(entity_name, request,
                                                   response)

        # Tag maps at every aggregation granularity (shard, service,
        # cluster, application, source).
        complete_tags_map = self.get_tags_map(cluster=self.CLUSTER,
                                              service=self.SERVICE,
                                              shard=self.SHARD,
                                              func_name=operation_name)

        aggregated_per_shard_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            shard=self.SHARD,
            func_name=operation_name,
            source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_source_map = self.get_tags_map(
            cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD)

        overall_aggregated_per_shard_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            shard=self.SHARD,
            source=WAVEFRONT_PROVIDED_SOURCE)

        aggregated_per_service_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            func_name=operation_name,
            source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_service_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            source=WAVEFRONT_PROVIDED_SOURCE)

        aggregated_per_cluster_map = self.get_tags_map(
            cluster=self.CLUSTER,
            func_name=operation_name,
            source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_cluster_map = self.get_tags_map(
            cluster=self.CLUSTER, source=WAVEFRONT_PROVIDED_SOURCE)

        aggregated_per_application_map = self.get_tags_map(
            func_name=operation_name, source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_application_map = self.get_tags_map(
            source=WAVEFRONT_PROVIDED_SOURCE)

        # flask.server.response.style._id_.make.GET.200.cumulative.count
        # flask.server.response.style._id_.make.GET.200.aggregated_per_shard.count
        # flask.server.response.style._id_.make.GET.200.aggregated_per_service.count
        # flask.server.response.style._id_.make.GET.200.aggregated_per_cluster.count
        # flask.server.response.style._id_.make.GET.200.aggregated_per_application.count
        # flask.server.response.style._id_.make.GET.errors
        self.reg.counter(response_metric_key + ".cumulative",
                         tags=complete_tags_map).inc()
        if self.application_tags.shard:
            delta_counter(self.reg,
                          response_metric_key + ".aggregated_per_shard",
                          tags=aggregated_per_shard_map).inc()
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_service",
                      tags=aggregated_per_service_map).inc()
        if self.application_tags.cluster:
            delta_counter(self.reg,
                          response_metric_key + ".aggregated_per_cluster",
                          tags=aggregated_per_cluster_map).inc()
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_application",
                      tags=aggregated_per_application_map).inc()

        # flask.server.response.errors.aggregated_per_source.count
        # flask.server.response.errors.aggregated_per_shard.count
        # flask.server.response.errors.aggregated_per_service.count
        # flask.server.response.errors.aggregated_per_cluster.count
        # flask.server.response.errors.aggregated_per_application.count
        # NOTE(review): response may still be None here;
        # is_error_status_code must tolerate that — confirm.
        if self.is_error_status_code(response):
            self.reg.counter(self.get_metric_name_without_status(
                entity_name, request),
                             tags=complete_tags_map).inc()
            self.reg.counter("response.errors", tags=complete_tags_map).inc()
            self.reg.counter("response.errors.aggregated_per_source",
                             tags=overall_aggregated_per_source_map).inc()
            if self.application_tags.shard:
                delta_counter(self.reg,
                              "response.errors.aggregated_per_shard",
                              tags=overall_aggregated_per_shard_map).inc()
            delta_counter(self.reg,
                          "response.errors.aggregated_per_service",
                          tags=overall_aggregated_per_service_map).inc()
            if self.application_tags.cluster:
                delta_counter(self.reg,
                              "response.errors.aggregated_per_cluster",
                              tags=overall_aggregated_per_cluster_map).inc()
            delta_counter(self.reg,
                          "response.errors.aggregated_per_application",
                          tags=overall_aggregated_per_application_map).inc()

        # flask.server.response.completed.aggregated_per_source.count
        # flask.server.response.completed.aggregated_per_shard.count
        # flask.server.response.completed.aggregated_per_service.count
        # flask.server.response.completed.aggregated_per_cluster.count
        # flask.server.response.completed.aggregated_per_application.count
        self.reg.counter("response.completed.aggregated_per_source",
                         tags=overall_aggregated_per_source_map).inc()
        # NOTE(review): per_service/per_application below are emitted only
        # when SHARD/CLUSTER are set — confirm this nesting is intentional.
        if self.SHARD is not NULL_TAG_VAL:
            delta_counter(self.reg,
                          "response.completed.aggregated_per_shard",
                          tags=overall_aggregated_per_shard_map).inc()
            self.reg.counter("response.completed.aggregated_per_service",
                             tags=overall_aggregated_per_service_map).inc()
        if self.CLUSTER is not NULL_TAG_VAL:
            delta_counter(self.reg,
                          "response.completed.aggregated_per_cluster",
                          tags=overall_aggregated_per_cluster_map).inc()
            self.reg.counter(
                "response.completed.aggregated_per_application",
                tags=overall_aggregated_per_application_map).inc()

        # flask.server.response.style._id_.make.summary.GET.200.latency.m
        # flask.server.response.style._id_.make.summary.GET.200.cpu_ns.m
        # flask.server.response.style._id_.make.summary.GET.200.total_time.count
        wf_start_timestamp = request.environ.get('_wf_start_timestamp')
        wf_cpu_nanos = request.environ.get('_wf_cpu_nanos')
        if wf_start_timestamp:
            timestamp_duration = default_timer() - wf_start_timestamp
            wavefront_histogram(self.reg,
                                response_metric_key + ".latency",
                                tags=complete_tags_map).add(timestamp_duration)
            self.reg.counter(response_metric_key + ".total_time",
                             tags=complete_tags_map).inc(timestamp_duration)
        if wf_cpu_nanos:
            # NOTE(review): time.clock() was removed in Python 3.8;
            # presumably the before-request hook captured _wf_cpu_nanos
            # with the same call — update both together when porting.
            cpu_nanos_duration = time.clock() - wf_cpu_nanos
            wavefront_histogram(self.reg,
                                response_metric_key + ".cpu_ns",
                                tags=complete_tags_map).add(cpu_nanos_duration)
# Example 12
    def report_wavefront_generated_data(self, span):
        """Report Wavefront generated data from spans.

        Derives RED metrics (error count, invocation count, total time)
        as delta counters and a duration histogram from the finished
        *span*, tagged with application/service/operation metadata.

        :param span: finished span to convert into derived metrics.
        """
        if self.wf_derived_reporter is None:
            # WavefrontSpanReporter not set, so no tracing spans will be
            # reported as metrics/histograms.
            return
        # Need to sanitize metric name as application, service and operation
        # names can have spaces and other invalid metric name characters.
        point_tags = {}
        # Span tag values are sequences; ``[0]`` below picks the first
        # value for each multi-valued tag.
        span_tags = span.get_tags_as_map()

        if self.red_metrics_custom_tag_keys:
            for key in self.red_metrics_custom_tag_keys:
                if key in span_tags:
                    point_tags.update({key: span_tags.get(key)[0]})
        # Set default value of span.kind tag.
        point_tags.setdefault(SPAN_KIND, constants.NULL_TAG_VAL)

        for key in self.single_valued_tag_keys:
            if key in span_tags:
                point_tags.update({key: span_tags.get(key)[0]})
        # NOTE(review): raises TypeError if the span lacks a COMPONENT
        # tag (``get`` returns None) — presumably guaranteed upstream.
        point_tags.update({COMPONENT: span_tags.get(COMPONENT)[0]})

        # Propagate http status if the span has error.
        if HTTP_STATUS_CODE in span_tags:
            point_tags.update(
                {HTTP_STATUS_CODE: span_tags.get(HTTP_STATUS_CODE)[0]})

        if self.heartbeater_service:
            self.heartbeater_service.report_custom_tags(point_tags)

        # Add operation tag after sending RED heartbeat.
        point_tags.update({self.OPERATION_NAME_TAG: span.get_operation_name()})
        application_service_prefix = (
            'tracing.derived.{}.{}.'.format(
                span_tags.get(constants.APPLICATION_TAG_KEY)[0],
                span_tags.get(constants.SERVICE_TAG_KEY)[0]))

        # Error count (only for failed spans).
        if span.is_error():
            delta.delta_counter(self.wf_derived_reporter.registry,
                                self.sanitize(application_service_prefix +
                                              span.get_operation_name() +
                                              self.ERROR_SUFFIX),
                                point_tags).inc()

        # Invocation count (all spans).
        delta.delta_counter(self.wf_derived_reporter.registry,
                            self.sanitize(application_service_prefix +
                                          span.get_operation_name() +
                                          self.INVOCATION_SUFFIX),
                            point_tags).inc()
        # Convert from secs to millis and add to duration counter.
        span_duration_millis = span.get_duration_time() * 1000
        delta.delta_counter(self.wf_derived_reporter.registry,
                            self.sanitize(application_service_prefix +
                                          span.get_operation_name() +
                                          self.TOTAL_TIME_SUFFIX),
                            point_tags).inc(span_duration_millis)
        # Convert from millis to micros and add to histogram.
        span_duration_micros = span_duration_millis * 1000
        if span.is_error():
            # Error spans get an extra error=true point tag on a copy so
            # the shared point_tags dict is not mutated.
            error_point_tags = dict(point_tags)
            error_point_tags.update({'error': 'true'})
            wavefront_histogram.wavefront_histogram(
                self.wf_derived_reporter.registry,
                self.sanitize(application_service_prefix +
                              span.get_operation_name() +
                              self.DURATION_SUFFIX),
                error_point_tags).add(span_duration_micros)
        else:
            wavefront_histogram.wavefront_histogram(
                self.wf_derived_reporter.registry,
                self.sanitize(application_service_prefix +
                              span.get_operation_name() +
                              self.DURATION_SUFFIX),
                point_tags).add(span_duration_micros)
# Example 13
def inc_counter(mname):
    """Increment the delta counter *mname* on the module-level registry.

    Exceptions from ``delta_counter``/``inc`` propagate unchanged — the
    original ``except Exception: raise`` wrapper was a no-op and has
    been removed.

    :param mname: metric name to increment.
    """
    delta.delta_counter(reg, mname).inc()
    def process_response(self, request, response):
        """
        Process the response before Django returns it to the client.

        Decrements in-flight gauges, emits cumulative and aggregated
        counters, error counters, and latency/CPU histograms for the
        completed request, then returns the response unchanged.

        :param request: incoming HTTP request.
        :param response: outgoing response.
        """
        # Pass through untouched when the middleware is disabled.
        if not self.MIDDLEWARE_ENABLED:
            return response
        entity_name = self.get_entity_name(request)
        func_name = resolve(request.path_info).func.__name__
        module_name = resolve(request.path_info).func.__module__

        if self.tracing:
            self.tracing._finish_tracing(request, response=response)

        # Close out the in-flight gauges opened by the request hook.
        self.update_gauge(registry=self.reg,
                          key=self.get_metric_name(entity_name, request) +
                          ".inflight",
                          tags=self.get_tags_map(module_name=module_name,
                                                 func_name=func_name),
                          val=-1)
        self.update_gauge(registry=self.reg,
                          key="total_requests.inflight",
                          tags=self.get_tags_map(cluster=self.CLUSTER,
                                                 service=self.SERVICE,
                                                 shard=self.SHARD),
                          val=-1)

        response_metric_key = self.get_metric_name(entity_name, request,
                                                   response)

        # Tag maps at every aggregation granularity (shard, service,
        # cluster, application, source).
        complete_tags_map = self.get_tags_map(cluster=self.CLUSTER,
                                              service=self.SERVICE,
                                              shard=self.SHARD,
                                              module_name=module_name,
                                              func_name=func_name)

        aggregated_per_shard_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            shard=self.SHARD,
            module_name=module_name,
            func_name=func_name,
            source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_source_map = self.get_tags_map(
            cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD)

        overall_aggregated_per_shard_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            shard=self.SHARD,
            source=WAVEFRONT_PROVIDED_SOURCE)

        aggregated_per_service_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            module_name=module_name,
            func_name=func_name,
            source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_service_map = self.get_tags_map(
            cluster=self.CLUSTER,
            service=self.SERVICE,
            source=WAVEFRONT_PROVIDED_SOURCE)

        aggregated_per_cluster_map = self.get_tags_map(
            cluster=self.CLUSTER,
            module_name=module_name,
            func_name=func_name,
            source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_cluster_map = self.get_tags_map(
            cluster=self.CLUSTER, source=WAVEFRONT_PROVIDED_SOURCE)

        aggregated_per_application_map = self.get_tags_map(
            module_name=module_name,
            func_name=func_name,
            source=WAVEFRONT_PROVIDED_SOURCE)

        overall_aggregated_per_application_map = self.get_tags_map(
            source=WAVEFRONT_PROVIDED_SOURCE)

        # django.server.response.style._id_.make.GET.200.cumulative.count
        # django.server.response.style._id_.make.GET.200.aggregated_per_shard.count
        # django.server.response.style._id_.make.GET.200.aggregated_per_service.count
        # django.server.response.style._id_.make.GET.200.aggregated_per_cluster.count
        # django.server.response.style._id_.make.GET.200.aggregated_per_application.count
        # django.server.response.style._id_.make.GET.errors
        self.reg.counter(response_metric_key + ".cumulative",
                         tags=complete_tags_map).inc()
        if self.application_tags.shard:
            delta_counter(self.reg,
                          response_metric_key + ".aggregated_per_shard",
                          tags=aggregated_per_shard_map).inc()
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_service",
                      tags=aggregated_per_service_map).inc()
        if self.application_tags.cluster:
            delta_counter(self.reg,
                          response_metric_key + ".aggregated_per_cluster",
                          tags=aggregated_per_cluster_map).inc()
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_application",
                      tags=aggregated_per_application_map).inc()

        # django.server.response.errors.aggregated_per_source.count
        # django.server.response.errors.aggregated_per_shard.count
        # django.server.response.errors.aggregated_per_service.count
        # django.server.response.errors.aggregated_per_cluster.count
        # django.server.response.errors.aggregated_per_application.count
        if self.is_error_status_code(response):
            self.reg.counter(self.get_metric_name_without_status(
                entity_name, request),
                             tags=complete_tags_map).inc()
            self.reg.counter("response.errors", tags=complete_tags_map).inc()
            self.reg.counter("response.errors.aggregated_per_source",
                             tags=overall_aggregated_per_source_map).inc()
            if self.application_tags.shard:
                delta_counter(self.reg,
                              "response.errors.aggregated_per_shard",
                              tags=overall_aggregated_per_shard_map).inc()
            delta_counter(self.reg,
                          "response.errors.aggregated_per_service",
                          tags=overall_aggregated_per_service_map).inc()
            if self.application_tags.cluster:
                delta_counter(self.reg,
                              "response.errors.aggregated_per_cluster",
                              tags=overall_aggregated_per_cluster_map).inc()
            delta_counter(self.reg,
                          "response.errors.aggregated_per_application",
                          tags=overall_aggregated_per_application_map).inc()

        # django.server.response.completed.aggregated_per_source.count
        # django.server.response.completed.aggregated_per_shard.count
        # django.server.response.completed.aggregated_per_service.count
        # django.server.response.completed.aggregated_per_cluster.count
        # django.server.response.completed.aggregated_per_application.count
        self.reg.counter("response.completed.aggregated_per_source",
                         tags=overall_aggregated_per_source_map).inc()
        # NOTE(review): per_service/per_application below are emitted only
        # when SHARD/CLUSTER are set — confirm this nesting is intentional.
        if self.SHARD is not NULL_TAG_VAL:
            delta_counter(self.reg,
                          "response.completed.aggregated_per_shard",
                          tags=overall_aggregated_per_shard_map).inc()
            self.reg.counter("response.completed.aggregated_per_service",
                             tags=overall_aggregated_per_service_map).inc()
        if self.CLUSTER is not NULL_TAG_VAL:
            delta_counter(self.reg,
                          "response.completed.aggregated_per_cluster",
                          tags=overall_aggregated_per_cluster_map).inc()
            self.reg.counter(
                "response.completed.aggregated_per_application",
                tags=overall_aggregated_per_application_map).inc()

        # django.server.response.style._id_.make.summary.GET.200.latency.m
        # django.server.response.style._id_.make.summary.GET.200.cpu_ns.m
        # django.server.response.style._id_.make.summary.GET.200.total_time.count
        if hasattr(request, 'wf_start_timestamp'):
            timestamp_duration = default_timer() - request.wf_start_timestamp
            # NOTE(review): time.clock() was removed in Python 3.8;
            # presumably wf_cpu_nanos was captured with the same call in
            # the request hook — update both together when porting.
            cpu_nanos_duration = time.clock() - request.wf_cpu_nanos
            wavefront_histogram(self.reg,
                                response_metric_key + ".latency",
                                tags=complete_tags_map).add(timestamp_duration)
            wavefront_histogram(self.reg,
                                response_metric_key + ".cpu_ns",
                                tags=complete_tags_map).add(cpu_nanos_duration)
            self.reg.counter(response_metric_key + ".total_time",
                             tags=complete_tags_map).inc(timestamp_duration)
        return response