def test_wavefront_histogram(self):
    """Test Wavefront Histogram."""
    registry = tagged_registry.TaggedRegistry()
    # Record into a plain pyformance histogram first to make sure both
    # histogram flavors can coexist in the same registry.
    _ = registry.histogram('pyformance_hist').add(1.0)
    result = wavefront_histogram.wavefront_histogram(
        registry, 'wavefront_hist').add(2.0)
    # add() on a Wavefront histogram returns the histogram itself.
    assert isinstance(result, wavefront_histogram.WavefrontHistogram)
def report_metrics(host, server, token):
    """Metrics Reporting Function Example.

    Builds a tagged registry, attaches a Wavefront proxy reporter and a
    Wavefront direct reporter to it, records one of every metric type,
    then flushes and stops both reporters.

    :param host: Wavefront proxy host (reported to on port 2878).
    :param server: Wavefront server URL for direct ingestion.
    :param token: API token for direct ingestion.
    """
    reg = tagged_registry.TaggedRegistry()
    wf_proxy_reporter = wavefront_reporter.WavefrontProxyReporter(
        host=host, port=2878, registry=reg,
        source='wavefront-pyformance-example',
        tags={'key1': 'val1', 'key2': 'val2'},
        prefix='python.proxy.').report_minute_distribution()
    # FIX: source was misspelled 'wavefront-pyformance-exmaple'; keep it
    # consistent with the proxy reporter's source tag above.
    wf_direct_reporter = wavefront_reporter.WavefrontDirectReporter(
        server=server, token=token, registry=reg,
        source='wavefront-pyformance-example',
        tags={'key1': 'val1', 'key2': 'val2'},
        prefix='python.direct.').report_minute_distribution()

    # counter
    c_1 = reg.counter('foo_count', tags={'counter_key': 'counter_val'})
    c_1.inc()

    # delta counter
    d_1 = delta.delta_counter(reg, 'foo_delta_count',
                              tags={'delta_key': 'delta_val'})
    d_1.inc()
    d_1.inc()

    # gauge
    g_1 = reg.gauge('foo_gauge', tags={'gauge_key': 'gauge_val'})
    g_1.set_value(2)

    # meter
    m_1 = reg.meter('foo_meter', tags={'meter_key': 'meter_val'})
    m_1.mark()

    # timer -- sleep so the timer context records a non-trivial duration
    t_1 = reg.timer('foo_timer', tags={'timer_key': 'timer_val'})
    timer_ctx = t_1.time()
    time.sleep(3)
    timer_ctx.stop()

    # histogram
    h_1 = reg.histogram('foo_histogram', tags={'hist_key': 'hist_val'})
    h_1.add(1.0)
    h_1.add(1.5)

    # Wavefront Histogram
    h_2 = wavefront_histogram.wavefront_histogram(reg, 'wf_histogram')
    h_2.add(1.0)
    h_2.add(2.0)

    # Flush and shut down both reporters.
    wf_direct_reporter.report_now()
    wf_direct_reporter.stop()
    wf_proxy_reporter.report_now()
    wf_proxy_reporter.stop()
def report_metrics(proxy_reporter, direct_reporter):
    """Metrics Reporting Function Example."""
    # NOTE(review): `reg` is not defined in this function — presumably a
    # module-level registry shared with both reporters; confirm at the
    # call site.

    # counter
    request_counter = reg.counter('foo_count',
                                  tags={'counter_key': 'counter_val'})
    request_counter.inc()

    # delta counter
    delta_ctr = delta.delta_counter(reg, 'foo_delta_count',
                                    tags={'delta_key': 'delta_val'})
    delta_ctr.inc()
    delta_ctr.inc()

    # gauge
    sample_gauge = reg.gauge('foo_gauge', tags={'gauge_key': 'gauge_val'})
    sample_gauge.set_value(2)

    # meter
    sample_meter = reg.meter('foo_meter', tags={'meter_key': 'meter_val'})
    sample_meter.mark()

    # timer -- the sleep gives the timer context a measurable duration
    sample_timer = reg.timer('foo_timer', tags={'timer_key': 'timer_val'})
    timing_ctx = sample_timer.time()
    time.sleep(3)
    timing_ctx.stop()

    # histogram
    sample_hist = reg.histogram('foo_histogram',
                                tags={'hist_key': 'hist_val'})
    sample_hist.add(1.0)
    sample_hist.add(1.5)

    # Wavefront Histogram
    wf_hist = wavefront_histogram.wavefront_histogram(reg, 'wf_histogram')
    wf_hist.add(1.0)
    wf_hist.add(2.0)

    # Flush and shut down: direct reporter first, then the proxy one.
    direct_reporter.report_now()
    direct_reporter.stop()
    proxy_reporter.report_now()
    proxy_reporter.stop()
def _after_request_fn(self, response=None, error=None):
    """Post-process request.

    Closes the OpenTracing scope opened for this Flask request,
    decrements the in-flight gauges, and records the per-request
    response counters, latency/CPU histograms and total-time counter
    into the registry, at every aggregation granularity (source, shard,
    service, cluster, application).

    :param response: outgoing Flask response; may be None when the
                     request failed before a response was produced.
    :param error: exception raised while handling the request, if any.
    """
    request = stack.top.request

    # --- Tracing: finish the span opened in the before-request hook ---
    # the pop call can fail if the request is interrupted by a
    # `before_request` method so we need a default
    scope = self._current_scopes.pop(request, None)
    if scope is not None:
        if response is not None:
            scope.span.set_tag('http.status_code', response.status_code)
        # NOTE(review): if `response` is None while `error` is set, the
        # comparison below dereferences None.status_code — confirm the
        # caller never invokes this with error but no response.
        if 400 <= response.status_code <= 599 or error is not None:
            scope.span.set_tag('error', 'true')
            scope.span.log_kv({
                'event': tags.ERROR,
                'error.object': error,
            })
        scope.close()

    # Build a metric-safe entity name from the matched URL rule:
    # dashes -> underscores, '/' -> '.', '{'/'}' -> '_', '<'/'>' removed,
    # and any leading '.' stripped.
    operation_name = request.endpoint
    entity_name = (request.url_rule.rule or operation_name). \
        replace('-', '_').replace('/', '.').replace('{', '_'). \
        replace('}', '_').replace('<', '').replace('>', '').lstrip('.')

    # Decrement the in-flight gauges (incremented in the before hook).
    self.update_gauge(
        registry=self.reg,
        key=self.get_metric_name(entity_name, request) + ".inflight",
        tags=self.get_tags_map(func_name=operation_name),
        val=-1)
    self.update_gauge(
        registry=self.reg,
        key="total_requests.inflight",
        tags=self.get_tags_map(cluster=self.CLUSTER,
                               service=self.SERVICE,
                               shard=self.SHARD),
        val=-1)

    response_metric_key = self.get_metric_name(entity_name, request,
                                               response)

    # Tag maps for each aggregation granularity; aggregated series carry
    # the Wavefront-provided source tag so they can be summed safely.
    complete_tags_map = self.get_tags_map(cluster=self.CLUSTER,
                                          service=self.SERVICE,
                                          shard=self.SHARD,
                                          func_name=operation_name)
    aggregated_per_shard_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD,
        func_name=operation_name, source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_source_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD)
    overall_aggregated_per_shard_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD,
        source=WAVEFRONT_PROVIDED_SOURCE)
    aggregated_per_service_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE,
        func_name=operation_name, source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_service_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE,
        source=WAVEFRONT_PROVIDED_SOURCE)
    aggregated_per_cluster_map = self.get_tags_map(
        cluster=self.CLUSTER, func_name=operation_name,
        source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_cluster_map = self.get_tags_map(
        cluster=self.CLUSTER, source=WAVEFRONT_PROVIDED_SOURCE)
    aggregated_per_application_map = self.get_tags_map(
        func_name=operation_name, source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_application_map = self.get_tags_map(
        source=WAVEFRONT_PROVIDED_SOURCE)

    # flask.server.response.style._id_.make.GET.200.cumulative.count
    # flask.server.response.style._id_.make.GET.200.aggregated_per_shard.count
    # flask.server.response.style._id_.make.GET.200.aggregated_per_service.count
    # flask.server.response.style._id_.make.GET.200.aggregated_per_cluster.count
    # flask.server.response.style._id_.make.GET.200.aggregated_per_application.count
    # flask.server.response.style._id_.make.GET.errors
    self.reg.counter(response_metric_key + ".cumulative",
                     tags=complete_tags_map).inc()
    # Shard/cluster aggregates are only emitted when those application
    # tags are actually set.
    if self.application_tags.shard:
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_shard",
                      tags=aggregated_per_shard_map).inc()
    delta_counter(self.reg,
                  response_metric_key + ".aggregated_per_service",
                  tags=aggregated_per_service_map).inc()
    if self.application_tags.cluster:
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_cluster",
                      tags=aggregated_per_cluster_map).inc()
    delta_counter(self.reg,
                  response_metric_key + ".aggregated_per_application",
                  tags=aggregated_per_application_map).inc()

    # flask.server.response.errors.aggregated_per_source.count
    # flask.server.response.errors.aggregated_per_shard.count
    # flask.server.response.errors.aggregated_per_service.count
    # flask.server.response.errors.aggregated_per_cluster.count
    # flask.server.response.errors.aggregated_per_application.count
    if self.is_error_status_code(response):
        self.reg.counter(self.get_metric_name_without_status(
            entity_name, request), tags=complete_tags_map).inc()
        self.reg.counter("response.errors",
                         tags=complete_tags_map).inc()
        self.reg.counter("response.errors.aggregated_per_source",
                         tags=overall_aggregated_per_source_map).inc()
        if self.application_tags.shard:
            delta_counter(self.reg,
                          "response.errors.aggregated_per_shard",
                          tags=overall_aggregated_per_shard_map).inc()
        delta_counter(self.reg,
                      "response.errors.aggregated_per_service",
                      tags=overall_aggregated_per_service_map).inc()
        if self.application_tags.cluster:
            delta_counter(self.reg,
                          "response.errors.aggregated_per_cluster",
                          tags=overall_aggregated_per_cluster_map).inc()
        delta_counter(self.reg,
                      "response.errors.aggregated_per_application",
                      tags=overall_aggregated_per_application_map).inc()

    # flask.server.response.completed.aggregated_per_source.count
    # flask.server.response.completed.aggregated_per_shard.count
    # flask.server.response.completed.aggregated_per_service.count
    # flask.server.response.completed.aggregated_per_cluster.count
    # flask.server.response.completed.aggregated_per_application.count
    self.reg.counter("response.completed.aggregated_per_source",
                     tags=overall_aggregated_per_source_map).inc()
    # NOTE(review): `is not NULL_TAG_VAL` is an identity (not equality)
    # check — it only works if SHARD/CLUSTER hold the exact sentinel
    # object. Also inconsistent with the `self.application_tags.*`
    # truthiness checks used in the sections above; confirm intent.
    if self.SHARD is not NULL_TAG_VAL:
        delta_counter(self.reg,
                      "response.completed.aggregated_per_shard",
                      tags=overall_aggregated_per_shard_map).inc()
    self.reg.counter("response.completed.aggregated_per_service",
                     tags=overall_aggregated_per_service_map).inc()
    if self.CLUSTER is not NULL_TAG_VAL:
        delta_counter(self.reg,
                      "response.completed.aggregated_per_cluster",
                      tags=overall_aggregated_per_cluster_map).inc()
    self.reg.counter(
        "response.completed.aggregated_per_application",
        tags=overall_aggregated_per_application_map).inc()

    # flask.server.response.style._id_.make.summary.GET.200.latency.m
    # flask.server.response.style._id_.make.summary.GET.200.cpu_ns.m
    # flask.server.response.style._id_.make.summary.GET.200.total_time.count
    # Timing baselines stashed in the WSGI environ by the before hook.
    wf_start_timestamp = request.environ.get('_wf_start_timestamp')
    wf_cpu_nanos = request.environ.get('_wf_cpu_nanos')
    if wf_start_timestamp:
        timestamp_duration = default_timer() - wf_start_timestamp
        wavefront_histogram(self.reg, response_metric_key + ".latency",
                            tags=complete_tags_map).add(timestamp_duration)
        self.reg.counter(response_metric_key + ".total_time",
                         tags=complete_tags_map).inc(timestamp_duration)
    if wf_cpu_nanos:
        # NOTE(review): time.clock() was removed in Python 3.8 (raises
        # AttributeError there); fixing it must be coordinated with the
        # before-request hook that stored '_wf_cpu_nanos'.
        cpu_nanos_duration = time.clock() - wf_cpu_nanos
        wavefront_histogram(self.reg, response_metric_key + ".cpu_ns",
                            tags=complete_tags_map).add(cpu_nanos_duration)
def report_wavefront_generated_data(self, span):
    """Report Wavefront generated data from spans.

    Derives RED metrics (errors, invocations, total time) and a duration
    histogram from a finished span and records them as delta counters /
    Wavefront histograms in the derived reporter's registry.

    :param span: finished tracing span to derive metrics from.
    """
    if self.wf_derived_reporter is None:
        # WavefrontSpanReporter not set, so no tracing spans will be
        # reported as metrics/histograms.
        return
    # Need to sanitize metric name as application, service and operation
    # names can have spaces and other invalid metric name characters.
    point_tags = {}
    # span tag values are lists; only the first value of each is used.
    span_tags = span.get_tags_as_map()
    if self.red_metrics_custom_tag_keys:
        for key in self.red_metrics_custom_tag_keys:
            if key in span_tags:
                point_tags.update({key: span_tags.get(key)[0]})
        # Set default value of span.kind tag.
        point_tags.setdefault(SPAN_KIND, constants.NULL_TAG_VAL)
    for key in self.single_valued_tag_keys:
        if key in span_tags:
            point_tags.update({key: span_tags.get(key)[0]})
    # NOTE(review): unguarded — raises TypeError if the span carries no
    # 'component' tag; presumably every span is tagged with one. Confirm.
    point_tags.update({COMPONENT: span_tags.get(COMPONENT)[0]})
    # Propagate http status if the span has error.
    if HTTP_STATUS_CODE in span_tags:
        point_tags.update(
            {HTTP_STATUS_CODE: span_tags.get(HTTP_STATUS_CODE)[0]})
    if self.heartbeater_service:
        self.heartbeater_service.report_custom_tags(point_tags)
    # Add operation tag after sending RED heartbeat.
    point_tags.update({self.OPERATION_NAME_TAG: span.get_operation_name()})
    # Metric name prefix: tracing.derived.<application>.<service>.
    application_service_prefix = (
        'tracing.derived.{}.{}.'.format(
            span_tags.get(constants.APPLICATION_TAG_KEY)[0],
            span_tags.get(constants.SERVICE_TAG_KEY)[0]))
    if span.is_error():
        delta.delta_counter(self.wf_derived_reporter.registry,
                            self.sanitize(application_service_prefix +
                                          span.get_operation_name() +
                                          self.ERROR_SUFFIX),
                            point_tags).inc()
    # Every span counts as one invocation, error or not.
    delta.delta_counter(self.wf_derived_reporter.registry,
                        self.sanitize(application_service_prefix +
                                      span.get_operation_name() +
                                      self.INVOCATION_SUFFIX),
                        point_tags).inc()
    # Convert from secs to millis and add to duration counter.
    span_duration_millis = span.get_duration_time() * 1000
    delta.delta_counter(self.wf_derived_reporter.registry,
                        self.sanitize(application_service_prefix +
                                      span.get_operation_name() +
                                      self.TOTAL_TIME_SUFFIX),
                        point_tags).inc(span_duration_millis)
    # Convert from millis to micros and add to histogram.
    span_duration_micros = span_duration_millis * 1000
    if span.is_error():
        # Error spans get an extra error=true tag on the duration
        # histogram; copy the map so point_tags itself is not mutated.
        error_point_tags = dict(point_tags)
        error_point_tags.update({'error': 'true'})
        wavefront_histogram.wavefront_histogram(
            self.wf_derived_reporter.registry,
            self.sanitize(application_service_prefix +
                          span.get_operation_name() +
                          self.DURATION_SUFFIX),
            error_point_tags).add(span_duration_micros)
    else:
        wavefront_histogram.wavefront_histogram(
            self.wf_derived_reporter.registry,
            self.sanitize(application_service_prefix +
                          span.get_operation_name() +
                          self.DURATION_SUFFIX),
            point_tags).add(span_duration_micros)
def process_response(self, request, response):
    """
    Process the response before Django calls.

    Finishes tracing for the request, decrements the in-flight gauges,
    and records the response counters, latency/CPU histograms and
    total-time counter at every aggregation granularity (source, shard,
    service, cluster, application).

    :param request: incoming HTTP request.
    :param response: outgoing response.
    :return: the (unmodified) response.
    """
    if not self.MIDDLEWARE_ENABLED:
        return response
    entity_name = self.get_entity_name(request)
    # NOTE(review): resolve() is called twice for the same path — the
    # match could be resolved once and reused.
    func_name = resolve(request.path_info).func.__name__
    module_name = resolve(request.path_info).func.__module__
    if self.tracing:
        self.tracing._finish_tracing(request, response=response)

    # Decrement the in-flight gauges (incremented in process_request).
    self.update_gauge(
        registry=self.reg,
        key=self.get_metric_name(entity_name, request) + ".inflight",
        tags=self.get_tags_map(module_name=module_name,
                               func_name=func_name),
        val=-1)
    self.update_gauge(
        registry=self.reg,
        key="total_requests.inflight",
        tags=self.get_tags_map(cluster=self.CLUSTER,
                               service=self.SERVICE,
                               shard=self.SHARD),
        val=-1)

    response_metric_key = self.get_metric_name(entity_name, request,
                                               response)

    # Tag maps for each aggregation granularity; aggregated series carry
    # the Wavefront-provided source tag so they can be summed safely.
    complete_tags_map = self.get_tags_map(cluster=self.CLUSTER,
                                          service=self.SERVICE,
                                          shard=self.SHARD,
                                          module_name=module_name,
                                          func_name=func_name)
    aggregated_per_shard_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD,
        module_name=module_name, func_name=func_name,
        source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_source_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD)
    overall_aggregated_per_shard_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE, shard=self.SHARD,
        source=WAVEFRONT_PROVIDED_SOURCE)
    aggregated_per_service_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE,
        module_name=module_name, func_name=func_name,
        source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_service_map = self.get_tags_map(
        cluster=self.CLUSTER, service=self.SERVICE,
        source=WAVEFRONT_PROVIDED_SOURCE)
    aggregated_per_cluster_map = self.get_tags_map(
        cluster=self.CLUSTER, module_name=module_name,
        func_name=func_name, source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_cluster_map = self.get_tags_map(
        cluster=self.CLUSTER, source=WAVEFRONT_PROVIDED_SOURCE)
    aggregated_per_application_map = self.get_tags_map(
        module_name=module_name, func_name=func_name,
        source=WAVEFRONT_PROVIDED_SOURCE)
    overall_aggregated_per_application_map = self.get_tags_map(
        source=WAVEFRONT_PROVIDED_SOURCE)

    # django.server.response.style._id_.make.GET.200.cumulative.count
    # django.server.response.style._id_.make.GET.200.aggregated_per_shard.count
    # django.server.response.style._id_.make.GET.200.aggregated_per_service.count
    # django.server.response.style._id_.make.GET.200.aggregated_per_cluster.count
    # django.server.response.style._id_.make.GET.200.aggregated_per_application.count
    # django.server.response.style._id_.make.GET.errors
    self.reg.counter(response_metric_key + ".cumulative",
                     tags=complete_tags_map).inc()
    # Shard/cluster aggregates are only emitted when those application
    # tags are actually set.
    if self.application_tags.shard:
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_shard",
                      tags=aggregated_per_shard_map).inc()
    delta_counter(self.reg,
                  response_metric_key + ".aggregated_per_service",
                  tags=aggregated_per_service_map).inc()
    if self.application_tags.cluster:
        delta_counter(self.reg,
                      response_metric_key + ".aggregated_per_cluster",
                      tags=aggregated_per_cluster_map).inc()
    delta_counter(self.reg,
                  response_metric_key + ".aggregated_per_application",
                  tags=aggregated_per_application_map).inc()

    # django.server.response.errors.aggregated_per_source.count
    # django.server.response.errors.aggregated_per_shard.count
    # django.server.response.errors.aggregated_per_service.count
    # django.server.response.errors.aggregated_per_cluster.count
    # django.server.response.errors.aggregated_per_application.count
    if self.is_error_status_code(response):
        self.reg.counter(self.get_metric_name_without_status(
            entity_name, request), tags=complete_tags_map).inc()
        self.reg.counter("response.errors",
                         tags=complete_tags_map).inc()
        self.reg.counter("response.errors.aggregated_per_source",
                         tags=overall_aggregated_per_source_map).inc()
        if self.application_tags.shard:
            delta_counter(self.reg,
                          "response.errors.aggregated_per_shard",
                          tags=overall_aggregated_per_shard_map).inc()
        delta_counter(self.reg,
                      "response.errors.aggregated_per_service",
                      tags=overall_aggregated_per_service_map).inc()
        if self.application_tags.cluster:
            delta_counter(self.reg,
                          "response.errors.aggregated_per_cluster",
                          tags=overall_aggregated_per_cluster_map).inc()
        delta_counter(self.reg,
                      "response.errors.aggregated_per_application",
                      tags=overall_aggregated_per_application_map).inc()

    # django.server.response.completed.aggregated_per_source.count
    # django.server.response.completed.aggregated_per_shard.count
    # django.server.response.completed.aggregated_per_service.count
    # django.server.response.completed.aggregated_per_cluster.count
    # django.server.response.completed.aggregated_per_application.count
    self.reg.counter("response.completed.aggregated_per_source",
                     tags=overall_aggregated_per_source_map).inc()
    # NOTE(review): `is not NULL_TAG_VAL` is an identity (not equality)
    # check, and is inconsistent with the `self.application_tags.*`
    # truthiness checks used above; confirm intent.
    if self.SHARD is not NULL_TAG_VAL:
        delta_counter(self.reg,
                      "response.completed.aggregated_per_shard",
                      tags=overall_aggregated_per_shard_map).inc()
    self.reg.counter("response.completed.aggregated_per_service",
                     tags=overall_aggregated_per_service_map).inc()
    if self.CLUSTER is not NULL_TAG_VAL:
        delta_counter(self.reg,
                      "response.completed.aggregated_per_cluster",
                      tags=overall_aggregated_per_cluster_map).inc()
    self.reg.counter(
        "response.completed.aggregated_per_application",
        tags=overall_aggregated_per_application_map).inc()

    # django.server.response.style._id_.make.summary.GET.200.latency.m
    # django.server.response.style._id_.make.summary.GET.200.cpu_ns.m
    # django.server.response.style._id_.make.summary.GET.200.total_time.count
    # Timing baselines were stashed on the request in process_request.
    if hasattr(request, 'wf_start_timestamp'):
        timestamp_duration = default_timer() - request.wf_start_timestamp
        # NOTE(review): time.clock() was removed in Python 3.8 (raises
        # AttributeError there); fixing it must be coordinated with the
        # hook that stored request.wf_cpu_nanos.
        cpu_nanos_duration = time.clock() - request.wf_cpu_nanos
        wavefront_histogram(self.reg, response_metric_key + ".latency",
                            tags=complete_tags_map).add(timestamp_duration)
        wavefront_histogram(self.reg, response_metric_key + ".cpu_ns",
                            tags=complete_tags_map).add(cpu_nanos_duration)
        self.reg.counter(response_metric_key + ".total_time",
                         tags=complete_tags_map).inc(timestamp_duration)
    return response