def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
    """Intercept a bidi-streaming client call, counting messages in both directions.

    Outgoing request messages bump the sent-total counter as they are consumed;
    incoming response messages bump the received-total counter as they are yielded.
    """
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.BIDI_STREAMING
    # Wrap the request iterator so each message sent increments the counter lazily.
    counted_requests = grpc_utils.wrap_iterator_inc_counter(
        request_iterator, GRPC_CLIENT_MSG_SENT_TOTAL_COUNTER,
        call_type, service_name, method_name)
    responses = continuation(client_call_details, counted_requests)
    # Same treatment for the response stream on the way back to the caller.
    return grpc_utils.wrap_iterator_inc_counter(
        responses, GRPC_CLIENT_MSG_RECEIVED_TOTAL_COUNTER,
        call_type, service_name, method_name)
def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
    """Intercept a client-streaming call: count sent messages and record timings.

    Increments the started counter after the call is issued and observes the
    completion latency in either the legacy or the modern histogram, depending
    on configuration.
    """
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.CLIENT_STREAMING
    # The three labels shared by every metric this method touches.
    common = {
        "grpc_type": call_type,
        "grpc_service": service_name,
        "grpc_method": method_name,
    }
    counted_requests = grpc_utils.wrap_iterator_inc_counter(
        request_iterator, self._metrics["grpc_client_stream_msg_sent"],
        call_type, service_name, method_name)
    start = default_timer()
    handler = continuation(client_call_details, counted_requests)
    # Started counter is bumped in both legacy and modern modes.
    self._metrics["grpc_client_started_counter"].labels(**common).inc()
    if self._legacy:
        self._metrics[
            "legacy_grpc_client_completed_latency_seconds_histogram"
        ].labels(**common).observe(max(default_timer() - start, 0))
    elif self._enable_client_handling_time_histogram:
        self._metrics["grpc_client_handled_histogram"].labels(
            **common).observe(max(default_timer() - start, 0))
    if self._enable_client_stream_send_time_histogram and not self._legacy:
        self._metrics["grpc_client_stream_send_histogram"].labels(
            **common).observe(max(default_timer() - start, 0))
    return handler
def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
    """Intercept a client-streaming call: count sent messages and record timings."""
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.CLIENT_STREAMING

    def labeled(metric):
        # Bind the standard three labels onto a metric.
        return metric.labels(
            grpc_type=call_type,
            grpc_service=service_name,
            grpc_method=method_name)

    counted_requests = grpc_utils.wrap_iterator_inc_counter(
        request_iterator, GRPC_CLIENT_STREAM_MSG_SENT,
        call_type, service_name, method_name)
    start = default_timer()
    handler = continuation(client_call_details, counted_requests)
    # Started counter is bumped regardless of legacy mode.
    labeled(GRPC_CLIENT_STARTED_COUNTER).inc()
    if self._legacy:
        labeled(LEGACY_GRPC_CLIENT_COMPLETED_LATENCY_SECONDS_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    elif self._enable_client_handling_time_histogram:
        labeled(GRPC_CLIENT_HANDLED_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    if self._enable_client_stream_send_time_histogram and not self._legacy:
        labeled(GRPC_CLIENT_STREAM_SEND_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    return handler
def intercept_unary_unary(self, continuation, client_call_details, request):
    """Intercept a unary-unary call: count start/completion and record latency.

    Completion counters carry the final gRPC status code as an extra label
    (``code`` in legacy mode, ``grpc_code`` otherwise).
    """
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.UNARY
    common = {
        "grpc_type": call_type,
        "grpc_service": service_name,
        "grpc_method": method_name,
    }
    self._metrics["grpc_client_started_counter"].labels(**common).inc()
    start = default_timer()
    handler = continuation(client_call_details, request)
    if self._legacy:
        self._metrics[
            "legacy_grpc_client_completed_latency_seconds_histogram"
        ].labels(**common).observe(max(default_timer() - start, 0))
    elif self._enable_client_handling_time_histogram:
        self._metrics["grpc_client_handled_histogram"].labels(
            **common).observe(max(default_timer() - start, 0))
    # Completion counters include the final status code.
    status_name = handler.code().name
    if self._legacy:
        self._metrics["legacy_grpc_client_completed_counter"].labels(
            code=status_name, **common).inc()
    else:
        self._metrics["grpc_client_handled_counter"].labels(
            grpc_code=status_name, **common).inc()
    return handler
def intercept_unary_stream(self, continuation, client_call_details, request):
    """Intercept a server-streaming call: record latency and count received messages."""
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.SERVER_STREAMING
    common = {
        "grpc_type": call_type,
        "grpc_service": service_name,
        "grpc_method": method_name,
    }
    self._metrics["grpc_client_started_counter"].labels(**common).inc()
    start = default_timer()
    handler = continuation(client_call_details, request)
    if self._legacy:
        self._metrics[
            "legacy_grpc_client_completed_latency_seconds_histogram"
        ].labels(**common).observe(max(default_timer() - start, 0))
    elif self._enable_client_handling_time_histogram:
        self._metrics["grpc_client_handled_histogram"].labels(
            **common).observe(max(default_timer() - start, 0))
    # Count each streamed response as the caller consumes it.
    handler = grpc_utils.wrap_iterator_inc_counter(
        handler, self._metrics["grpc_client_stream_msg_received"],
        call_type, service_name, method_name)
    if self._enable_client_stream_receive_time_histogram and not self._legacy:
        self._metrics["grpc_client_stream_recv_histogram"].labels(
            **common).observe(max(default_timer() - start, 0))
    return handler
def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
    """Intercept a bidi-streaming call: count messages both ways, optionally time them.

    Wraps the request iterator to count sent messages and the response iterator
    to count received messages; observes send/receive histograms when enabled
    and not running in legacy mode.
    """
    grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(
        client_call_details)
    grpc_type = grpc_utils.BIDI_STREAMING
    start = default_timer()
    iterator_sent_metric = GRPC_CLIENT_STREAM_MSG_SENT
    response_iterator = continuation(
        client_call_details,
        grpc_utils.wrap_iterator_inc_counter(
            request_iterator, iterator_sent_metric,
            grpc_type, grpc_service_name, grpc_method_name))
    if self._enable_client_stream_send_time_histogram and not self._legacy:
        # Fixed: label keyword was misspelled `grpc_Service`, which raises
        # ValueError from prometheus_client (label names are exact).
        GRPC_CLIENT_STREAM_SEND_HISTOGRAM.labels(
            grpc_type=grpc_type,
            grpc_service=grpc_service_name,
            grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))
    iterator_received_metric = GRPC_CLIENT_STREAM_MSG_RECEIVED
    response_iterator = grpc_utils.wrap_iterator_inc_counter(
        response_iterator, iterator_received_metric,
        grpc_type, grpc_service_name, grpc_method_name)
    if self._enable_client_stream_receive_time_histogram and not self._legacy:
        GRPC_CLIENT_STREAM_RECV_HISTOGRAM.labels(
            grpc_type=grpc_type,
            grpc_service=grpc_service_name,
            grpc_method=grpc_method_name).observe(max(default_timer() - start, 0))
    return response_iterator
def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
    """Intercept a client-streaming call: count sent messages, starts, and latency."""
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.CLIENT_STREAMING
    base_labels = dict(
        grpc_type=call_type,
        grpc_service=service_name,
        grpc_method=method_name)
    # Each outgoing request message bumps the sent-total counter lazily.
    counted_requests = grpc_utils.wrap_iterator_inc_counter(
        request_iterator, GRPC_CLIENT_MSG_SENT_TOTAL_COUNTER,
        call_type, service_name, method_name)
    start = default_timer()
    handler = continuation(client_call_details, counted_requests)
    GRPC_CLIENT_STARTED_TOTAL_COUNTER.labels(**base_labels).inc()
    GRPC_CLIENT_COMPLETED_LATENCY_SECONDS_HISTOGRAM.labels(
        **base_labels).observe(max(default_timer() - start, 0))
    return handler
def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
    """Intercept a bidi-streaming call: count messages both ways, optionally time them."""
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.BIDI_STREAMING
    common = {
        "grpc_type": call_type,
        "grpc_service": service_name,
        "grpc_method": method_name,
    }
    start = default_timer()
    # Count outgoing messages as the transport pulls them from the iterator.
    counted_requests = grpc_utils.wrap_iterator_inc_counter(
        request_iterator, self._metrics["grpc_client_stream_msg_sent"],
        call_type, service_name, method_name)
    response_iterator = continuation(client_call_details, counted_requests)
    if self._enable_client_stream_send_time_histogram and not self._legacy:
        self._metrics["grpc_client_stream_send_histogram"].labels(
            **common).observe(max(default_timer() - start, 0))
    # Count incoming messages as the caller consumes them.
    response_iterator = grpc_utils.wrap_iterator_inc_counter(
        response_iterator, self._metrics["grpc_client_stream_msg_received"],
        call_type, service_name, method_name)
    if self._enable_client_stream_receive_time_histogram and not self._legacy:
        self._metrics["grpc_client_stream_recv_histogram"].labels(
            **common).observe(max(default_timer() - start, 0))
    return response_iterator
def intercept_unary_stream(self, continuation, client_call_details, request):
    """Intercept a server-streaming call: record latency and count received messages."""
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.SERVER_STREAMING

    def labeled(metric):
        # Bind the standard three labels onto a metric.
        return metric.labels(
            grpc_type=call_type,
            grpc_service=service_name,
            grpc_method=method_name)

    labeled(GRPC_CLIENT_STARTED_COUNTER).inc()
    start = default_timer()
    handler = continuation(client_call_details, request)
    if self._legacy:
        labeled(LEGACY_GRPC_CLIENT_COMPLETED_LATENCY_SECONDS_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    elif self._enable_client_handling_time_histogram:
        labeled(GRPC_CLIENT_HANDLED_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    # Count each streamed response as the caller consumes it.
    handler = grpc_utils.wrap_iterator_inc_counter(
        handler, GRPC_CLIENT_STREAM_MSG_RECEIVED,
        call_type, service_name, method_name)
    if self._enable_client_stream_receive_time_histogram and not self._legacy:
        labeled(GRPC_CLIENT_STREAM_RECV_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    return handler
def intercept_unary_unary(self, continuation, client_call_details, request):
    """Intercept a unary-unary call: count start/completion and record latency.

    The completion counter carries the final status code (``code`` label in
    legacy mode, ``grpc_code`` otherwise).
    """
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.UNARY

    def labeled(metric, **extra):
        # Bind the standard three labels plus any per-metric extras.
        return metric.labels(
            grpc_type=call_type,
            grpc_service=service_name,
            grpc_method=method_name,
            **extra)

    labeled(GRPC_CLIENT_STARTED_COUNTER).inc()
    start = default_timer()
    handler = continuation(client_call_details, request)
    if self._legacy:
        labeled(LEGACY_GRPC_CLIENT_COMPLETED_LATENCY_SECONDS_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    elif self._enable_client_handling_time_histogram:
        labeled(GRPC_CLIENT_HANDLED_HISTOGRAM).observe(
            max(default_timer() - start, 0))
    status_name = handler.code().name
    if self._legacy:
        labeled(LEGACY_GRPC_CLIENT_COMPLETED_COUNTER, code=status_name).inc()
    else:
        labeled(GRPC_CLIENT_HANDLED_COUNTER, grpc_code=status_name).inc()
    return handler
def intercept_unary_unary(self, continuation, client_call_details, request):
    """Intercept a unary-unary call: count starts/completions and observe latency."""
    service_name, method_name, _ = grpc_utils.split_method_call(client_call_details)
    call_type = grpc_utils.UNARY
    base_labels = dict(
        grpc_type=call_type,
        grpc_service=service_name,
        grpc_method=method_name)
    GRPC_CLIENT_STARTED_TOTAL_COUNTER.labels(**base_labels).inc()
    start = default_timer()
    handler = continuation(client_call_details, request)
    GRPC_CLIENT_COMPLETED_LATENCY_SECONDS_HISTOGRAM.labels(
        **base_labels).observe(max(default_timer() - start, 0))
    # Completion counter also records the final status code by name.
    GRPC_CLIENT_COMPLETED_COUNTER.labels(
        code=handler.code().name, **base_labels).inc()
    return handler
def intercept_service(self, continuation, handler_call_details):
    """
    Intercepts the server function calls.

    This implements referred to:
    https://github.com/census-instrumentation/opencensus-python/blob/master/opencensus/
    trace/ext/grpc/server_interceptor.py
    and
    https://grpc.io/grpc/python/grpc.html#service-side-interceptor
    """
    grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(
        handler_call_details)

    def metrics_wrapper(behavior, request_streaming, response_streaming):
        # Wraps the service handler so every invocation updates the server
        # metrics (started/handled counters, message counters, latency).
        def new_behavior(request_or_iterator, servicer_context):
            response_or_iterator = None
            try:
                start = default_timer()
                grpc_type = grpc_utils.get_method_type(
                    request_streaming, response_streaming)
                try:
                    if request_streaming:
                        # Streaming request: count each incoming message as
                        # the handler consumes the iterator.
                        request_or_iterator = grpc_utils.wrap_iterator_inc_counter(
                            request_or_iterator,
                            self.
                            _metrics["grpc_server_stream_msg_received"],
                            grpc_type, grpc_service_name, grpc_method_name)
                    else:
                        self._metrics[
                            "grpc_server_started_counter"].labels(
                                grpc_type=grpc_type,
                                grpc_service=grpc_service_name,
                                grpc_method=grpc_method_name).inc()
                    # Invoke the original rpc behavior.
                    response_or_iterator = behavior(
                        request_or_iterator, servicer_context)
                    if response_streaming:
                        # Streaming response: count each outgoing message as
                        # it is yielded back to the client.
                        sent_metric = self._metrics[
                            "grpc_server_stream_msg_sent"]
                        response_or_iterator = grpc_utils.wrap_iterator_inc_counter(
                            response_or_iterator, sent_metric,
                            grpc_type, grpc_service_name, grpc_method_name)
                    else:
                        # Unary response completed: record the final status code.
                        self.increase_grpc_server_handled_total_counter(
                            grpc_type, grpc_service_name, grpc_method_name,
                            self._compute_status_code(
                                servicer_context).name)
                    return response_or_iterator
                except grpc.RpcError as e:
                    # RPC-level failure: record the handled counter with the
                    # derived error code, then propagate.
                    self.increase_grpc_server_handled_total_counter(
                        grpc_type, grpc_service_name, grpc_method_name,
                        self._compute_error_code(e).name)
                    raise e
                finally:
                    # Latency is only meaningful for non-streaming responses,
                    # where the handler has fully completed at this point.
                    if not response_streaming:
                        if self._legacy:
                            self._metrics["legacy_grpc_server_handled_latency_seconds"].labels(
                                grpc_type=grpc_type,
                                grpc_service=grpc_service_name,
                                grpc_method=grpc_method_name) \
                                .observe(max(default_timer() - start, 0))
                        elif self._enable_handling_time_histogram:
                            self._metrics["grpc_server_handled_histogram"].labels(
                                grpc_type=grpc_type,
                                grpc_service=grpc_service_name,
                                grpc_method=grpc_method_name) \
                                .observe(max(default_timer() - start, 0))
            except Exception as e:  # pylint: disable=broad-except
                # Allow user to skip the exceptions in order to maintain
                # the basic functionality in the server
                # The logging function in exception can be toggled with log_exceptions
                # in order to suppress the noise in logging
                if self._skip_exceptions:
                    if self._log_exceptions:
                        _LOGGER.error(e)
                    if response_or_iterator is None:
                        return response_or_iterator
                    # NOTE(review): when metrics instrumentation failed after a
                    # response was produced, the handler is invoked again,
                    # un-instrumented, as a best-effort fallback.
                    return behavior(request_or_iterator, servicer_context)
                raise e
        return new_behavior

    optional_any = self._wrap_rpc_behavior(
        continuation(handler_call_details), metrics_wrapper)
    return optional_any
def intercept_service(self, continuation, handler_call_details):
    """
    Intercepts the server function calls.

    This implements referred to:
    https://github.com/census-instrumentation/opencensus-python/blob/master/opencensus/
    trace/ext/grpc/server_interceptor.py
    and
    https://grpc.io/grpc/python/grpc.html#service-side-interceptor
    """
    grpc_service_name, grpc_method_name, _ = grpc_utils.split_method_call(
        handler_call_details)

    def metrics_wrapper(behavior, request_streaming, response_streaming):
        # Wraps the service handler so every invocation updates the server
        # metrics (started/handled counters, message counters, latency).
        def new_behavior(request_or_iterator, servicer_context):
            start = default_timer()
            grpc_type = grpc_utils.get_method_type(request_streaming,
                                                   response_streaming)
            try:
                if request_streaming:
                    # Streaming request: count each incoming message as the
                    # handler consumes the iterator.
                    request_or_iterator = grpc_utils.wrap_iterator_inc_counter(
                        request_or_iterator,
                        GRPC_SERVER_MSG_RECEIVED_TOTAL_COUNTER,
                        grpc_type, grpc_service_name, grpc_method_name)
                else:
                    GRPC_SERVER_STARTED_TOTAL_COUNTER.labels(
                        grpc_type=grpc_type,
                        grpc_service=grpc_service_name,
                        grpc_method=grpc_method_name) \
                        .inc()
                # Invoke the original rpc behavior.
                response_or_iterator = behavior(request_or_iterator,
                                                servicer_context)
                if response_streaming:
                    # Streaming response: count each outgoing message as it
                    # is yielded back to the client.
                    response_or_iterator = grpc_utils.wrap_iterator_inc_counter(
                        response_or_iterator,
                        GRPC_SERVER_MSG_SENT_TOTAL_COUNTER,
                        grpc_type, grpc_service_name, grpc_method_name)
                else:
                    GRPC_SERVER_HANDLED_TOTAL_COUNTER.labels(
                        grpc_type=grpc_type,
                        grpc_service=grpc_service_name,
                        grpc_method=grpc_method_name,
                        code=self._compute_status_code(
                            servicer_context).name).inc()
                return response_or_iterator
            except grpc.RpcError as e:
                # Fixed: record the code's .name (a string) so the error path
                # uses the same label values as the success path above, instead
                # of labeling with the status-code object itself.
                GRPC_SERVER_HANDLED_TOTAL_COUNTER.labels(
                    grpc_type=grpc_type,
                    grpc_service=grpc_service_name,
                    grpc_method=grpc_method_name,
                    code=self._compute_error_code(e).name).inc()
                raise e
            finally:
                # Latency is only meaningful for non-streaming responses,
                # where the handler has fully completed at this point.
                if not response_streaming:
                    GRPC_SERVER_HANDLED_LATENCY_SECONDS.labels(
                        grpc_type=grpc_type,
                        grpc_service=grpc_service_name,
                        grpc_method=grpc_method_name) \
                        .observe(max(default_timer() - start, 0))
        return new_behavior

    return self._wrap_rpc_behavior(continuation(handler_call_details),
                                   metrics_wrapper)