class ResponseInterceptor(aio.UnaryUnaryClientInterceptor):
    """Client interceptor that answers every RPC with a canned response."""

    # Class-level preset returned for all intercepted calls.
    response = messages_pb2.SimpleResponse()

    async def intercept_unary_unary(self, continuation, client_call_details,
                                    request):
        # The continuation is never invoked: the RPC is short-circuited
        # locally and the shared preset response is handed back as-is.
        return ResponseInterceptor.response
def UnaryCall(
        self, request: messages_pb2.SimpleRequest,
        context: grpc.ServicerContext) -> messages_pb2.SimpleResponse:
    """Reply with this server's identity (server_id and hostname)."""
    # Construct the response in one shot via keyword arguments instead of
    # assigning fields one by one; the resulting message is identical.
    return messages_pb2.SimpleResponse(
        server_id=self._server_id,
        hostname=self._hostname,
    )
def UnaryCall(self, request, context):
    """Echo metadata/status per the request, then return zero-filled bytes."""
    _maybe_echo_metadata(context)
    _maybe_echo_status_and_message(request, context)
    # Build the payload as a named intermediate for readability.
    zeros = b'\x00' * request.response_size
    payload = messages_pb2.Payload(type=messages_pb2.COMPRESSABLE, body=zeros)
    return messages_pb2.SimpleResponse(payload=payload)
async def test_response_caching(self):
    """Verify the cache interceptor serves hits and stores new misses."""
    # Seed the cache so a hit can be observed deterministically.
    preset = {
        42:
            messages_pb2.SimpleResponse(
                payload=messages_pb2.Payload(body=b'\x42'))
    }
    interceptor = _CacheInterceptor(preset)

    # Constructs a server wired up with the cache interceptor.
    server, stub = await _create_server_stub_pair(interceptor)

    # A request for 42 bytes must be answered from the preset entry.
    hit = await stub.UnaryCall(messages_pb2.SimpleRequest(response_size=42))
    self.assertEqual(1, len(interceptor.cache_store[42].payload.body))
    self.assertEqual(interceptor.cache_store[42], hit)

    # A miss should be computed once and recorded in the store...
    miss = await stub.UnaryCall(
        messages_pb2.SimpleRequest(response_size=1337))
    self.assertEqual(1337, len(interceptor.cache_store[1337].payload.body))
    self.assertEqual(interceptor.cache_store[1337], miss)

    # ...and the repeat request served straight from the cache.
    repeat = await stub.UnaryCall(
        messages_pb2.SimpleRequest(response_size=1337))
    self.assertEqual(interceptor.cache_store[1337], repeat)
def UnaryCall(self, request, context):
    """Return zeros of the requested size, echoing any requested status."""
    # If the client asked for a specific status, reflect it back on the
    # context before producing the normal payload.
    if request.HasField('response_status'):
        context.set_code(request.response_status.code)
        context.set_details(request.response_status.message)
    body = b'\x00' * request.response_size
    return messages_pb2.SimpleResponse(
        payload=messages_pb2.Payload(type=messages_pb2.COMPRESSABLE,
                                     body=body))
def UnaryCall(
        self, request: messages_pb2.SimpleRequest,
        context: grpc.ServicerContext) -> messages_pb2.SimpleResponse:
    """Send the hostname as initial metadata, then reply with identity."""
    context.send_initial_metadata((('hostname', self._hostname),))
    # Populate both identity fields via the message constructor.
    return messages_pb2.SimpleResponse(server_id=self._server_id,
                                       hostname=self._hostname)
def StreamingCall(self, request_iterator, context):
    """Yield one zero-filled response per incoming request.

    Fix: ``Payload.body`` is a proto ``bytes`` field; the original passed a
    ``str`` literal (``'\0' * n``), which raises ``TypeError`` under Python 3
    protobuf. Use a bytes literal, consistent with the other handlers in
    this file.
    """
    for request in request_iterator:
        payload = messages_pb2.Payload(body=b'\0' * request.response_size)
        yield messages_pb2.SimpleResponse(payload=payload)
def UnaryCall(self, request, context):
    """Return a zero-filled response of the requested size.

    Fix: ``Payload.body`` is a proto ``bytes`` field; the original passed a
    ``str`` literal (``'\0' * n``), which raises ``TypeError`` under Python 3
    protobuf. Use a bytes literal, consistent with the other handlers in
    this file.
    """
    payload = messages_pb2.Payload(body=b'\0' * request.response_size)
    return messages_pb2.SimpleResponse(payload=payload)
def UnaryCall(self, request, context):
    """Return a COMPRESSABLE payload of request.response_size zero bytes."""
    body = b'\x00' * request.response_size
    payload = messages_pb2.Payload(type=messages_pb2.COMPRESSABLE, body=body)
    return messages_pb2.SimpleResponse(payload=payload)
def UnaryCall(self, request, context):
    """Ignore the request entirely and answer with an empty response."""
    del request, context  # Unused: this handler carries no payload.
    return messages_pb2.SimpleResponse()
async def UnaryCallWithSleep(self, unused_request, unused_context):
    """Delay the reply (simulating a slow server), then answer empty."""
    delay = _constants.UNARY_CALL_WITH_SLEEP_VALUE
    await asyncio.sleep(delay)
    return messages_pb2.SimpleResponse()
async def StreamingFromServer(self, request, context):
    """Stream zero-filled responses for as long as the call stays open."""
    # The payload is loop-invariant: build it once and reuse it for every
    # streamed message. Responses are sent at full capacity.
    payload = messages_pb2.Payload(body=b'\0' * request.response_size)
    while True:
        yield messages_pb2.SimpleResponse(payload=payload)
def UnaryCallWithSleep(self, unused_request, unused_context):
    """Block cooperatively via gevent for the long interval, then answer."""
    interval = LONG_UNARY_CALL_WITH_SLEEP_VALUE
    gevent.sleep(interval)
    return messages_pb2.SimpleResponse()
async def UnaryCall(self, unused_request, context):
    """Let the helper echo request metadata, then reply with an empty message."""
    await _maybe_echo_metadata(context)
    return messages_pb2.SimpleResponse()