def unary_stream(self, request, context):
    """Yield `request.response_count` replies for one request.

    Each reply carries the same message (value repeated `multiplier`
    times, defaulting to 1) and a 1-based sequence number, plus the
    metadata extracted from the incoming call.
    """
    meta = extract_metadata(context)
    maybe_echo_metadata(context)
    payload = request.value * (request.multiplier or 1)
    for seqno in range(1, request.response_count + 1):
        maybe_sleep(request)
        yield ExampleReply(message=payload, seqno=seqno, metadata=meta)
def stream_stream(self, request, context):
    """Echo one reply per streamed request, tagged with a 1-based seqno.

    Each reply's message is the request value repeated `multiplier`
    times (defaulting to 1); call metadata is attached to every reply.
    """
    meta = extract_metadata(context)
    maybe_echo_metadata(context)
    seqno = 0
    for req in request:
        seqno += 1
        maybe_sleep(req)
        payload = req.value * (req.multiplier or 1)
        yield ExampleReply(message=payload, seqno=seqno, metadata=meta)
def stream_error(self, request, context):
    """Stream replies but raise a plain `Error` in place of the last one.

    Yields `response_count - 1` replies, then raises `Error("boom")`
    on the final iteration (after the optional sleep, before the yield)
    to exercise mid-stream failure handling.
    """
    meta = extract_metadata(context)
    maybe_echo_metadata(context)
    payload = request.value * (request.multiplier or 1)
    last = request.response_count - 1
    for i in range(request.response_count):
        maybe_sleep(request)
        if i == last:
            raise Error("boom")
        yield ExampleReply(message=payload, seqno=i + 1, metadata=meta)
def stream_unary(self, request, context):
    """Consume the request stream and return one combined reply.

    Each streamed request contributes its value repeated `multiplier`
    times (defaulting to 1); the pieces are joined with commas into a
    single `ExampleReply` carrying the extracted call metadata.
    """
    metadata = extract_metadata(context)
    maybe_echo_metadata(context)
    messages = []
    # Fix: the original used `enumerate(request)` but never read the
    # index — iterate the stream directly.
    for req in request:
        maybe_sleep(req)
        messages.append(req.value * (req.multiplier or 1))
    return ExampleReply(message=",".join(messages), metadata=metadata)
def unary_grpc_error(self, request, context):
    """Always fail with an UNAUTHENTICATED `GrpcError`.

    Echoes metadata and optionally sleeps first, then raises a
    `GrpcError` carrying a rich status built from the same code/message.
    """
    maybe_echo_metadata(context)
    maybe_sleep(request)
    status_code = StatusCode.UNAUTHENTICATED
    detail = "Not allowed!"
    raise GrpcError(
        code=status_code,
        message=detail,
        status=make_status(status_code, detail),
    )
def stream_error_via_context(self, request, context):
    """Stream replies, aborting via a rich status on the last iteration.

    Uses `context.abort_with_status` with a status converted through
    `rpc_status.to_status` to test compatibility with
    https://grpc.github.io/grpc/python/grpc_status.html

    NOTE(review): a second method with this exact name appears later in
    the class; in Python the later definition shadows this one — confirm
    which variant is intended to be active.
    """
    payload = request.value * (request.multiplier or 1)
    last = request.response_count - 1
    for i in range(request.response_count):
        maybe_sleep(request)
        if i == last:
            # abort on the last message via the rich-status API
            status = make_status(
                code=StatusCode.RESOURCE_EXHAUSTED, message="Out of tokens!"
            )
            context.abort_with_status(rpc_status.to_status(status))
        yield ExampleReply(message=payload, seqno=i + 1)
def stream_grpc_error(self, request, context):
    """Stream replies, raising a rich `GrpcError` in place of the last.

    Yields `response_count - 1` replies, then on the final iteration
    raises a RESOURCE_EXHAUSTED `GrpcError` whose status is built from
    the same code and message.
    """
    meta = extract_metadata(context)
    maybe_echo_metadata(context)
    payload = request.value * (request.multiplier or 1)
    final = request.response_count - 1
    for i in range(request.response_count):
        maybe_sleep(request)
        if i == final:
            status_code = StatusCode.RESOURCE_EXHAUSTED
            detail = "Out of tokens!"
            raise GrpcError(
                code=status_code,
                message=detail,
                status=make_status(status_code, detail),
            )
        yield ExampleReply(message=payload, seqno=i + 1, metadata=meta)
def stream_error_via_context(self, request, context):
    """Stream replies, ending early by setting error state on the context.

    Instead of raising, the last iteration sets the code, message, and a
    serialized rich status in the trailing metadata (under
    `GRPC_DETAILS_METADATA_KEY`), then breaks out of the stream.

    NOTE(review): an earlier method in this class has this exact name;
    this later definition shadows it — confirm which variant is intended
    to be active.
    """
    payload = request.value * (request.multiplier or 1)
    total = request.response_count
    for i in range(total):
        maybe_sleep(request)
        if i == total - 1:
            # terminate on the last message via context error state
            status_code = StatusCode.RESOURCE_EXHAUSTED
            detail = "Out of tokens!"
            context.set_code(status_code)
            context.set_message(detail)
            context.set_trailing_metadata(
                [
                    (
                        GRPC_DETAILS_METADATA_KEY,
                        make_status(status_code, detail).SerializeToString(),
                    )
                ]
            )
            break
        yield ExampleReply(message=payload, seqno=i + 1)
def unary_error(self, request, context):
    """Always fail with a plain `Error("boom")`.

    Echoes metadata and optionally sleeps before raising, so the error
    path still exercises the usual pre-reply behavior.
    """
    maybe_echo_metadata(context)
    maybe_sleep(request)
    raise Error("boom")
def unary_unary(self, request, context):
    """Return a single reply for a single request.

    The reply message is the request value repeated `multiplier` times
    (defaulting to 1), with the extracted call metadata attached.
    """
    meta = extract_metadata(context)
    maybe_echo_metadata(context)
    maybe_sleep(request)
    payload = request.value * (request.multiplier or 1)
    return ExampleReply(message=payload, metadata=meta)