Example #1

def _grpc_client(self):
    # Lazily create (and cache) a single gRPC stub for the predictor host.
    if self._grpc_client_stub is None:
        # requires appending ":80" to the predictor host for gRPC to work
        if ":" not in self.predictor_host:
            self.predictor_host = self.predictor_host + ":80"
        _channel = grpc.aio.insecure_channel(self.predictor_host)
        self._grpc_client_stub = service_pb2_grpc.GRPCInferenceServiceStub(
            _channel)
    return self._grpc_client_stub
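A minimal usage sketch for context (the `predict` method name and its `request` argument, a `service_pb2.ModelInferRequest`, are illustrative assumptions, not part of the snippet above):

async def predict(self, request):
    # The aio stub returns an awaitable call object, so the inference
    # round trip can be awaited directly over the lazily created channel.
    response = await self._grpc_client().ModelInfer(request)
    return response
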
Example #2

import argparse

import grpc

# service_pb2 / service_pb2_grpc are the generated stubs for the KServe v2
# inference protocol, bundled with the Triton gRPC client package.
from tritonclient.grpc import service_pb2, service_pb2_grpc

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-u',
                        '--url',
                        type=str,
                        required=False,
                        default='localhost:8001',
                        help='Inference server URL. Default is localhost:8001.')

    FLAGS = parser.parse_args()

    # We use a simple model that takes 2 input tensors of 16 integers
    # each and returns 2 output tensors of 16 integers each. One
    # output tensor is the element-wise sum of the inputs and one
    # output is the element-wise difference.
    model_name = "simple_int8"
    model_version = ""
    batch_size = 1

    # Create gRPC stub for communicating with the server
    channel = grpc.insecure_channel(FLAGS.url)
    grpc_stub = service_pb2_grpc.GRPCInferenceServiceStub(channel)

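    # Aside (not in the original example): the same stub also serves the
    # protocol's health RPCs, so the connection can be sanity-checked with
    # a liveness probe before building any inference request.
    live_response = grpc_stub.ServerLive(service_pb2.ServerLiveRequest())
    print("Server live: {}".format(live_response.live))
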
    # Generate the request
    request = service_pb2.ModelInferRequest()
    request.model_name = model_name
    request.model_version = model_version

    # Input data
    input0_data = [i for i in range(16)]
    input1_data = [1 for i in range(16)]

    # Populate the inputs in inference request
    input0 = service_pb2.ModelInferRequest().InferInputTensor()
    input0.name = "INPUT0"
    input0.datatype = "INT8"
    input0.shape.extend([1, 16])
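    # The original snippet ends here; the rest is a sketch of the remaining
    # steps under the standard v2 gRPC protocol (assumption: numpy is used
    # to pack the INT8 payloads into raw bytes).
    import numpy as np

    input1 = service_pb2.ModelInferRequest().InferInputTensor()
    input1.name = "INPUT1"
    input1.datatype = "INT8"
    input1.shape.extend([1, 16])
    request.inputs.extend([input0, input1])

    # Raw tensor bytes ride alongside the typed metadata, one entry per
    # input, in the same order as the inputs registered above.
    request.raw_input_contents.extend([
        np.array(input0_data, dtype=np.int8).tobytes(),
        np.array(input1_data, dtype=np.int8).tobytes(),
    ])

    # Ask for both outputs and run the inference call; the result bytes
    # come back in response.raw_output_contents, matching response.outputs.
    output0 = service_pb2.ModelInferRequest().InferRequestedOutputTensor()
    output0.name = "OUTPUT0"
    output1 = service_pb2.ModelInferRequest().InferRequestedOutputTensor()
    output1.name = "OUTPUT1"
    request.outputs.extend([output0, output1])

    response = grpc_stub.ModelInfer(request)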