Example #1
def _grpc(self, arr: np.ndarray) -> np.ndarray:
    # Raise gRPC's default message size limits so large tensors fit in one message.
    options = [('grpc.max_send_message_length', GRPC_MAX_MSG_LEN),
               ('grpc.max_receive_message_length', GRPC_MAX_MSG_LEN)]
    channel = grpc.insecure_channel(self.predictor_host, options)
    stub = prediction_pb2_grpc.SeldonStub(channel)
    # Serialize the array as a TensorProto, honoring an explicit dtype if one is set.
    if self.tf_data_type is not None:
        datadef = prediction_pb2.DefaultData(
            tftensor=tf.make_tensor_proto(arr, self.tf_data_type))
    else:
        datadef = prediction_pb2.DefaultData(
            tftensor=tf.make_tensor_proto(arr))
    request = prediction_pb2.SeldonMessage(data=datadef)
    response = stub.Predict(request=request)
    # Convert the returned TensorProto back into a numpy array.
    arr_resp = tf.make_ndarray(response.data.tftensor)
    return arr_resp
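
Both examples rely on module-level imports and constants that are defined elsewhere in the source file and not shown above. A minimal sketch of that preamble follows; the seldon_core import path, the 32 MiB limit, and the skip-logging header name are assumptions for illustration, not taken from the snippets.

import grpc
import numpy as np
import tensorflow as tf
from typing import Optional

# Assumed import path: the Seldon protocol stubs also ship with the
# seldon-core Python package; the original project may vendor its own copies.
from seldon_core.proto import prediction_pb2, prediction_pb2_grpc

# Assumed values: the snippets only reference these names.
GRPC_MAX_MSG_LEN = 32 * 1024 * 1024           # lift gRPC's default 4 MiB cap
SELDON_SKIP_LOGGING_HEADER = "seldon-skip-logging"  # assumed name; gRPC metadata keys must be lowercase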
Example #2
def _grpc(arr: np.ndarray, predictor_host: str,
          tf_data_type: Optional[str]) -> np.ndarray:
    # Raise gRPC's default message size limits so large tensors fit in one message.
    options = [
        ("grpc.max_send_message_length", GRPC_MAX_MSG_LEN),
        ("grpc.max_receive_message_length", GRPC_MAX_MSG_LEN),
        # TODO: Test skip functionality with gRPC's metadata
    ]
    channel = grpc.insecure_channel(predictor_host, options)
    stub = prediction_pb2_grpc.SeldonStub(channel)
    # Serialize the array as a TensorProto, honoring an explicit dtype if one is given.
    if tf_data_type is not None:
        datadef = prediction_pb2.DefaultData(
            tftensor=tf.make_tensor_proto(arr, tf_data_type))
    else:
        datadef = prediction_pb2.DefaultData(
            tftensor=tf.make_tensor_proto(arr))
    request = prediction_pb2.SeldonMessage(data=datadef)
    # Ask Seldon to skip request logging for this call via gRPC metadata.
    request_metadata = ((SELDON_SKIP_LOGGING_HEADER, "true"),)
    response = stub.Predict(request=request, metadata=request_metadata)
    # Convert the returned TensorProto back into a numpy array.
    arr_resp = tf.make_ndarray(response.data.tftensor)
    return arr_resp
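
A usage sketch for the standalone variant in Example #2: it only needs a numpy array, the predictor's host:port, and an optional TensorFlow dtype string. The input array and endpoint below are hypothetical.

input_batch = np.random.rand(2, 4).astype(np.float32)  # hypothetical input batch
predictions = _grpc(
    input_batch,
    predictor_host="seldon-model.default.svc.cluster.local:9000",  # hypothetical endpoint
    tf_data_type="float32",
)
print(predictions.shape)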