def __init__(self, url, verbose=False):
    """Open an insecure gRPC channel to *url* and build an inference stub.

    Args:
        url: host:port of the gRPC inference server.
        verbose: when True, callers of this client may emit extra logging.
    """
    # FixMe: Are any of the channel options worth exposing?
    # https://grpc.io/grpc/core/group__grpc__arg__keys.html
    self._verbose = verbose
    self._channel = grpc.insecure_channel(url, options=None)
    self._client_stub = grpc_service_v2_pb2_grpc.GRPCInferenceServiceStub(
        self._channel)
# ---------------------------------------------------------------------------
# Example #2 (separate snippet; original paste contained site residue here)
# ---------------------------------------------------------------------------
        default='192.168.7.122:31919',
        help='Inference server URL. Default is localhost:8001.')

    FLAGS = parser.parse_args()

    # NOTE(review): this comment describes Triton's stock add/sub demo model,
    # but the name below is "kitmodel" — confirm the comment still applies.
    # We use a simple model that takes 2 input tensors of 16 integers
    # each and returns 2 output tensors of 16 integers each. One
    # output tensor is the element-wise sum of the inputs and one
    # output is the element-wise difference.
    model_name = "kitmodel"
    # NOTE(review): an empty version string presumably selects the latest
    # version on the server — confirm against the v2 protocol semantics.
    model_version = ""
    # NOTE(review): batch_size is assigned but never used in the visible code.
    batch_size = 1

    # Create gRPC stub for communicating with the server
    # (insecure channel: no TLS/authentication).
    channel = grpc.insecure_channel(FLAGS.url)
    grpc_stub = grpc_service_v2_pb2_grpc.GRPCInferenceServiceStub(channel)

    # Health
    # Liveness check is best-effort: any failure is printed and execution
    # continues to the readiness checks below.
    try:
        request = grpc_service_v2_pb2.ServerLiveRequest()
        response = grpc_stub.ServerLive(request)
        print("server {}".format(response))
    except Exception as ex:
        print(ex)

    # Readiness check: unlike ServerLive above, this call is NOT wrapped in
    # try/except, so an RPC failure here will propagate and abort the script.
    request = grpc_service_v2_pb2.ServerReadyRequest()
    response = grpc_stub.ServerReady(request)
    print("server {}".format(response))

    # Per-model readiness query; the stub call consuming this request is
    # presumably on the lines following this excerpt.
    request = grpc_service_v2_pb2.ModelReadyRequest(name=model_name,
                                                    version=model_version)