    def is_server_live(self, headers=None):
        """Contact the inference server and get liveness.

        Parameters
        ----------
        headers: dict
            Optional dictionary specifying additional HTTP
            headers to include in the request.

        Returns
        -------
        bool
            True if server is live, False if server is not live.

        Raises
        ------
        InferenceServerException
            If unable to get liveness.

        """
        if headers is not None:
            metadata = headers.items()
        else:
            metadata = ()
        try:
            request = grpc_service_v2_pb2.ServerLiveRequest()
            response = self._client_stub.ServerLive(request=request,
                                                    metadata=metadata)
            return response.live
        except grpc.RpcError as rpc_error:
            raise_error_grpc(rpc_error)
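
A minimal usage sketch for the method above, assuming it is exposed by a gRPC client class along the lines of tritonclient.grpc.InferenceServerClient (the client class, import path, and URL below are assumptions, not part of this example):

import tritonclient.grpc as grpcclient

# Assumed client class and server URL, for illustration only.
client = grpcclient.InferenceServerClient(url="localhost:8001")
# Optional HTTP headers are forwarded as gRPC metadata by the method above.
live = client.is_server_live(headers={"x-request-id": "health-check"})
print("server live: {}".format(live))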
Example 2
    def is_server_live(self):
        """Contact the inference server and get liveness.

        Returns
        -------
        bool
            True if server is live, False if server is not live.

        Raises
        ------
        InferenceServerException
            If unable to get liveness.

        """
        try:
            request = grpc_service_v2_pb2.ServerLiveRequest()
            response = self._client_stub.ServerLive(request)
            return response.live
        except grpc.RpcError as rpc_error:
            raise_error_grpc(rpc_error)
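
Both variants raise InferenceServerException when liveness cannot be determined, so callers typically wrap the call. A minimal error-handling sketch; the client class, exception import path, and URL are assumptions based on the tritonclient package layout rather than anything shown above:

import tritonclient.grpc as grpcclient
from tritonclient.utils import InferenceServerException

# Assumed client class and server URL, for illustration only.
client = grpcclient.InferenceServerClient(url="localhost:8001")
try:
    # raise_error_grpc() converts grpc.RpcError into InferenceServerException.
    live = client.is_server_live()
    print("server live: {}".format(live))
except InferenceServerException as ex:
    # e.g. the server is not started yet or the connection was refused.
    print("liveness check failed: {}".format(ex))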
Example 3
    # We use a simple model that takes 2 input tensors of 16 integers
    # each and returns 2 output tensors of 16 integers each. One
    # output tensor is the element-wise sum of the inputs and one
    # output is the element-wise difference.
    model_name = "kitmodel"
    model_version = ""
    batch_size = 1

    # Create gRPC stub for communicating with the server
    channel = grpc.insecure_channel(FLAGS.url)
    grpc_stub = grpc_service_v2_pb2_grpc.GRPCInferenceServiceStub(channel)

    # Health
    try:
        request = grpc_service_v2_pb2.ServerLiveRequest()
        response = grpc_stub.ServerLive(request)
        print("server {}".format(response))
    except Exception as ex:
        print(ex)

    request = grpc_service_v2_pb2.ServerReadyRequest()
    response = grpc_stub.ServerReady(request)
    print("server {}".format(response))

    request = grpc_service_v2_pb2.ModelReadyRequest(name=model_name,
                                                    version=model_version)
    response = grpc_stub.ModelReady(request)
    print("model {}".format(response))

    # Metadata