# --- Health: confirm the server is live and the target model is loaded ---
request = service_pb2.ServerReadyRequest()
response = grpc_stub.ServerReady(request)
print("server {}".format(response))

request = service_pb2.ModelReadyRequest(name=model_name,
                                        version=model_version)
response = grpc_stub.ModelReady(request)
print("model {}".format(response))

# --- Metadata: server-level and model-level descriptions ---
request = service_pb2.ServerMetadataRequest()
response = grpc_stub.ServerMetadata(request)
print("server metadata:\n{}".format(response))

request = service_pb2.ModelMetadataRequest(name=model_name,
                                           version=model_version)
response = grpc_stub.ModelMetadata(request)
print("model metadata:\n{}".format(response))

# --- Configuration of the requested model/version ---
request = service_pb2.ModelConfigRequest(name=model_name,
                                         version=model_version)
response = grpc_stub.ModelConfig(request)
print("model config:\n{}".format(response))

# --- Begin assembling an inference request for this model/version ---
request = service_pb2.ModelInferRequest()
request.model_name = model_name
request.model_version = model_version
request.id = "my request id"
# Continuation of the parser.add_argument('-u', '--url', ...) call that
# begins before this chunk.
default='localhost:8001',
    help='Inference server URL. Default is localhost:8001.')
parser.add_argument('image_filename',
                    type=str,
                    nargs='?',
                    default=None,
                    help='Input image / Input folder.')
FLAGS = parser.parse_args()

# Create gRPC stub for communicating with the server
channel = grpc.insecure_channel(FLAGS.url)
grpc_stub = service_pb2_grpc.GRPCInferenceServiceStub(channel)

# Make sure the model matches our requirements, and get some
# properties of the model that we need for preprocessing
metadata_request = service_pb2.ModelMetadataRequest(
    name=FLAGS.model_name, version=FLAGS.model_version)
metadata_response = grpc_stub.ModelMetadata(metadata_request)
config_request = service_pb2.ModelConfigRequest(name=FLAGS.model_name,
                                                version=FLAGS.model_version)
config_response = grpc_stub.ModelConfig(config_request)

# NOTE(review): `format` shadows the builtin of the same name; consider
# renaming (e.g. `model_format`) if code below this chunk permits.
max_batch_size, input_name, output_name, c, h, w, format, dtype = parse_model(
    metadata_response, config_response.config)

# A max_batch_size of 0 indicates the model does not accept batched requests,
# so a user-requested batch size other than 1 cannot be honored.
supports_batching = max_batch_size > 0
if not supports_batching and FLAGS.batch_size != 1:
    raise Exception("This model doesn't support batching.")

# Send requests of FLAGS.batch_size images. If the number of
# images isn't an exact multiple of FLAGS.batch_size then just
# Hoist the model identifier, previously repeated as a literal in four places,
# into one constant so the target model can be changed in a single place and
# the code stays consistent with the parallel client that uses `model_name`.
model_name = "resnet_v1_50_graphdef"

# Health: confirm the server is live and the model is loaded.
request = service_pb2.ServerReadyRequest()
response = grpc_stub.ServerReady(request)
print("server {}".format(response))

request = service_pb2.ModelReadyRequest(name=model_name,
                                        version=model_version)
response = grpc_stub.ModelReady(request)
print("model {}".format(response))

# Metadata: server-level and model-level descriptions.
request = service_pb2.ServerMetadataRequest()
response = grpc_stub.ServerMetadata(request)
print("server metadata:\n{}".format(response))

request = service_pb2.ModelMetadataRequest(name=model_name,
                                           version=model_version)
response = grpc_stub.ModelMetadata(request)
print("model metadata:\n{}".format(response))

# Configuration of the requested model/version.
request = service_pb2.ModelConfigRequest(name=model_name,
                                         version=model_version)
response = grpc_stub.ModelConfig(request)
print("model config:\n{}".format(response))

# Begin assembling an inference request for this model/version.
request = service_pb2.ModelInferRequest()
request.model_name = model_name
request.model_version = model_version
request.id = "my request id"