def dummy_client(n, print_interval=50):
    '''
    Start a dummy client that fans dummy images out over every server.

    n: total number of images to send, spread across ADDRESSES
    print_interval: print a progress number after this many responses

    Fix: the original sent n // len(ADDRESSES) images to each server, so
    any remainder of n was silently dropped while the final throughput
    report still divided by the full n. The remainder is now distributed
    one-per-worker so exactly n images are requested.
    '''
    print("Dummy client sending {n} images...".format(n=n))
    print("Connecting to", ADDRESSES)
    print("Batch size {batch}".format(batch=BATCH_SIZE))

    start_time = time.time()
    # Connect to server

    # Give the first `extra` workers one additional image each so the
    # per-worker counts sum to exactly n.
    # NOTE(review): assumes empty_image_generator(k) yields requests for
    # exactly k images regardless of BATCH_SIZE alignment — confirm.
    base, extra = divmod(n, len(ADDRESSES))
    request_its = [empty_image_generator(base + (1 if i < extra else 0))
                   for i in range(len(ADDRESSES))]
    channels = [grpc.insecure_channel(address)
                for address in ADDRESSES]
    try:
        # One streaming RPC per server, each fed its own request iterator.
        results = [grpc_service_pb2_grpc.GRPCServiceStub(channel).StreamInfer(it)
                   for channel, it in zip(channels, request_its)]
        # Interleave responses from all servers; print progress periodically.
        for i, _response in enumerate(roundrobin(*results)):
            if i % print_interval == 0:
                print(i)
    finally:
        # Always release the channels, even if a stream errors out.
        for channel in channels:
            channel.close()

    total_time = time.time() - start_time
    print("{n} images in {time:.1f} seconds ({speed:.1f} images/s)"
          .format(n=n,
                  time=total_time,
                  speed=float(n) / total_time))
# Example #2
 def Infer(self, request, context):
     """Forward one Infer request to a backend chosen round-robin."""
     # Pick the next backend address in rotation.
     target = self.addresses[self.next_address % len(self.addresses)]
     self.next_address += 1
     # Keep the rotation counter bounded so it never grows without limit.
     if self.next_address > 1000000:
         self.next_address = 0
     # Open a short-lived channel, relay the request, return the reply.
     with grpc.insecure_channel(target) as chan:
         backend = grpc_service_pb2_grpc.GRPCServiceStub(chan)
         return backend.Infer(request)
# Example #3
def imagenet_client(file_name, n, print_interval=50):
    """Send n Imagenet images to the inference server and report accuracy.

    file_name: source file consumed by the request/label generators
    n: number of images to send; must be a multiple of BATCH_SIZE
    print_interval: progress is printed every this many responses
    """
    print(
        "Sending {n} Imagenet images using batch size {batch_size}...".format(
            n=n, batch_size=BATCH_SIZE))

    assert (n % BATCH_SIZE == 0)

    # Materialize every request up front and time it separately, so the
    # network timing below excludes image-loading cost.
    load_start = time.time()
    requests = list(imagenet_request_generator(file_name, n))
    print("Image load time: {time:.2f}".format(time=time.time() - load_start))

    send_start = time.time()
    predictions = []
    # Connect to server
    with grpc.insecure_channel('{address}:{port}'.format(
            address=SERVER_ADDRESS, port=SERVER_PORT)) as channel:
        stub = grpc_service_pb2_grpc.GRPCServiceStub(channel)

        # Make a call: either one streaming RPC carrying all requests,
        # or one unary Infer RPC per request.
        if USE_STREAMING:
            print("Using gRPC streaming")
            responses = stub.StreamInfer(iter(requests))
        else:
            print("Not using gRPC streaming")
            responses = [stub.Infer(request) for request in requests]

        # Decode each response into per-image argmax class predictions.
        for idx, resp in enumerate(responses):
            if idx % print_interval == 0:
                print(idx)
            scores = np.frombuffer(resp.raw_output[0],
                                   dtype=np.float32).reshape((-1, 1000))
            predictions.append(np.argmax(scores, axis=1))
    elapsed = time.time() - send_start
    print(
        "Sent {n} images in {time:.3f} seconds ({speed:.3f} images/s), excluding image load time"
        .format(n=n, time=elapsed, speed=float(n) / elapsed))
    # Take 490 ground-truth labels and cycle them out to length n.
    # NOTE(review): 490 looks like the label-source length — confirm.
    label_seq = itertools.cycle(imagenet_label_generator(file_name, 490))
    labels = list(itertools.islice(label_seq, n))
    predictions = np.array(predictions).reshape((-1))
    labels = np.array(labels).reshape((-1))
    print("Accuracy: {acc:.4}".format(
        acc=metrics.accuracy_score(labels, predictions)))
# Example #4
 def StreamInfer(self, request_iterator, context):
     """Fan a streaming Infer call out across every backend address."""
     # Split the incoming request stream into one shard per backend.
     shards = iterator_split(request_iterator, len(self.addresses))
     conns = [grpc.insecure_channel(addr) for addr in self.addresses]
     try:
         # Open one streaming RPC per backend, each fed its own shard.
         streams = [grpc_service_pb2_grpc.GRPCServiceStub(conn).StreamInfer(shard)
                    for conn, shard in zip(conns, shards)]
         # Interleave the backends' replies back into a single stream.
         yield from roundrobin(*streams)
     finally:
         # Release every channel even if a stream raised.
         for conn in conns:
             conn.close()
# Example #5
def dummy_client(n, print_interval=200):
    '''
    Start a dummy client

    n: number of images to send
    print_interval: print a progress number after this many responses
                    (streaming) or batches (non-streaming)
    '''
    print("Naive client sending {n} dummy images...".format(n=n))
    print("Batch size {batch}".format(batch=BATCH_SIZE))

    start_time = time.time()
    # Connect to server
    with grpc.insecure_channel('{address}:{port}'.format(
            address=SERVER_ADDRESS, port=SERVER_PORT)) as channel:
        stub = grpc_service_pb2_grpc.GRPCServiceStub(channel)

        # Sanity-check the server is reachable before timing the transfer.
        stub.Status(grpc_service_pb2.StatusRequest())

        # Make a call
        if USE_STREAMING:
            # Streaming: one RPC carries the whole request stream.
            print("Using streaming")
            responses = stub.StreamInfer(empty_image_generator(n))

            # Fix: the loop variable used to be named `responses`, shadowing
            # the response iterator itself.
            for i, response in enumerate(responses):
                if i % print_interval == 0:
                    print(i)
        else:
            # Not streaming: one unary Infer RPC per batch.
            print("Not using streaming")
            for i in range(n // BATCH_SIZE):
                if i % print_interval == 0:
                    print(i)
                # Fix: take the first generated request directly instead of
                # materializing the entire generator just to index element 0.
                # (Equivalent as long as the generator only yields requests;
                # it has no other visible side effects here.)
                response = stub.Infer(next(empty_image_generator(BATCH_SIZE)))
    total_time = time.time() - start_time
    print("{n} images in {time:.1f} seconds ({speed:.1f} images/s)".format(
        n=n, time=total_time, speed=float(n) / total_time))
# Example #6
 def Status(self, request, context):
     """Proxy a Status request to the first (master) backend."""
     primary = self.addresses[0]
     with grpc.insecure_channel(primary) as chan:
         # Relay the request verbatim and return the backend's reply.
         return grpc_service_pb2_grpc.GRPCServiceStub(chan).Status(request)