Example #1
async def run_sync_request(loop):
    global finish_benchmark

    aio.init_grpc_aio()

    # Baseline: a blocking (synchronous) channel and stub driven from a
    # coroutine, so each call blocks the event loop until it completes.
    channel = grpc.insecure_channel("127.0.0.1:50051")
    stub = echo_pb2_grpc.EchoStub(channel)
    for i in range(100):
        response = stub.Hi(echo_pb2.EchoRequest(message="ping"))
    print("eureka")
Example #2
async def requests(idx, multicallable):
    global finish_benchmark

    times = []
    while not finish_benchmark:
        start = time.monotonic()
        response = await multicallable(echo_pb2.EchoRequest(message="ping"))
        elapsed = time.monotonic() - start
        times.append(elapsed)

    return times
Example #3
async def benchmark(loop,
                    seconds=DEFAULT_SECONDS,
                    concurrency=DEFAULT_CONCURRENCY):
    global finish_benchmark

    aio.init_grpc_aio()

    print("Creating channels and warmming up ....")
    multicallables = []
    for i in range(concurrency):
        channel = aio.insecure_channel("127.0.0.1:50051")
        multicallable = channel.unary_unary(
            '/echo.Echo/Hi',
            request_serializer=echo_pb2.EchoRequest.SerializeToString,
            response_deserializer=echo_pb2.EchoReply.FromString)
        response = await multicallable(echo_pb2.EchoRequest(message="ping"))
        assert response

        multicallables.append(multicallable)

    print("Starting tasks ....")
    tasks = [
        asyncio.ensure_future(requests(idx, multicallable))
        for idx, multicallable in enumerate(multicallables)
    ]

    await asyncio.sleep(seconds)

    print("Finishing tasks ....")
    finish_benchmark = True

    # Yield to the event loop until every worker has seen the flag and finished
    while not all([task.done() for task in tasks]):
        await asyncio.sleep(0)

    times = []
    for task in tasks:
        times += task.result()

    times.sort()

    total_requests = len(times)
    avg = sum(times) / total_requests

    # Naive percentiles taken directly from the sorted latencies
    p75 = times[int((75 * total_requests) / 100)]
    p90 = times[int((90 * total_requests) / 100)]
    p99 = times[int((99 * total_requests) / 100)]

    print('QPS: {0}'.format(int(total_requests / seconds)))
    print('Avg: {0:.6f}'.format(avg))
    print('P75: {0:.6f}'.format(p75))
    print('P90: {0:.6f}'.format(p90))
    print('P99: {0:.6f}'.format(p99))
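The asyncio benchmark above (Examples #1 to #3) relies on module-level state that the snippets do not show. The sketch below illustrates what that scaffolding could look like; the location of the experimental aio module, the default values and the entry point are assumptions, only the names come from the examples.

# Sketch of the module-level scaffolding assumed by Examples #1 to #3.
# The import of the experimental asyncio API, the default values and the
# entry point are assumptions; only the names match the code above.
import asyncio
import time

import grpc
from grpc.experimental import aio   # assumed location of the aio API

import echo_pb2
import echo_pb2_grpc

DEFAULT_SECONDS = 10       # assumed benchmark duration
DEFAULT_CONCURRENCY = 8    # assumed number of concurrent channels

finish_benchmark = False   # set to True by benchmark() to stop the workers


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(benchmark(loop))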
Example #4
    async def test_unary_call(self, event_loop, server):
        grpc_init_asyncio()
        channel = await create_channel("127.0.0.1", 3333)
        response = await channel.unary_call(
            b'/echo.Echo/Hi',
            echo_pb2.EchoRequest(
                message="Hi Grpc Asyncio").SerializeToString())

        assert response is not None
        assert echo_pb2.EchoReply.FromString(
            response).message == "Hi Grpc Asyncio"

        channel.close()
Example #5
def benchmark(seconds=DEFAULT_SECONDS, concurrency=DEFAULT_CONCURRENCY):
    global finish_benchmark, real_started

    print("Creating stubs and warmming up ....")
    stubs = []
    for i in range(concurrency):
        channel = grpc.insecure_channel("127.0.0.1:50051")
        stub = echo_pb2_grpc.EchoStub(channel)
        response = stub.Hi(echo_pb2.EchoRequest(message="ping"))
        assert response
        stubs.append(stub)

    print("Starting threads ....")
    threads = []
    for idx, stub in enumerate(stubs):
        thread = Thread(target=requests, args=(idx, stub, seconds))
        thread.start()
        threads.append(thread)

    def all_threads_started():
        return threads_started == concurrency

    # Wait till all of the threads are ready to start the benchmark
    with thread_start:
        thread_start.wait_for(all_threads_started)

    # Signal the threads to start the benchmark
    with benchmark_start:
        benchmark_start.notify_all()

    time.sleep(seconds)
    finish_benchmark = True

    for thread in threads:
        thread.join()

    latencies.sort()

    total_requests = len(latencies)
    avg = sum(latencies) / total_requests

    p75 = latencies[int((75 * total_requests) / 100)]
    p90 = latencies[int((90 * total_requests) / 100)]
    p99 = latencies[int((99 * total_requests) / 100)]

    print('QPS: {0}'.format(int(total_requests / seconds)))
    print('Avg: {0:.6f}'.format(avg))
    print('P75: {0:.6f}'.format(p75))
    print('P90: {0:.6f}'.format(p90))
    print('P99: {0:.6f}'.format(p99))
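The threaded benchmark above and the worker in Example #8 share a set of module-level synchronization objects that are not shown. Below is a minimal sketch of those definitions, assuming plain threading primitives; the concrete values are assumptions, only the names come from the examples.

# Sketch of the shared module-level state assumed by the threaded benchmark
# above and by the worker in Example #8. Only the names come from the
# examples; the primitives chosen and the defaults are assumptions.
import time
from threading import Condition, Lock, Thread

import grpc

import echo_pb2
import echo_pb2_grpc

DEFAULT_SECONDS = 10       # assumed benchmark duration
DEFAULT_CONCURRENCY = 8    # assumed number of worker threads

finish_benchmark = False   # set to True to stop the workers
real_started = False       # declared global by the examples, unused in the snippets shown
threads_started = 0        # incremented by each worker once it is ready

thread_start = Condition()     # workers notify the main thread here
benchmark_start = Condition()  # the main thread signals the start here

latencies = []                 # per-request latencies merged by the workers
lock_latencies = Lock()        # guards the shared latencies list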
Example #6
def run():

    with grpc.insecure_channel('localhost:9090') as channel:

        stub = echo_pb2_grpc.TestDuplexServiceStub(channel)

        response = stub.Echo(echo_pb2.EchoRequest(message="Python Client"))

        print('Greeting for Python Client', response.message)

        bid_response = stub.Bid(generate_messages())

        for res in bid_response:
            print("Server Sent", res)
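run() feeds the bidirectional Bid call from generate_messages(), which the example does not include. A hypothetical sketch, assuming the client-side stream also consists of echo_pb2.EchoRequest messages:

# Hypothetical generate_messages() helper for the Bid call above: a plain
# generator yielding the client-side stream. The message type, count and
# contents are assumptions.
def generate_messages():
    for i in range(5):
        yield echo_pb2.EchoRequest(message="bid {0}".format(i))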
Example #7
async def requests(idx, channel):
    global finish_benchmark

    times = []
    while not finish_benchmark:
        # The synchronous client serializes and deserializes under the hood,
        # so to be fair we do the same by including the serialization and
        # deserialization of the protobuf in the measured time.

        # We also build the request message on each call.

        start = time.monotonic()
        response = await channel.unary_call(
            b'/echo.Echo/Hi',
            echo_pb2.EchoRequest(message="ping").SerializeToString())
        echo_reply = echo_pb2.EchoReply.FromString(response)
        elapsed = time.monotonic() - start
        times.append(elapsed)

    return times
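This worker pairs naturally with the channel API used in Example #4. The sketch below shows one way a driver could spawn the workers, assuming grpc_init_asyncio() and create_channel() behave as in that test; the duration and concurrency values are assumptions.

# Sketch of a driver for the Example #7 worker, reusing grpc_init_asyncio()
# and create_channel() as shown in Example #4. Duration and concurrency
# are assumptions.
async def run_benchmark(seconds=10, concurrency=8):
    global finish_benchmark

    grpc_init_asyncio()
    channels = [await create_channel("127.0.0.1", 3333)
                for _ in range(concurrency)]
    tasks = [asyncio.ensure_future(requests(idx, channel))
             for idx, channel in enumerate(channels)]

    await asyncio.sleep(seconds)
    finish_benchmark = True

    times = []
    for task in tasks:
        times += await task

    for channel in channels:
        channel.close()

    return times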
Example #8
def requests(idx, stub, duration):
    global latencies, real_started, threads_started

    local_latencies = []

    # Report to the main thread that this worker is ready ...
    with thread_start:
        threads_started += 1
        thread_start.notify()

    # ... then block until the main thread signals the benchmark start
    with benchmark_start:
        benchmark_start.wait()

    while not finish_benchmark:
        start = time.monotonic()
        response = stub.Hi(echo_pb2.EchoRequest(message="ping"))
        latency = time.monotonic() - start
        local_latencies.append(latency)

    with lock_latencies:
        latencies += local_latencies