示例#1
0
def multithread_http(thread, batch_size):
    """Benchmark `run_http` across `thread` parallel workers and print stats.

    Launches the workers through MultiThreadRunner, then reports total
    wall-clock time, mean per-thread cost, total sample count, average QPS,
    and the latency distribution.
    """
    runner = MultiThreadRunner()
    t_begin = time.time()
    stats = runner.run(run_http, thread, batch_size)
    wall_time = time.time() - t_begin

    # NOTE(review): stats layout inferred from usage — row 0 is per-thread
    # cost, row 1 the latency samples, row 2 per-thread sample counts.
    mean_cost = sum(stats[0][:thread]) / thread
    sample_total = sum(stats[2][:thread])

    print("Total cost: {}s".format(wall_time))
    print("Each thread cost: {}s. ".format(mean_cost))
    print("Total count: {}. ".format(sample_total))
    print("AVG QPS: {} samples/s".format(batch_size * sample_total /
                                         wall_time))
    show_latency(stats[1])
示例#2
0
        return [[end - start]]


if __name__ == '__main__':
    # Fan args.thread client workers out against a single local endpoint,
    # one turn each, and report timing/QPS statistics.
    runner = MultiThreadRunner()
    endpoint_list = ["127.0.0.1:9393"]
    turns = 1
    t_begin = time.time()
    result = runner.run(single_func, args.thread, {
        "endpoint": endpoint_list,
        "turns": turns
    })
    total_cost = time.time() - t_begin

    # result[0]: per-thread elapsed time; result[2]: per-thread sample count
    avg_cost = sum(result[0][:args.thread]) / args.thread
    total_number = sum(result[2][:args.thread])

    print("total cost-include init: {}s".format(total_cost))
    print("each thread cost: {}s. ".format(avg_cost))
    print("qps: {}samples/s".format(args.batch_size * total_number /
                                    (avg_cost * args.thread)))
    print("qps(request): {}samples/s".format(total_number /
                                             (avg_cost * args.thread)))
    print("total count: {} ".format(total_number))
    # Latency breakdown is opt-in via an environment flag.
    if os.getenv("FLAGS_serving_latency"):
        show_latency(result[1])
示例#3
0
        else:
            print("unsupport batch size {}".format(args.batch_size))
    end = time.time()
    return [[end - start]]


if __name__ == '__main__':
    # Spread args.thread client workers over four local endpoints,
    # 100 turns per worker, and report total cost plus QPS.
    runner = MultiThreadRunner()
    endpoint_list = [
        "127.0.0.1:9292", "127.0.0.1:9293", "127.0.0.1:9294", "127.0.0.1:9295"
    ]
    turns = 100
    t_begin = time.time()
    result = runner.run(single_func, args.thread, {
        "endpoint": endpoint_list,
        "turns": turns
    })
    total_cost = time.time() - t_begin

    # result[0] holds each thread's elapsed time (presumably what
    # single_func returns) — average it over the thread count.
    avg_cost = sum(result[0][:args.thread]) / args.thread

    print("total cost: {}".format(total_cost))
    print("each thread cost: {}".format(avg_cost))
    print("qps: {}samples/s".format(args.batch_size * args.thread * turns /
                                    total_cost))
    # Latency breakdown is opt-in via an environment flag.
    if os.getenv("FLAGS_serving_latency"):
        show_latency(result[0])