Example #1
def multithread_http(thread, batch_size):
    multi_thread_runner = MultiThreadRunner()
    start = time.time()
    result = multi_thread_runner.run(run_http, thread, batch_size)
    end = time.time()
    total_cost = end - start
    avg_cost = 0
    total_number = 0
    for i in range(thread):
        avg_cost += result[0][i]      # per-thread elapsed time
        total_number += result[2][i]  # per-thread request count
    avg_cost = avg_cost / thread
    print("Total cost: {}s".format(total_cost))
    print("Each thread cost: {}s. ".format(avg_cost))
    print("Total count: {}. ".format(total_number))
    print("AVG QPS: {} samples/s".format(batch_size * total_number /
                                         total_cost))
    show_latency(result[1])
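
Taken together, these examples pin down the MultiThreadRunner.run(worker, thread_num, resource) contract: worker(idx, resource) runs once per thread, and the returned lists of lists are gathered column-wise, so result[k][i] is the k-th value returned by thread i. Below is a minimal sketch of a conforming worker; demo_worker, its sleep body, and the counts are illustrative stand-ins, not code from the original files:

import time
from paddle_serving_client.utils import MultiThreadRunner


def demo_worker(idx, resource):
    # idx is the thread index assigned by MultiThreadRunner; resource is
    # the third argument passed to run() (here, a batch size).
    start = time.time()
    time.sleep(0.01)  # stand-in for the real request loop
    end = time.time()
    # Each sub-list is gathered column-wise across threads, so the caller
    # sees result[0][i] == elapsed time of thread i, result[1][i] == count.
    return [[end - start], [1]]


runner = MultiThreadRunner()
result = runner.run(demo_worker, 2, 16)  # 2 workers, resource=16
print(result[0])  # per-thread elapsed times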
Example #2
    else:
        raise ValueError("not implemented {} request".format(args.request))
    end = time.time()
    if latency_flags:
        return [[end - start], latency_list, [total_number]]
    else:
        return [[end - start]]


if __name__ == '__main__':
    multi_thread_runner = MultiThreadRunner()
    endpoint_list = ["127.0.0.1:9393"]
    turns = 1
    start = time.time()
    result = multi_thread_runner.run(single_func, args.thread, {
        "endpoint": endpoint_list,
        "turns": turns
    })
    end = time.time()
    total_cost = end - start
    total_number = 0
    avg_cost = 0
    for i in range(args.thread):
        avg_cost += result[0][i]
        total_number += result[2][i]
    avg_cost = avg_cost / args.thread

    print("total cost-include init: {}s".format(total_cost))
    print("each thread cost: {}s. ".format(avg_cost))
    print("qps: {}samples/s".format(args.batch_size * total_number /
                                    (avg_cost * args.thread)))
    print("qps(request): {}samples/s".format(total_number /
Example #3
        return [[end - start]]


if __name__ == '__main__':
    """
    usage: 
    """
    img_file_list = get_img_names("./img_data")
    img_content_list = preprocess_img(img_file_list)
    multi_thread_runner = MultiThreadRunner()
    endpoint_list = ["127.0.0.1:9494"]
    turns = 1
    start = time.time()
    result = multi_thread_runner.run(benckmark_worker, args.thread, {
        "endpoint": endpoint_list,
        "turns": turns,
        "img_list": img_content_list
    })
    end = time.time()
    total_cost = end - start
    avg_cost = 0
    for i in range(args.thread):
        avg_cost += result[0][i]
    avg_cost = avg_cost / args.thread
    print("total cost: {}s".format(total_cost))
    print("each thread cost: {}s. ".format(avg_cost))
    print("qps: {}samples/s".format(args.batch_size * args.thread * turns /
                                    total_cost))
    if os.getenv("FLAGS_serving_latency"):
        show_latency(result[1])
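
Example #3 prints per-request latency statistics only when the FLAGS_serving_latency environment variable is set, so plain throughput runs skip the extra bookkeeping. One way to enable it from the launching script (a sketch; the variable just needs any truthy value before the os.getenv check runs):

import os

os.environ["FLAGS_serving_latency"] = "1"  # any non-empty value passes the os.getenv() check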
Example #4
def multithread_rpc(thread, batch_size):
    multi_thread_runner = MultiThreadRunner()
    result = multi_thread_runner.run(run_rpc, thread, batch_size)
Example #5

from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner
import paddle
import numpy as np


def single_func(idx, resource):
    client = Client()
    client.load_client_config(
        "./uci_housing_client/serving_client_conf.prototxt")
    client.connect(["127.0.0.1:9293", "127.0.0.1:9292"])
    # One normalized 13-feature sample from the UCI housing dataset.
    x = [
        0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584,
        0.6283, 0.4919, 0.1856, 0.0795, -0.0332
    ]
    x = np.array(x)
    for i in range(1000):
        fetch_map = client.predict(feed={"x": x}, fetch=["price"])
        if fetch_map is None:
            # Signal failure for this thread; the main block checks for it.
            return [[None]]
    return [[0]]


multi_thread_runner = MultiThreadRunner()
thread_num = 4
result = multi_thread_runner.run(single_func, thread_num, {})
if None in result[0]:
    exit(1)
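
Example #5 doubles as a smoke test: a worker returns [[None]] as soon as a predict call fails, and the main block exits nonzero if any thread reported a failure, which makes the script usable as a CI health check. The endpoints are hardcoded in the worker here; following Examples #2 and #3, they could instead travel through the resource dict. A sketch of that variant, not the original code:

from paddle_serving_client import Client
from paddle_serving_client.utils import MultiThreadRunner


def single_func(idx, resource):
    client = Client()
    client.load_client_config(
        "./uci_housing_client/serving_client_conf.prototxt")
    # Endpoints arrive via the resource dict instead of being hardcoded.
    client.connect(resource["endpoint"])
    return [[0]]


result = MultiThreadRunner().run(
    single_func, 4, {"endpoint": ["127.0.0.1:9293", "127.0.0.1:9292"]})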
Example #6

def single_func(idx, resource):
    if args.request == "rpc":
        client = Client()
        client.load_client_config(args.model)
        client.connect([args.endpoint])
        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.uci_housing.train(), buf_size=500),
                                    batch_size=1)
        start = time.time()
        for data in train_reader():
            fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
        end = time.time()
        return [[end - start]]
    elif args.request == "http":
        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.uci_housing.train(), buf_size=500),
                                    batch_size=1)
        start = time.time()
        for data in train_reader():
            r = requests.post('http://{}/uci/prediction'.format(args.endpoint),
                              data={"x": data[0]})
        end = time.time()
        return [[end - start]]


multi_thread_runner = MultiThreadRunner()
result = multi_thread_runner.run(single_func, args.thread, {})
print(result)
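
Unlike Example #2, this worker falls through and implicitly returns None when args.request is neither "rpc" nor "http", which only surfaces later when the runner aggregates results. Adding the same guard Example #2 uses would fail fast instead:

    else:
        raise ValueError("not implemented {} request".format(args.request))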
Example #7
def multithread_http(thread, batch_size):
    multi_thread_runner = MultiThreadRunner()
    result = multi_thread_runner.run(run_http, thread, batch_size)


def multithread_http(args):
    multi_thread_runner = MultiThreadRunner()
    result = multi_thread_runner.run(do_client, args.threads, args)
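
Example #7 shows the same benchmark entry point written against two calling conventions: one passes the thread count and batch size as separate arguments, the other hands the whole parsed argument namespace to each worker as its resource. A minimal sketch of the second style; the do_client body and the argparse flags are illustrative assumptions:

import argparse

from paddle_serving_client.utils import MultiThreadRunner


def do_client(idx, args):
    # Every worker receives the full namespace, so adding a new flag
    # needs no signature change.
    return [[0]]


parser = argparse.ArgumentParser()
parser.add_argument("--threads", type=int, default=4)
args = parser.parse_args()
result = MultiThreadRunner().run(do_client, args.threads, args)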