Ejemplo n.º 1
0
 def __init__(self, gpu_id, model_path):
     """Set up a private TF1 graph/session pair for this worker.

     Args:
         gpu_id: not used in the visible lines — TODO confirm intended use
             (presumably selects the CUDA device further down).
         model_path: not used in the visible lines — presumably the model
             is loaded below; this snippet appears truncated.
     """
     self.logger = get_logger()
     # One dedicated graph per instance, with a session bound to it
     # (TF1-style API: tf.Session / Graph.as_default).
     self.graph = tf.Graph()
     self.sess = tf.Session(graph=self.graph)
     with self.graph.as_default():
         # Constant 3x3 tensors registered on this instance's graph;
         # real model construction is not visible in this snippet.
         self.x = tf.ones(shape=(3, 3), dtype=tf.float32)
         self.y = tf.ones(shape=(3, 3), dtype=tf.float32)
Ejemplo n.º 2
0
 def __init__(self, port, host='0.0.0.0', timeout_ms=1000, debug=False, asynchronous=False):
     """Initialize RPC client state; connection objects start unset.

     Args:
         port: server port to connect to.
         host: server address (default '0.0.0.0').
         timeout_ms: not used in the visible lines — presumably applied to
             the transport when connecting; this snippet appears truncated.
         debug: when True, the logger runs in 'debug' mode instead of 'info'.
         asynchronous: stored flag; semantics not visible from this snippet.
     """
     self.host = host
     self.port = port
     # Transport/client are created lazily elsewhere (not in the visible lines).
     self.transport = None
     self.client = None
     self.asynchronous = asynchronous
     # Presumably maps request ids to responses for async calls — TODO confirm.
     self.results = {}
     self.logger = get_logger(colored_worker_name('CLIENT'),
                              mode='debug' if debug else 'info')
Ejemplo n.º 3
0
from funicorn.client import ClientRPC
from funicorn.logger import get_logger
from funicorn.utils import colored_network_name
from concurrent.futures import ThreadPoolExecutor
import time
import numpy as np

# Module-level logger tagged with the colored 'CLIENT' network name.
logger = get_logger(colored_network_name('CLIENT'))


def sequential_test():
    """Measure average round-trip latency of N sequential rpc.ping() calls.

    Connects one ClientRPC to port 8001, issues N pings back to back, and
    logs the mean per-call latency in seconds.
    """
    print('=' * 100)
    N = 10000
    print('Sequential Requests with {}\n'.format(N))
    rpc = ClientRPC(port=8001)
    # perf_counter is monotonic and high-resolution; time.time() can jump
    # backwards on wall-clock adjustments and would corrupt the measurement.
    start = time.perf_counter()
    for _ in range(N):
        rpc.ping()
    logger.info('Latency: {}'.format((time.perf_counter() - start) / N))


def parallel_test():
    """Benchmark concurrent ping throughput across many client threads.

    NOTE(review): this snippet is truncated — the body of ping() and the
    thread-pool fan-out that presumably follows (ThreadPoolExecutor is
    imported above) are not visible here.
    """
    print('=' * 100)
    num_clients = 30
    num_requests_per_client = 1000

    print(
        'Parallel Requests with num_clients: {} - num_requests_per_client: {}\n'
        .format(num_clients, num_requests_per_client))

    # Per-client worker: presumably issues num_requests_per_client pings;
    # body not visible in this snippet.
    def ping(num_requests_per_client):
Ejemplo n.º 4
0
 def __init__(self, gpu_id, model_path):
     self.logger = get_logger()