Example #1

import time
import threading
from multiprocessing import Process, Queue

# The following names come from the surrounding project and are not shown in
# this example: util, config, ThreadPool, EchoClient, Switch, redis_set,
# redis_client_process, RedisClientProcess, CLIENT_COUNT, REDIS_HOST_OF.

def client():
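    """Spawn 700 echo clients against 10.66.10.1:12345 using a 150-thread
    pool, wait for them all to finish, and write the CDF of per-client
    running times (in ms) to data/microflow_delay.txt."""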
    
    util.ping_test()

    client_list = []
    pool = ThreadPool(150) 
    
    for i in range(700):
        print('Starting client', i)
        client = EchoClient('10.66.10.1', 12345, 64*1000)
        pool.add_task(client.start)
        client_list.append(client)
    
    pool.wait_completion()

    delay_list = []
    for client in client_list:
        if client.running_time is None:
            delay_ms = 0
        else:
            delay_ms = client.running_time * 1000.0
        delay_list.append(delay_ms)
    
    cdf_list = util.make_cdf(delay_list)
    
    with open('data/microflow_delay.txt', 'w') as f:
        for (x, y) in zip(delay_list, cdf_list):
            print(x, y, file=f)
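

# ThreadPool above is a project-local helper that is not shown in this
# example. A minimal sketch of the assumed interface (hypothetical; the real
# class may differ): N daemon threads draining a shared task queue, with
# add_task() to enqueue a callable and wait_completion() to block until the
# queue is fully processed.
import queue

class ThreadPool:

    def __init__(self, num_threads):
        self.tasks = queue.Queue()
        for _ in range(num_threads):
            t = threading.Thread(target=self._worker)
            t.daemon = True
            t.start()

    def _worker(self):
        # Each worker loops forever, running one queued task at a time.
        while True:
            func, args, kwargs = self.tasks.get()
            try:
                func(*args, **kwargs)
            finally:
                self.tasks.task_done()

    def add_task(self, func, *args, **kwargs):
        self.tasks.put((func, args, kwargs))

    def wait_completion(self):
        # Block until every queued task has been marked done.
        self.tasks.join()
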
def run(redis_host, delay_ms):
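    """Start CLIENT_COUNT Redis client processes, one every delay_ms
    milliseconds, wait for them all to finish, and write the CDF of their
    reported delays (in ms) to data/redis_delay.txt."""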
        
    util.ping_test(dest_host=redis_host)
        
    delay_queue = Queue()
    proc_list = []
    
    for client_id in range(CLIENT_COUNT):
        print('Starting client', client_id)
        p = Process(target=redis_client_process, 
                    args=(client_id, redis_host, delay_queue))
        p.daemon = True
        p.start()
        proc_list.append(p)
        time.sleep(delay_ms / 1000.0)
        
    counter = 0    
    for p in proc_list:
        p.join()
        counter += 1
        print(CLIENT_COUNT - counter, 'left.')

    delay_list = []
    while not delay_queue.empty():
        (_, delay) = delay_queue.get()
        if delay is None:
            delay_ms = 0
        else:
            delay_ms = delay * 1000.0
        delay_list.append(delay_ms)
    
    cdf_list = util.make_cdf(delay_list)    
    with open('data/redis_delay.txt', 'w') as f:
        for (x, y) in zip(delay_list, cdf_list):
            print(x, y, file=f)
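

# redis_client_process above is project-local and not shown. A minimal sketch
# of the assumed behavior (hypothetical): time a single GET against the Redis
# server and report (client_id, elapsed_seconds), or (client_id, None) on
# failure, matching the (_, delay) tuples that run() consumes.
def redis_client_process(client_id, redis_host, delay_queue):
    import redis  # assumes the redis-py package is installed
    try:
        r = redis.StrictRedis(host=redis_host)
        start_time = time.time()
        r.get('payload')  # 'payload' is an assumed key name
        delay_queue.put((client_id, time.time() - start_time))
    except Exception:
        delay_queue.put((client_id, None))
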
def start_processes(process_count, worker_thread_per_process,
                    client_count, gap_ms, data_length, redis_host):
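    """Spawn process_count worker processes, each running
    worker_thread_per_process worker threads, feed them client_count request
    IDs spaced gap_ms apart, monitor the switch flow tables while requests
    complete, and write the CDF of completion times (in ms) to
    data/realistic_redis_completion_times.txt."""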

    switch = Switch(config.active_config)
    data_length = int(data_length)
    total_workers = process_count * worker_thread_per_process
    redis_set(data_length)
        
    worker_status_queue = Queue(maxsize=total_workers)
    client_id_queue = Queue(maxsize=client_count)
    result_queue = Queue(maxsize=client_count)

    # Start the worker processes; each spawns its own worker threads.
    
    for _ in range(process_count):
        p = Process(target=RedisClientProcess,
                    args=(worker_thread_per_process, data_length, redis_host,
                          worker_status_queue, client_id_queue, result_queue))
        p.daemon = True
        p.start()

    # Wait for all worker threads to start.
        
    while True:
        started_count = worker_status_queue.qsize()
        if started_count < total_workers:
            print(total_workers - started_count, 'workers yet to start.')
            time.sleep(1)
        else:
            break    
        
    # Send requests in a different thread.

    util.ping_test(dest_host=redis_host, how_many_pings=2)
        
    def requests():
        for client_id in range(client_count):
            client_id_queue.put(client_id)
            time.sleep(gap_ms / 1000.0)
    t = threading.Thread(target=requests)
    t.daemon = True
    t.start()
        
    # Monitor progress (and the switch tables) for up to two minutes.

    base_time = time.time()
    
    while True:    
        current_count = result_queue.qsize()
        remaining_count = client_count - current_count 
        print('Current:', current_count, 'Remaining:', remaining_count)
        if remaining_count > 0 and time.time() - base_time < 120:
            try:
                time.sleep(10)
            except KeyboardInterrupt:
                break            
            if redis_host == REDIS_HOST_OF:
                rule_list = switch.dump_tables(filter_str='')
                print('t =', time.time() - base_time,
                      '; tcam_size =',
                      len([rule for rule in rule_list if 'table_id=0' in rule]),
                      '; table_1_size =',
                      len([rule for rule in rule_list if 'table_id=1' in rule]),
                      '; table_2_size =',
                      len([rule for rule in rule_list if 'table_id=2' in rule]),
                      '; total_size =',
                      len([rule for rule in rule_list if 'cookie' in rule]))
        else:
            break
        
    # Extract the results into local lists. All time values are expressed in
    # ms. We only keep results recorded after the first 60 seconds.
        
    print('Analyzing the result...')
    start_time_list = []
    completion_time_list = []
    while not result_queue.empty():
        (_, start_time, end_time) = result_queue.get()
        if start_time - base_time >= 60:
            start_time_list.append(start_time * 1000.0)
            if end_time is None:
                completion_time = -100.0 # Not to be plotted.
            else:
                completion_time = (end_time - start_time) * 1000.0
            completion_time_list.append(completion_time)
        
    # Calculate the actual request gap.
    
    start_time_list.sort()
    gap_list = []
    for index in range(0, len(start_time_list) - 1):
        gap_list.append(start_time_list[index + 1] - start_time_list[index])
    print('Client gap: (mean, stdev) =', util.get_mean_and_stdev(gap_list))
    
    # Calculate the CDF of completion times.
    
    cdf_list = util.make_cdf(completion_time_list)
    with open('data/realistic_redis_completion_times.txt', 'w') as f:
        for (x, y) in zip(completion_time_list, cdf_list):
            print(x, y, file=f)
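

# util.make_cdf is a project-local helper used by all three functions above
# and not shown in this example. A minimal sketch of the assumed behavior
# (hypothetical; the real helper may differ): sort the values in place and
# return y = k/n for the k-th sorted value, so that zip(values, cdf) yields
# plottable (x, y) points of the empirical CDF.
def make_cdf(value_list):
    value_list.sort()
    n = len(value_list)
    return [(k + 1) / float(n) for k in range(n)]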