Example #1
def new_exact_match_rules(wait_and_verify=True, reset_flow_table=True,
                             rule_count=CLIENT_COUNT, 
                             flow_table_filter=FLOW_TABLE_FILTER,
                             client_base_port=CLIENT_BASE_PORT):
        
    conf = config.active_config
    switch = Switch(conf)    
    if reset_flow_table: switch.reset_flow_table()
    
    # From client to Redis server.
    new_tcp_rule1 = lambda client_id: \
                    'cookie=0,idle_timeout=0,hard_timeout=0,tcp,nw_tos=0x00,' + \
                    'dl_vlan=0xffff,dl_vlan_pcp=0x00,dl_src=' + \
                    conf.dest_mac + ',dl_dst=' + conf.source_mac + ',nw_src=' + \
                    conf.dest_ip + ',nw_dst=' + conf.source_ip + \
                    ',tp_src=' + str(client_id + client_base_port) + \
                    ',tp_dst=' + str(REDIS_PORT) + \
                    ',actions=output:' + conf.source_of_port
                    
    # From server back to client.
    new_tcp_rule2 = lambda client_id: \
                    'cookie=0,idle_timeout=0,hard_timeout=0,tcp,nw_tos=0x00,' + \
                    'dl_vlan=0xffff,dl_vlan_pcp=0x00,dl_src=' + \
                    conf.source_mac + ',dl_dst=' + conf.dest_mac + ',nw_src=' + \
                    conf.source_ip + ',nw_dst=' + conf.dest_ip + \
                    ',tp_dst=' + str(client_id + client_base_port) + \
                    ',tp_src=' + str(REDIS_PORT) + \
                    ',actions=output:' + conf.dest_of_port
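    # For illustration, with hypothetical values client_id=0,
    # client_base_port=10000 and REDIS_PORT=6379, new_tcp_rule1 produces a
    # single-line match string ending in
    # '...,tp_src=10000,tp_dst=6379,actions=output:<source_of_port>'.
    # conf.add_rule_cmd() presumably wraps such a string in an
    # ovs-ofctl add-flow command, which util.run_ssh() then executes on the
    # control host.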

    initial_rule_count = len(switch.dump_tables(filter_str=flow_table_filter))

    for client_id in range(rule_count):
        
        # Add the rules first.
        for rule_f in [new_tcp_rule1, new_tcp_rule2]:
            proc = util.run_ssh(conf.add_rule_cmd(rule_f(client_id)), 
                                hostname=conf.ofctl_ip, verbose=True, 
                                stdout=subprocess.PIPE)
            if wait_and_verify or (client_id % 5 == 0): 
                proc.wait()
        
        # Then verify that the correct number of rules has been added.
        if wait_and_verify and (client_id % 5 == 0 or client_id + 1 == rule_count):
            current_rule_count = len(switch.dump_tables(filter_str=flow_table_filter))
            try:
                assert current_rule_count - initial_rule_count == (client_id + 1) * 2
            except AssertionError:
                print current_rule_count, initial_rule_count, client_id
                raise
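
A minimal driver for the function above; the module-level constants (CLIENT_COUNT, CLIENT_BASE_PORT, FLOW_TABLE_FILTER) and the config/Switch plumbing come from the surrounding project, so only the call pattern below is illustrative:

# Hypothetical usage: install 100 exact-match rule pairs starting at a
# non-default base port, without clearing the flow table first.
new_exact_match_rules(wait_and_verify=True, reset_flow_table=False,
                      rule_count=100, client_base_port=20000)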
Example #2
def run(flow_mod_gap_ms):

    util.ping_test()
    
    switch = Switch(config.active_config)
    switch.reset_flow_table()
    
    control_client = ExpControlClient('mumu.ucsd.edu')
    control_client.execute('RESET')
    control_client.execute('SET auto_install_rules False')
    control_client.execute('SET manual_install_gap_ms %s' % flow_mod_gap_ms)
    control_client.execute('SET manual_install_active True')
    
    # Send a starter packet to trigger a packet-in. It doesn't matter which
    # port it goes to, as long as the destination has the IP address of what
    # would have been the pktgen host.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto('Hi', (config.active_config.source_ip, 12345))
    
    # Let the whole system run for a while. We cannot poll the TCAM status
    # during the run: that sometimes takes forever while the switch is busy
    # processing flow-mods, and it would probably perturb the flow-mods
    # themselves. We check the TCAM afterwards.
    util.verbose_sleep(60)
    control_client.execute('SET manual_install_active False')
    
    # Gather flow-mod stats. The achieved flow-mod gap may not match the
    # requested flow_mod_gap_ms exactly.
    flow_mod_count = control_client.execute('GET flow_mod_count')
    flow_mod_start_time = control_client.execute('GET flow_mod_start_time')
    flow_mod_end_time = control_client.execute('GET flow_mod_end_time')
    flow_mod_rate = flow_mod_count / (flow_mod_end_time - flow_mod_start_time)
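    # flow_mod_rate is in flow-mods per second, assuming execute() returns
    # numeric values and the timestamps are in seconds.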
    
    # Wait for the switch to stabilize before asking it for stats.
    util.verbose_sleep(80)
    print 'Dumping table...'
    
    # Parse the flow tables. We scan up to the 1500th TCAM rule (table_id=0),
    # which signifies a full TCAM; up to that point, we count the total
    # number of rules added.
    rule_list = switch.dump_tables(filter_str='')
    tcam_rule_count = 0
    total_rule_count = 0
    print 'Parsing', len(rule_list), 'rules...'
    for rule in rule_list:
        total_rule_count += 1
        if 'table_id=0' in rule:
            tcam_rule_count += 1
        if tcam_rule_count == 1500:
            break
    
    control_client.execute('RESET')
    switch.reset_flow_table()
    util.verbose_sleep(5)
    
    return (tcam_rule_count, total_rule_count, flow_mod_rate)
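
A sketch of how run() might be driven from a top-level experiment script; the gap values below are illustrative, not taken from the original experiment:

# Hypothetical sweep over flow-mod gaps. Each run() returns
# (tcam_rule_count, total_rule_count, flow_mod_rate).
for gap_ms in [0, 1, 2, 5, 10]:
    (tcam_rules, total_rules, rate) = run(gap_ms)
    print 'gap_ms =', gap_ms, 'tcam =', tcam_rules, \
          'total =', total_rules, 'rate =', rate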
Example #3
def new_software_table_rules(rule_count=CLIENT_COUNT, 
                                 client_base_port=CLIENT_BASE_PORT):
    
    conf = config.active_config
    switch = Switch(conf)    
    
    # Fill up TCAM
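    # new_exact_match_rules() verifies the rule count as it goes, so its
    # asserts start failing once the TCAM is full; swallow the AssertionError
    # as long as at least 1500 rules actually made it in.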
    try:
        new_exact_match_rules(rule_count=1510, client_base_port=0)
    except AssertionError:
        if len(switch.dump_tables(filter_str=FLOW_TABLE_FILTER)) < 1500:
            raise

    # Any new rules will go into the software table.
    new_exact_match_rules(wait_and_verify=False, reset_flow_table=False, 
                          rule_count=CLIENT_COUNT,  
                          client_base_port=client_base_port)
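
Everything interesting happens in the defaults, so a one-line driver suffices (a sketch, assuming the module constants keep their values from Example #1):

# Saturate the TCAM, then add CLIENT_COUNT rules that spill over into the
# switch's software flow table.
new_software_table_rules()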
Example #4
def start_processes(process_count, worker_thread_per_process,
                      client_count, gap_ms, data_length, redis_host):

    switch = Switch(config.active_config)
    data_length = int(data_length)
    total_workers = process_count * worker_thread_per_process
    redis_set(data_length)
        
    worker_status_queue = Queue(maxsize=total_workers)
    client_id_queue = Queue(maxsize=client_count)
    result_queue = Queue(maxsize=client_count)

    # Start the worker processes, each of which spawns individual worker
    # threads.
    
    for _ in range(process_count):
        p = Process(target=RedisClientProcess,
                    args=(worker_thread_per_process, data_length, redis_host,
                          worker_status_queue, client_id_queue, result_queue))
        p.daemon = True
        p.start()
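        # daemon=True ensures the worker processes are terminated when the
        # main process exits, so an aborted run leaves no orphans behind.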

    # Wait for all worker threads to start.
        
    while True:
        started_count = worker_status_queue.qsize()
        if started_count < total_workers:
            print total_workers - started_count, 'workers yet to start.'
            time.sleep(1)
        else:
            break    
        
    # Send requests in a different thread.

    util.ping_test(dest_host=redis_host, how_many_pings=2)
        
    def requests():
        for client_id in range(client_count):
            client_id_queue.put(client_id)
            time.sleep(gap_ms / 1000.0)
    t = threading.Thread(target=requests)
    t.daemon = True
    t.start()
        
    # Monitor progress for up to the first two minutes.

    base_time = time.time()
    
    while True:    
        current_count = result_queue.qsize()
        remaining_count = client_count - current_count 
        print 'Current:', current_count, 'Remaining:', remaining_count
        if remaining_count > 0 and time.time() - base_time < 120:
            try:
                time.sleep(10)
            except KeyboardInterrupt:
                break            
            if redis_host == REDIS_HOST_OF:
                rule_list = switch.dump_tables(filter_str='')
                print 't =', time.time() - base_time, 
                print '; tcam_size =', len([rule for rule in rule_list if 'table_id=0' in rule]), 
                print '; table_1_size =', len([rule for rule in rule_list if 'table_id=1' in rule]),
                print '; table_2_size =', len([rule for rule in rule_list if 'table_id=2' in rule]),
                print '; total_size =', len([rule for rule in rule_list if 'cookie' in rule])
        else:
            break
        
    # Extract the results into local lists. All time values are expressed in
    # ms. We're only interested in requests that started at least 60 seconds
    # into the run.
        
    print 'Analyzing the result...'
    start_time_list = []
    completion_time_list = []
    while not result_queue.empty():
        (_, start_time, end_time) = result_queue.get()
        if start_time - base_time >= 60:
            start_time_list.append(start_time * 1000.0)
            if end_time is None:
                completion_time = -100.0 # Not to be plotted.
            else:
                completion_time = (end_time - start_time) * 1000.0
            completion_time_list.append(completion_time)
        
    # Calculate the actual request gap.
    
    start_time_list.sort()
    gap_list = []
    for index in range(0, len(start_time_list) - 1):
        gap_list.append(start_time_list[index + 1] - start_time_list[index])
    print 'Client gap: (mean, stdev) =', util.get_mean_and_stdev(gap_list)
    
    # Calculate the CDF of completion times.
    
    cdf_list = util.make_cdf(completion_time_list)
    with open('data/realistic_redis_completion_times.txt', 'w') as f:
        for (x, y) in zip(completion_time_list, cdf_list):
            print >> f, x, y
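
A call such as the following would exercise the whole pipeline; the parameter values are purely illustrative, and REDIS_HOST_OF is the module constant referenced above:

# Hypothetical invocation: 4 processes x 8 threads each, 1000 client
# requests spaced 20 ms apart with 64-byte payloads, against the
# OpenFlow-routed Redis host.
start_processes(process_count=4, worker_thread_per_process=8,
                client_count=1000, gap_ms=20, data_length=64,
                redis_host=REDIS_HOST_OF)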