Example no. 1
def test():
    
    pktgen = Pktgen(config.active_config)
    pktgen.low_level_start(pkt_count=20, pkt_size=1500, gap_ns=500000000, flow_count=20) # half a second gap
    util.verbose_sleep(10, 'Waiting for pktgen to finish')
    result = pktgen.stop_and_get_result()
    print result.__dict__
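
The parameters above are internally consistent: a gap of 500,000,000 ns is half a second, so sending 20 packets takes roughly 10 seconds, which is why the script sleeps for 10 seconds before collecting the result. A minimal sketch of that arithmetic (the helper name is illustrative and not part of the Pktgen API; it assumes gap_ns is the inter-packet gap for the whole run and that flow_count only spreads the packets across flows):

def expected_pktgen_numbers(pkt_count, pkt_size, gap_ns):
    gap_s = gap_ns / 1e9              # 500,000,000 ns -> 0.5 s between packets
    duration_s = pkt_count * gap_s    # 20 packets * 0.5 s = 10 s, matching the sleep above
    pps = 1.0 / gap_s                 # 2 packets per second
    bits_per_s = pps * pkt_size * 8   # ~24 kbit/s of payload at 1500-byte packets
    return duration_s, pps, bits_per_s

print expected_pktgen_numbers(20, 1500, 500000000)   # (10.0, 2.0, 24000.0)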
Example no. 2
def test():
    """ Sanity check. """
    
    proxy_client = StateProxyClient(FLEXI_CONTROLLER_HOST)
    proxy_client.reset()
    send_trigger_packet()
    
#    print '*' * 80
#    print 'ingress -> pkt-in'
#    print '*' * 80
#    
#    ingress = GenerateIngress(300, proxy_client)
#    pkt_in = ReceivePktIn(proxy_client)
#    
#    pkt_in.start()
#    ingress.start()
#    util.verbose_sleep(20, 'Ingress -> pkt_in...')
#    ingress.stop()
#    pkt_in.stop()
#    
#    print 'sent pps:', ingress.get_sent_pps()
#    print 'recvd pps:', pkt_in.get_received_pps()
#
#    print '*' * 80
#    print 'flow-mod -> rules'
#    print '*' * 80
#
#    flow_mod = GenerateFlowMod(200, proxy_client)
#    check_rule = CheckRuleInstallationRate(proxy_client, flow_stat_interval=10, steady_state_start=30, steady_state_end=60)
#    
#    check_rule.start()
#    flow_mod.start()
#    util.verbose_sleep(60, 'flow-mod -> rules')
#    flow_mod.stop()
#    check_rule.stop()
#
#    print 'sent pps:', flow_mod.get_sent_pps()
#    print 'recvd pps:', check_rule.get_received_pps()

    print '*' * 80
    print 'pkt-out -> egress'
    print '*' * 80

    pkt_out = GeneratePktOut(200, proxy_client, packet_size=1500)
    egress = ReceiveEgress(proxy_client)

    egress.start()
    pkt_out.start()
    util.verbose_sleep(5, 'pkt-out -> egress')
    pkt_out.stop()
    egress.stop()
    
    print 'sent pps:', pkt_out.get_sent_pps()
    print 'recvd pps:', egress.get_received_pps()
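
A natural follow-up to the pkt-out -> egress phase is to compare the sent and received rates directly. The helper below is a sketch that could be appended at the end of test(); it assumes get_sent_pps() and get_received_pps() return floats, as suggested by the '%.4f' formatting used in Example no. 5:

def summarize_loss(sent_pps, received_pps):
    # Fraction of generated pkt-out traffic that did not show up as egress.
    if sent_pps <= 0:
        return 0.0
    return max(0.0, 1.0 - received_pps / sent_pps)

# Could be appended after the existing prints in test():
# print 'pkt-out -> egress loss ratio: %.2f%%' % (summarize_loss(
#     pkt_out.get_sent_pps(), egress.get_received_pps()) * 100)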
Example no. 3
    def __init__(self):
            
        # Publish the value of x onto the redis server.
        self.init_redis_server()

        # Remove previous temp data files.
        for client_data_file in os.listdir('.'):
            if client_data_file.endswith('.tmp'):
                os.remove(client_data_file)

        # Construct the new experiment state, thereby starting the experiment.
        experiment_state = ExperimentState()
        experiment_state.data_length = DATA_LENGTH
        experiment_state.gap_ms = EXPECTED_GAP_MS * REDIS_CLIENT_PROCESS_COUNT * REDIS_CLIENT_HOST_COUNT
        experiment_state.redis_server = REDIS_SERVER_IN_BAND
        experiment_state.uid = str(random.random())[2:6]
        with open(EXPERIMENT_STATE_FILE, 'w') as f:
            f.write(pickle.dumps(experiment_state))

        # Wait and stop.
        print 'Experiment State:', experiment_state.__dict__
        util.verbose_sleep(130, 'Collecting data...')
        os.remove(EXPERIMENT_STATE_FILE)
        
        # Wait for all data files to be ready. The number of files to expect
        # equals the number of 'dummy-<uid>' files written for the current
        # experiment UID.
        data_file_count = 0
        for filename in os.listdir('.'):
            if filename.startswith('dummy-' + experiment_state.uid):
                data_file_count += 1
                
        client_data_file_list = []
        while len(client_data_file_list) < data_file_count:
            print 'Waiting for all data files to be ready.',
            print 'Current:', len(client_data_file_list),
            print 'Expected total:', data_file_count
            time.sleep(5)
            client_data_file_list = []
            for filename in os.listdir('.'):
                if filename.startswith('data-' + experiment_state.uid):
                    client_data_file_list += [filename]
        
        # Join data.
        start_end_times = []
        for client_data_file in client_data_file_list:
            print 'Reading', client_data_file
            with open(client_data_file) as f:
                client_data_list = pickle.loads(f.read())
                start_end_times += [(start_time, end_time, client_data_file) \
                                    for (start_time, end_time) in client_data_list]
        
        self.save_data(start_end_times)
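
The coordinator above talks to the redis client hosts purely through files: it pickles an ExperimentState into EXPERIMENT_STATE_FILE, deletes that file when the collection window ends, and then waits for 'data-<uid>' files containing pickled (start_time, end_time) lists. Note that gap_ms is scaled by the total number of client processes, presumably so that the aggregate request rate across all clients stays at one request per EXPECTED_GAP_MS. The client side is not shown here; a hypothetical reader for the state file, under those assumptions, might look like this (wait_for_experiment_state is an invented name):

import os
import pickle
import time

# Hypothetical client-side counterpart: poll for the state file the coordinator
# writes above, then unpickle the ExperimentState it contains.
def wait_for_experiment_state(state_file):
    while not os.path.exists(state_file):
        time.sleep(1)
    with open(state_file) as f:
        return pickle.loads(f.read())

# state = wait_for_experiment_state(EXPERIMENT_STATE_FILE)
# print state.uid, state.gap_ms, state.data_length, state.redis_server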
Example no. 4
def run(flow_mod_gap_ms):

    util.ping_test()
    
    switch = Switch(config.active_config)
    switch.reset_flow_table()
    
    control_client = ExpControlClient('mumu.ucsd.edu')
    control_client.execute('RESET')
    control_client.execute('SET auto_install_rules False')
    control_client.execute('SET manual_install_gap_ms %s' % flow_mod_gap_ms)
    control_client.execute('SET manual_install_active True')
    
    # Send a starter packet to trigger packet-in. Doesn't matter what port it
    # goes to, as long as it has the IP address of what would have been the
    # pktgen host.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto('Hi', (config.active_config.source_ip, 12345))
    
    # Let the whole system run for a while. We cannot check for TCAM status;
    # that'd sometimes take forever when the switch is busily processing flow-
    # mods, and it'd probably affect the flow-mods. We check the TCAM
    # afterwards.
    util.verbose_sleep(60)
    control_client.execute('SET manual_install_active False')
    
    # Gather stats about flow-mod. The flow-mod gap may not be accurate.
    flow_mod_count = control_client.execute('GET flow_mod_count')
    flow_mod_start_time = control_client.execute('GET flow_mod_start_time')
    flow_mod_end_time = control_client.execute('GET flow_mod_end_time')
    flow_mod_rate = flow_mod_count / (flow_mod_end_time - flow_mod_start_time)
    
    # Wait for the switch to stabilize before asking it for stats.
    util.verbose_sleep(80)
    print 'Dumping table...'
    
    # Parse the flow tables. Count rules until 1500 TCAM entries (table_id=0)
    # have been seen, which signifies a full TCAM; up to that point, count the
    # total number of rules added.
    rule_list = switch.dump_tables(filter_str='')
    tcam_rule_count = 0
    total_rule_count = 0
    print 'Parsing', len(rule_list), 'rules...'
    for rule in rule_list:
        total_rule_count += 1
        if 'table_id=0' in rule:
            tcam_rule_count += 1
        if tcam_rule_count == 1500:
            break
    
    control_client.execute('RESET')
    switch.reset_flow_table()
    util.verbose_sleep(5)
    
    return (tcam_rule_count, total_rule_count, flow_mod_rate)
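
The run above measures the achieved flow-mod rate against the configured gap. Assuming manual_install_gap_ms is the pause between successive flow-mods, the target rate is simply 1000 / flow_mod_gap_ms flow-mods per second, and the returned flow_mod_rate can be compared against it. A one-line sketch (the function name is local to this note):

def expected_flow_mod_rate(flow_mod_gap_ms):
    # One flow-mod every flow_mod_gap_ms milliseconds.
    return 1000.0 / flow_mod_gap_ms

# e.g. run(10) should report a flow_mod_rate close to expected_flow_mod_rate(10) == 100.0,
# minus any per-flow-mod overhead on the controller side.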
Example no. 5
def run(packet_size=1500):
    
    # Write the initial CSV header to the result file.
    #result_file = './data/hp_sensitivity_%d_byte.csv' % packet_size # HP
    #result_file = './data/evs_verify_sensitivity_%d_byte.csv' % packet_size # EVS
    result_file = './data/monaco_sensitivity_%d_byte.csv' % packet_size # MONACO
    with open(result_file, 'w') as f:
        print >> f, 'ingress_pps,flow_mod_pps,pkt_out_pps,pkt_in_pps,rule_pps,egress_pps,expected_ingress,expected_flow_mod,expected_pkt_out'
    
    #input_list = [10, 100, 400, 700, 1000]  # HP
    #input_list = [10, 100, 1000]  # OVS
    
    #input_list = [10, 100, 400, 700, 1000]
    input_list = [10,30,100,320,330]
    
    for ingress_pps in input_list:
        for flow_mod_pps in input_list:
            for pkt_out_pps in input_list:

                # Ignore certain params. TODO: Debug.
#                if packet_size == 1500:
#                    if _param_hash(ingress_pps, flow_mod_pps, pkt_out_pps) <= \
#                        _param_hash(400, 1000, 10):
#                        continue


                for attempt in range(3):
                    try:
        
                        start_controller()
                        
                        while True:
                            try:
                                proxy_client = StateProxyClient(FLEXI_CONTROLLER_HOST)
                                proxy_client.hello()
                                break
                            except:
                                print 'Waiting for controller...'
                                time.sleep(2)
                        
                        proxy_client.reset()
                        print proxy_client.getall()
                        send_trigger_packet()
        
                        # Confirm trigger.
                        while not proxy_client.run('trigger_event_is_ready'):
                            print proxy_client.getall()
                            print 'Waiting for trigger...'
                            time.sleep(2)
        
                        # Set up the pkt generators    
                        ingress = GenerateIngress(ingress_pps, proxy_client, packet_size=packet_size)    
                        flow_mod = GenerateFlowMod(flow_mod_pps, proxy_client)    
                        pkt_out = GeneratePktOut(pkt_out_pps, proxy_client, packet_size=packet_size)
        
                        # Set up pkt receivers.
                        pkt_in = ReceivePktIn(proxy_client)
                        check_rule = CheckRuleInstallationRate(proxy_client)
                        egress = ReceiveEgress(proxy_client)
                        
                        print proxy_client.getall()
                        
                        # Start receiving and sending.
                        for obj in [pkt_in, check_rule, egress, ingress, flow_mod, pkt_out]:
                            obj.start()
        
                        # Wait.
                        prompt = '(ingress_pps, flow_mod_pps, pkt_out_pps) = '
                        prompt += str((ingress_pps, flow_mod_pps, pkt_out_pps))
                        util.verbose_sleep(MAX_RUNNING_TIME, prompt)
                            
                        # Stop sending and receiving.
                        for obj in [ingress, flow_mod, pkt_out, pkt_in, check_rule, egress]:
                            obj.stop()
                            
                        # Gather data.
                        data_list = [ingress.get_sent_pps(),
                                     flow_mod.get_sent_pps(),
                                     pkt_out.get_sent_pps(),
                                     pkt_in.get_received_pps(),
                                     check_rule.get_received_pps(),
                                     egress.get_received_pps(),
                                     ingress_pps,
                                     flow_mod_pps, 
                                     pkt_out_pps]
                        
                        # Write csv data.
                        data = ','.join(['%.4f' % pps for pps in data_list])  
                        with open(result_file, 'a') as f:
                            print >> f, data
                            
                        print '*' * 80
                        print data
                        print '*' * 80
                        
                        proxy_client.exit()                                                
                        util.verbose_sleep(5, 'Waiting for the next experiment...')
                        break
                
                    except:
                        
                        proxy_client.exit()
                        
                        if attempt == 2:
                            raise
                        else:
                            with open('./data/CRASH.log', 'a') as crash_f:
                                print >> crash_f, traceback.format_exc()
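
The commented-out resume logic near the top of the sweep calls _param_hash(), which is not defined in this example. One plausible reading is that it maps each (ingress_pps, flow_mod_pps, pkt_out_pps) triple to its ordinal position in the nested sweep so that combinations already completed before a crash can be skipped. A hypothetical implementation under that assumption:

def _param_hash(ingress_pps, flow_mod_pps, pkt_out_pps,
                input_list=(10, 100, 400, 700, 1000)):
    # Map a parameter triple to its ordinal position in the nested sweep.
    base = len(input_list)
    return (input_list.index(ingress_pps) * base * base +
            input_list.index(flow_mod_pps) * base +
            input_list.index(pkt_out_pps))

# Under this assumption, _param_hash(400, 1000, 10) == 2*25 + 4*5 + 0 == 70.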