def client():
    util.ping_test()
    client_list = []
    pool = ThreadPool(150)

    # Launch 700 echo clients, at most 150 running concurrently.
    for i in range(700):
        print 'Starting client', i
        client = EchoClient('10.66.10.1', 12345, 64 * 1000)
        pool.add_task(client.start)
        client_list.append(client)
    pool.wait_completion()

    # Convert each client's running time into milliseconds; a client that
    # never finished (running_time is None) is recorded as zero delay.
    delay_list = []
    for client in client_list:
        if client.running_time is None:
            delay_ms = 0
        else:
            delay_ms = client.running_time * 1000.0
        delay_list.append(delay_ms)

    # Write out the delay CDF for plotting.
    cdf_list = util.make_cdf(delay_list)
    with open('data/microflow_delay.txt', 'w') as f:
        for (x, y) in zip(delay_list, cdf_list):
            print >> f, x, y
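# For reference, a minimal sketch of the util.make_cdf helper used above
# (illustrative; the real implementation lives in util). It is assumed to
# sort the sample list in place, so that the zip() above pairs each sorted
# sample with its empirical CDF value.
def _make_cdf_sketch(sample_list):
    sample_list.sort()
    n = len(sample_list)
    return [float(i + 1) / n for i in range(n)]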
def run(redis_host, delay_ms):
    util.ping_test(dest_host=redis_host)

    # Launch one Redis client process per client ID, spaced delay_ms apart.
    delay_queue = Queue()
    proc_list = []
    for client_id in range(CLIENT_COUNT):
        print 'Starting client', client_id
        p = Process(target=redis_client_process,
                    args=(client_id, redis_host, delay_queue))
        p.daemon = True
        p.start()
        proc_list.append(p)
        time.sleep(delay_ms / 1000.0)

    # Wait for all client processes to finish.
    counter = 0
    for p in proc_list:
        p.join()
        counter += 1
        print CLIENT_COUNT - counter, 'left.'

    # Convert each client's delay into milliseconds; a failed request
    # (delay is None) is recorded as zero delay.
    delay_list = []
    while not delay_queue.empty():
        (_, delay) = delay_queue.get()
        if delay is None:
            sample_ms = 0
        else:
            sample_ms = delay * 1000.0
        delay_list.append(sample_ms)

    # Write out the delay CDF for plotting.
    cdf_list = util.make_cdf(delay_list)
    with open('data/redis_delay.txt', 'w') as f:
        for (x, y) in zip(delay_list, cdf_list):
            print >> f, x, y
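# A rough sketch of the redis_client_process target used above (the real
# worker is defined elsewhere in this module). It is assumed to time a
# single Redis GET and report (client_id, elapsed_seconds_or_None) through
# the shared queue; the key name 'test_key' is a placeholder.
def _redis_client_sketch(client_id, redis_host, delay_queue):
    import redis
    start_time = time.time()
    try:
        r = redis.StrictRedis(host=redis_host)
        r.get('test_key')
        delay_queue.put((client_id, time.time() - start_time))
    except redis.RedisError:
        delay_queue.put((client_id, None))  # None marks a failed request.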
def run(pkt_size, gap_ms):
    util.ping_test()

    switch = Switch(config.active_config)
    switch.reset_flow_table()

    # Reset the experimental controller and disable learning.
    control_client = ExpControlClient('mumu.ucsd.edu')
    control_client.execute('RESET')
    control_client.execute('SET learning False')

    # Fire 50,000 single-packet flows at the given size and inter-packet gap.
    pktgen = Pktgen(config.active_config)
    pktgen.low_level_start(pkt_count=50000, flow_count=50000,
                           pkt_size=pkt_size,
                           gap_ns=1000000 * gap_ms)  # ms -> ns
    try:
        time.sleep(20)
    except KeyboardInterrupt:
        pktgen.stop_and_get_result()
        sys.exit(1)
    pktgen_result = pktgen.stop_and_get_result()

    # Compare the packet generation rate against the pkt-in rate observed at
    # the controller.
    pkt_in_count = control_client.execute('GET pkt_in_count')
    pktgen_rate = pktgen_result.sent_pkt_count / pktgen_result.running_time
    pkt_in_rate = pkt_in_count / pktgen_result.running_time

    control_client.execute('RESET')
    return (pktgen_rate, pkt_in_rate)
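# Hypothetical driver for the experiment above: sweep the inter-packet gap
# at a fixed packet size and record how the controller's pkt-in rate tracks
# the pktgen rate. The gap values and output path are illustrative.
def _sweep_pkt_in_rate_sketch():
    with open('data/pkt_in_rate_sweep.txt', 'w') as f:
        for gap_ms in [1, 2, 5, 10, 20]:
            (pktgen_rate, pkt_in_rate) = run(pkt_size=64, gap_ms=gap_ms)
            print >> f, gap_ms, pktgen_rate, pkt_in_rate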
def send_trigger_packet():
    """
    Sends a reference packet to the controller, from which flow-mods or
    pkt-outs can be constructed. Blocks until the packet is sent.
    """
    util.ping_test(how_many_pings=4,
                   dest_host=config.active_config.source_ip)
    util.run_ssh('iperf -u -c ', config.active_config.dest_ip,
                 ' -p ', TRIGGER_PORT, ' -t 1 -l 12',
                 hostname=config.active_config.source_ip).wait()
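# On the controller side, a POX handler might cache the trigger packet so
# that later flow-mods or pkt-outs can be built from it, roughly as below.
# This is a sketch under assumed names (reference_ofp is hypothetical); the
# actual handler lives in the controller code, not in this module.
def _handle_PacketIn_sketch(self, event):
    udp = event.parsed.find('udp')
    if udp is not None and udp.dstport == TRIGGER_PORT:
        self.reference_ofp = event.ofp  # raw ofp_packet_in to build from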
def run(flow_mod_gap_ms):
    util.ping_test()

    switch = Switch(config.active_config)
    switch.reset_flow_table()

    control_client = ExpControlClient('mumu.ucsd.edu')
    control_client.execute('RESET')
    control_client.execute('SET auto_install_rules False')
    control_client.execute('SET manual_install_gap_ms %s' % flow_mod_gap_ms)
    control_client.execute('SET manual_install_active True')

    # Send a starter packet to trigger packet-in. Doesn't matter what port it
    # goes to, as long as it has the IP address of what would have been the
    # pktgen host.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto('Hi', (config.active_config.source_ip, 12345))

    # Let the whole system run for a while. We cannot check for TCAM status;
    # that'd sometimes take forever when the switch is busily processing
    # flow-mods, and it'd probably affect the flow-mods. We check the TCAM
    # afterwards.
    util.verbose_sleep(60)
    control_client.execute('SET manual_install_active False')

    # Gather stats about flow-mods. The flow-mod gap may not be accurate.
    flow_mod_count = control_client.execute('GET flow_mod_count')
    flow_mod_start_time = control_client.execute('GET flow_mod_start_time')
    flow_mod_end_time = control_client.execute('GET flow_mod_end_time')
    flow_mod_rate = flow_mod_count / (flow_mod_end_time - flow_mod_start_time)

    # Wait for the switch to stabilize before asking it for stats.
    util.verbose_sleep(80)
    print 'Dumping table...'

    # Parse flow tables. Search up to the last point of a TCAM-write, which
    # signifies a full TCAM. Up to that point, we count the total number of
    # rules added.
    rule_list = switch.dump_tables(filter_str='')
    tcam_rule_count = 0
    total_rule_count = 0
    print 'Parsing', len(rule_list), 'rules...'
    for rule in rule_list:
        total_rule_count += 1
        if 'table_id=0' in rule:
            tcam_rule_count += 1
            if tcam_rule_count == 1500:
                break

    control_client.execute('RESET')
    switch.reset_flow_table()
    util.verbose_sleep(5)

    return (tcam_rule_count, total_rule_count, flow_mod_rate)
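# Hypothetical driver for the experiment above: sweep the flow-mod gap and
# record how many rules land in the TCAM (table 0) versus in total. The gap
# values and output path are illustrative.
def _sweep_flow_mod_gap_sketch():
    with open('data/flow_mod_gap_sweep.txt', 'w') as f:
        for flow_mod_gap_ms in [0, 1, 2, 5, 10]:
            (tcam_rules, total_rules, rate) = run(flow_mod_gap_ms)
            print >> f, flow_mod_gap_ms, tcam_rules, total_rules, rate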
def start_processes(process_count, worker_thread_per_process, client_count,
                    gap_ms, data_length, redis_host):
    switch = Switch(config.active_config)
    data_length = int(data_length)
    total_workers = process_count * worker_thread_per_process
    redis_set(data_length)

    worker_status_queue = Queue(maxsize=total_workers)
    client_id_queue = Queue(maxsize=client_count)
    result_queue = Queue(maxsize=client_count)

    # Start the worker processes that spawn individual worker threads.
    for _ in range(process_count):
        p = Process(target=RedisClientProcess,
                    args=(worker_thread_per_process, data_length, redis_host,
                          worker_status_queue, client_id_queue, result_queue))
        p.daemon = True
        p.start()

    # Wait for all worker threads to start.
    while True:
        started_count = worker_status_queue.qsize()
        if started_count < total_workers:
            print total_workers - started_count, 'workers yet to start.'
            time.sleep(1)
        else:
            break

    # Send requests in a different thread.
    util.ping_test(dest_host=redis_host, how_many_pings=2)
    def requests():
        for client_id in range(client_count):
            client_id_queue.put(client_id)
            time.sleep(gap_ms / 1000.0)
    t = threading.Thread(target=requests)
    t.daemon = True
    t.start()

    # Monitor progress for up to two minutes, dumping the OpenFlow tables
    # along the way.
    base_time = time.time()
    while True:
        current_count = result_queue.qsize()
        remaining_count = client_count - current_count
        print 'Current:', current_count, 'Remaining:', remaining_count
        if remaining_count > 0 and time.time() - base_time < 120:
            try:
                time.sleep(10)
            except KeyboardInterrupt:
                break
            if redis_host == REDIS_HOST_OF:
                rule_list = switch.dump_tables(filter_str='')
                print 't =', time.time() - base_time,
                print '; tcam_size =', len([rule for rule in rule_list
                                            if 'table_id=0' in rule]),
                print '; table_1_size =', len([rule for rule in rule_list
                                               if 'table_id=1' in rule]),
                print '; table_2_size =', len([rule for rule in rule_list
                                               if 'table_id=2' in rule]),
                print '; total_size =', len([rule for rule in rule_list
                                             if 'cookie' in rule])
        else:
            break

    # Extract the result into local lists. All time values are expressed in
    # ms. We only keep requests that started at least 60 seconds in, once
    # the system has reached steady state.
    print 'Analyzing the result...'
    start_time_list = []
    completion_time_list = []
    while not result_queue.empty():
        (_, start_time, end_time) = result_queue.get()
        if start_time - base_time >= 60:
            start_time_list.append(start_time * 1000.0)
            if end_time is None:
                completion_time = -100.0  # Not to be plotted.
            else:
                completion_time = (end_time - start_time) * 1000.0
            completion_time_list.append(completion_time)

    # Calculate the actual request gap.
    start_time_list.sort()
    gap_list = []
    for index in range(0, len(start_time_list) - 1):
        gap_list.append(start_time_list[index + 1] - start_time_list[index])
    print 'Client gap: (mean, stdev) =', util.get_mean_and_stdev(gap_list)

    # Calculate the CDF of completion times.
    cdf_list = util.make_cdf(completion_time_list)
    with open('data/realistic_redis_completion_times.txt', 'w') as f:
        for (x, y) in zip(completion_time_list, cdf_list):
            print >> f, x, y
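# A rough sketch of the RedisClientProcess target used above (the real one
# is defined elsewhere in this module). Each process is assumed to spawn
# worker threads that announce readiness on worker_status_queue, then pull
# client IDs and push (client_id, start_time, end_time_or_None) tuples into
# result_queue. The key name 'test_key' and error handling are placeholders;
# data_length is unused here, though the real worker may verify it.
def _redis_worker_process_sketch(thread_count, data_length, redis_host,
                                 worker_status_queue, client_id_queue,
                                 result_queue):
    import redis

    def worker():
        r = redis.StrictRedis(host=redis_host)
        worker_status_queue.put(True)  # this worker thread is ready
        while True:
            client_id = client_id_queue.get()
            start_time = time.time()
            try:
                r.get('test_key')
                result_queue.put((client_id, start_time, time.time()))
            except redis.RedisError:
                result_queue.put((client_id, start_time, None))

    for _ in range(thread_count):
        t = threading.Thread(target=worker)
        t.daemon = True
        t.start()
    while True:
        time.sleep(3600)  # keep this daemonic process alive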
def run(packet_per_second=100, pkt_size=1500, run_time=220):
    """
    Returns (pktgen_pps, pkt_in_pps, flow_mod_pps, flow_mod_pps_stdev,
    pkt_out_pps), where pktgen_pps is the actual number of packets/sec of
    pktgen, and flow_mod_pps and flow_mod_pps_stdev are the mean and stdev
    pps of successful flow installations at steady state.
    """
    util.ping_test()

    switch = Switch(config.active_config)
    switch.reset_flow_table()

    # Initialize the experimental controller so that POX would have the
    # necessary settings.
    control_client = ExpControlClient('mumu.ucsd.edu')
    control_client.execute(['RESET'])
    control_client.execute(['SET', 'flow_stat_interval', 20])
    control_client.execute(['SET', 'install_bogus_rules', True])
    control_client.execute(['SET', 'emulate_hp_switch', True])

    # Start capturing packets.
    tcpdump = Tcpdump(config.active_config)
    tcpdump.start()
    tcpdump_start_time = time.time()

    # Start firing packets.
    pktgen = Pktgen(config.active_config)
    gap = 1.0 / packet_per_second
    pkt_count = int(run_time * packet_per_second)
    pktgen.low_level_start(pkt_count=pkt_count, pkt_size=pkt_size,
                           gap_ns=gap * 1000 * 1000 * 1000, flow_count=1)
    pktgen_start_time = time.time()
    flow_mod_pps_list = []

    # How fast were rules successfully written into the hardware table? We
    # take statistics at steady state. Also display flow statistics once in
    # a while.
    last_stat_time = [0]
    def callback(t_left):
        flow_stat_dict = control_client.execute(['GET', 'flow_count_dict'])
        for stat_time in sorted(flow_stat_dict.keys()):
            if stat_time > last_stat_time[0]:
                last_stat_time[0] = stat_time
                flow_count = flow_stat_dict[stat_time]
                print t_left, 'seconds left, with flows', flow_count
                if pktgen_start_time + 60 <= time.time() <= pktgen_start_time + 180:
                    flow_mod_pps_list.append(flow_count / 10.0)

    # Check the stat every 20 seconds.
    util.callback_sleep(run_time, callback, interval=20)

    # How fast were packets actually generated?
    pktgen_result = pktgen.stop_and_get_result()
    pktgen_pps = pktgen_result.sent_pkt_count / pktgen_result.running_time

    # How fast were pkt_out events?
    tcpdump_end_time = time.time()
    tcpdump_result = tcpdump.stop_and_get_result()
    pkt_out_pps = (tcpdump_result.dropped_pkt_count +
                   tcpdump_result.recvd_pkt_count) / \
                  (tcpdump_end_time - tcpdump_start_time)

    # Calculate the mean and stdev of successful flow_mod pps.
    (flow_mod_pps, flow_mod_pps_stdev) = \
        util.get_mean_and_stdev(flow_mod_pps_list)

    # How fast were pkt_in events arriving?
    pkt_in_count = control_client.execute(['GET', 'pkt_in_count'])
    pkt_in_start_time = control_client.execute(['GET', 'pkt_in_start_time'])
    pkt_in_end_time = control_client.execute(['GET', 'pkt_in_end_time'])
    pkt_in_pps = pkt_in_count / (pkt_in_end_time - pkt_in_start_time)

    return (pktgen_pps, pkt_in_pps, flow_mod_pps, flow_mod_pps_stdev,
            pkt_out_pps)
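# For reference, a minimal sketch of the util.get_mean_and_stdev helper used
# above (illustrative; the real implementation lives in util). Computes the
# mean and sample standard deviation, assuming at least two samples.
def _get_mean_and_stdev_sketch(value_list):
    n = len(value_list)
    mean = sum(value_list) / float(n)
    variance = sum((v - mean) ** 2 for v in value_list) / (n - 1)
    return (mean, variance ** 0.5)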