def main():
    global IP_TOT_LEN
    parser = argparse.ArgumentParser(
        description="On/Off traffic of a deterministic and stateless stream profile."
    )
    parser.add_argument(
        "--ip_src",
        type=str,
        default="192.168.17.1",
        help="Source IP address for all packets in the stream.",
    )
    parser.add_argument(
        "--ip_dst",
        type=str,
        default="192.168.17.2",
        help="Destination IP address for all packets in the stream.",
    )
    parser.add_argument(
        "--max_bit_rate",
        type=float,
        default=1,
        help="Maximal bit rate (in Gbps) of the underlying network.",
    )
    parser.add_argument("--on_time", type=int, default=2, help="ON time in seconds.")
    parser.add_argument(
        "--init_off_on_ratio",
        type=float,
        default=0.5,
        help="Initial ratio between OFF and ON time.",
    )
    parser.add_argument(
        "--iteration",
        type=int,
        default=1,
        help="Number of iterations for the ON state of each PPS.",
    )
    parser.add_argument(
        "--numa_node",
        type=int,
        default=0,
        help="The NUMA node of the cores used for TX and RX.",
    )
    parser.add_argument("--test", action="store_true", help="Only used for debugging.")
    parser.add_argument(
        "--out",
        type=str,
        default="",
        help="The name of the output file, stored in /home/malte/malte/latency if given.",
    )
    parser.add_argument(
        "--ip_tot_len",
        type=int,
        default=IP_TOT_LEN,
        help="The IP total length of the packets to be transmitted.",
    )
    parser.add_argument(
        "--enable_second_flow",
        action="store_true",
        help="Enable the second flow, used to test the two-VNF setup.",
    )
    parser.add_argument(
        "--soft",
        action="store_true",
        help="Use a different overlap for the second flow, which is easier to scale.",
    )
    args = parser.parse_args()

    IP_TOT_LEN = args.ip_tot_len

    stream_params = create_stream_params(
        args.max_bit_rate,
        args.on_time,
        args.init_off_on_ratio,
        args.iteration,
        args.test,
    )
    print("\n--- Initial stream parameters:")
    pprint.pp(stream_params)
    print()

    if args.enable_second_flow:
        print("INFO: The second flow is enabled. Two flows share the physical link.")
        # Simply reverse the link utilizations for the second flow.
        second_stream_params = copy.deepcopy(list(reversed(stream_params)))
        # Shorten the ON time of the second flow by 20 % and shift its ISGs
        # by the same amount (ISG is in microseconds).
        for s in second_stream_params:
            s["on_time"] = args.on_time - (args.on_time * 0.2)
            s["isg"] = s["isg"] + (args.on_time * 0.2) * 10 ** 6
        # Reset the ISG of the first stream of the second flow back to 1 s
        # -> both flows then always start together.
        if args.soft:
            second_stream_params[0]["isg"] = 1 * 10 ** 6
        print("\n--- Updated stream parameters with the second flow:")
        pprint.pp(second_stream_params)

    # Does not work on the blackbox.
    # core_mask = get_core_mask(args.numa_node)
    # print(f"The core mask for RX and TX: {hex(core_mask)}")

    if args.enable_second_flow:
        streams = create_streams_with_second_flow(
            stream_params, second_stream_params, args.ip_src, args.ip_dst
        )
    else:
        streams = create_streams(stream_params, args.ip_src, args.ip_dst)
        second_stream_params = None

    if args.test:
        pprint.pp([s.to_json() for s in streams])
        import sys

        sys.exit(0)

    if args.enable_second_flow:
        RX_DELAY_S = (sum([s["on_time"] for s in stream_params])) / 2.0 + 3
    else:
        RX_DELAY_S = sum([s["on_time"] for s in stream_params]) + 3
    # Time after the last TX to wait for the last packet on the RX side.
    RX_DELAY_MS = 3 * 1000

    try:
        client = STLClient()
        client.connect()
        tx_port, rx_port = init_ports(client)
        client.add_streams(streams, ports=[tx_port])

        # Start TX.
        start_ts = time.time()
        client.clear_stats()
        # All cores in the core_mask are used by the tx_port and its adjacent
        # port, which is normally the rx_port.
        # client.start(ports=[tx_port], core_mask=[core_mask], force=True)
        client.start(ports=[tx_port], force=True)

        print(f"The estimated RX delay: {RX_DELAY_MS / 1000} seconds.")
        client.wait_on_traffic(rx_delay_ms=RX_DELAY_MS)
        end_ts = time.time()
        test_dur = end_ts - start_ts
        print(f"Total test duration: {test_dur} seconds")

        # Check RX stats.
        # MARK: All latency results are in usec.
        err_cntrs_results, latency_results = get_rx_stats(
            client,
            tx_port,
            rx_port,
            stream_params,
            second_stream_params=second_stream_params,
        )
        print("--- The latency results of all streams:")
        print(f"- Number of streams first flow: {len(latency_results[0])}")
        for index, _ in enumerate(stream_params):
            print(f"- Stream: {index}")
            # Add timestamps to the .json dump to parse turbostat results later.
            err_cntrs_results[0][index]["start_ts"] = start_ts
            err_cntrs_results[0][index]["end_ts"] = end_ts
            print(err_cntrs_results[0][index])
            print(latency_results[0][index])

        # Save the stats as a .json dump.
        if args.out:
            savedir_latency = "/home/malte/malte/latency/flow1/"
            savedir_error = "/home/malte/malte/error/flow1/"
            if not os.path.exists(savedir_latency):
                os.mkdir(savedir_latency)
            if not os.path.exists(savedir_error):
                os.mkdir(savedir_error)
            savedir_latency += args.out + "_latency.json"
            savedir_error += args.out + "_error.json"
            print("\nResults: ", savedir_latency, ", ", savedir_error)
            save_rx_stats(err_cntrs_results[0], savedir_error, stream_params)
            save_rx_stats(latency_results[0], savedir_latency, stream_params)

        if second_stream_params is not None:
            print(f"\n\n- Number of streams second flow: {len(latency_results[1])}")
            for index, _ in enumerate(stream_params):
                print(f"- Stream: {index}")
                err_cntrs_results[1][index]["start_ts"] = start_ts
                err_cntrs_results[1][index]["end_ts"] = end_ts
                print(err_cntrs_results[1][index])
                print(latency_results[1][index])
            if args.out:
                savedir_latency = "/home/malte/malte/latency/flow2/"
                savedir_error = "/home/malte/malte/error/flow2/"
                if not os.path.exists(savedir_latency):
                    os.mkdir(savedir_latency)
                if not os.path.exists(savedir_error):
                    os.mkdir(savedir_error)
                savedir_latency += args.out + "_latency.json"
                savedir_error += args.out + "_error.json"
                print("\nResults: ", savedir_latency, ", ", savedir_error)
                save_rx_stats(err_cntrs_results[1], savedir_error, stream_params)
                save_rx_stats(latency_results[1], savedir_latency, stream_params)

    except STLError as error:
        print(error)

    finally:
        client.disconnect()
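
# The get_core_mask() call in main() is commented out because it does not work
# on the blackbox. Below is a minimal sketch of how a NUMA-aware core mask
# could be derived on Linux; get_core_mask_sketch() is a hypothetical helper
# for illustration, not the original implementation.
def get_core_mask_sketch(numa_node: int) -> int:
    """Build a bit mask of all CPUs that belong to the given NUMA node."""
    mask = 0
    # /sys/devices/system/node/node<N>/cpulist contains ranges like "0-7,16-23".
    with open(f"/sys/devices/system/node/node{numa_node}/cpulist") as f:
        for part in f.read().strip().split(","):
            if "-" in part:
                lo, hi = (int(x) for x in part.split("-"))
                cpus = range(lo, hi + 1)
            else:
                cpus = [int(part)]
            for cpu in cpus:
                mask |= 1 << cpu
    return mask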
def main():
    parser = argparse.ArgumentParser(
        description="Bursty traffic following a Poisson or Pareto traffic model."
    )
    parser.add_argument(
        "--ip_src",
        type=str,
        default="192.168.17.1",
        help="Source IP address for all packets in the stream.",
    )
    parser.add_argument(
        "--ip_dst",
        type=str,
        default="192.168.17.2",
        help="Destination IP address for all packets in the stream.",
    )
    # Due to different packet sizes, it is easier to keep the PPS fixed.
    # 0.25 Mpps -> about 3 Gbps bit rate.
    parser.add_argument(
        "--pps", type=float, default=0.25, help="Transmit L1 rate in Mpps."
    )
    # This default is chosen to leave some headroom for power management.
    parser.add_argument(
        "--tot_pkts_burst",
        type=int,
        default=50 * 10 ** 3,
        help="Total number of packets in each single burst.",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="poisson",
        choices=["poisson", "pareto"],
        help="Traffic model to be used.",
    )
    # MARK: Currently NOT implemented.
    parser.add_argument(
        "--src_num", type=int, default=1, help="Number of flow sources."
    )
    parser.add_argument(
        "--burst_num",
        type=int,
        default=100,
        help="The number of bursts in one test round.",
    )
    parser.add_argument("--test", action="store_true", help="Only used for debugging.")
    parser.add_argument(
        "--out", type=str, default="", help="Store the results in a file with the given name."
    )
    args = parser.parse_args()

    print(f"* The fastest reaction time of X-MEN: {X_MEN_REACTION_TIME} seconds.")
    print(f"* Traffic model: {args.model}")

    l3_data = {"ip_src": args.ip_src, "ip_dst": args.ip_dst}
    streams, flow_duration = get_streams(
        args.pps,
        args.burst_num,
        args.model,
        args.src_num,
        args.tot_pkts_burst,
        l3_data,
        args.test,
    )

    if args.test:
        pprint.pp([s.to_json() for s in streams[:3]])
        sys.exit(0)

    print(f"* Flow duration: {flow_duration} seconds.")

    try:
        client = STLClient()
        client.connect()
        tx_port, rx_port = init_ports(client)
        client.add_streams(streams, ports=[tx_port])

        start_ts = time.time()
        client.clear_stats()
        client.start(ports=[tx_port], force=True)

        rx_delay_sec = flow_duration + 5
        print(f"The estimated RX delay: {rx_delay_sec} seconds.")
        client.wait_on_traffic(rx_delay_ms=3000)  # rx_delay_sec * 10 ** 3
        end_ts = time.time()
        test_dur = end_ts - start_ts
        print(f"Total test duration: {test_dur} seconds")

        err_cntrs_results, latency_results = get_rx_stats(
            client, tx_port, rx_port, args.burst_num
        )
        print("--- The latency results of all streams:")
        for m_burst in range(args.burst_num):
            # Include the ISG duration, packet size and timestamps in the .json dump.
            err_cntrs_results[m_burst]["isg"] = ISGS_SAVE[m_burst]
            err_cntrs_results[m_burst]["len"] = IP_TOT_LENS_SAVE[m_burst]
            err_cntrs_results[m_burst]["start_ts"] = start_ts
            err_cntrs_results[m_burst]["end_ts"] = end_ts
            print("Burst ", m_burst)
            print("ISG: ", ISGS_SAVE[m_burst])
            print("Dropped: ", err_cntrs_results[m_burst]["dropped"])
            print("Latency: ", latency_results[m_burst]["average"])
            # print(err_cntrs_results[m_burst])
            # print(latency_results[m_burst])

        if args.out:
            savedir_latency = "/home/malte/malte/latency/"
            savedir_error = "/home/malte/malte/error/"
            if not os.path.exists(savedir_latency):
                os.mkdir(savedir_latency)
            if not os.path.exists(savedir_error):
                os.mkdir(savedir_error)
            savedir_latency += args.out + "_latency.json"
            savedir_error += args.out + "_error.json"
            print("\nResults: ", savedir_latency, ", ", savedir_error)
            save_rx_stats(err_cntrs_results, savedir_error, args.burst_num)
            save_rx_stats(latency_results, savedir_latency, args.burst_num)

    except STLError as error:
        print(error)

    finally:
        client.disconnect()
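
# The Poisson/Pareto burst spacing is produced inside get_streams(), which is
# defined elsewhere. Below is a minimal, hypothetical sketch (not the original
# code) of how inter-burst gaps for the two supported models could be drawn;
# draw_isgs_usec() and its mean_gap_s parameter are assumptions for illustration.
import numpy as np


def draw_isgs_usec(model, burst_num, mean_gap_s, rng=None):
    """Return burst_num inter-stream gaps (ISGs) in microseconds."""
    rng = rng or np.random.default_rng()
    if model == "poisson":
        # Poisson arrivals -> exponentially distributed inter-burst gaps.
        gaps_s = rng.exponential(scale=mean_gap_s, size=burst_num)
    elif model == "pareto":
        # Heavy-tailed gaps; shape 2.5 is an arbitrary illustrative choice,
        # scaled so the sample mean is roughly mean_gap_s.
        shape = 2.5
        gaps_s = rng.pareto(shape, size=burst_num) * mean_gap_s * (shape - 1)
    else:
        raise ValueError(f"Unknown traffic model: {model}")
    return gaps_s * 10 ** 6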
def main():
    global PAYLOAD_SIZE
    parser = argparse.ArgumentParser(
        description="On/Off traffic of a deterministic and stateless stream profile."
    )
    parser.add_argument(
        "--ip_src",
        type=str,
        default="192.168.17.1",
        help="Source IP address for all packets in the stream.",
    )
    parser.add_argument(
        "--ip_dst",
        type=str,
        default="192.168.17.2",
        help="Destination IP address for all packets in the stream.",
    )
    parser.add_argument(
        "--max_bit_rate",
        type=float,
        default=1,
        help="Maximal bit rate (in Gbps) of the underlying network.",
    )
    parser.add_argument("--on_time", type=int, default=2, help="ON time in seconds.")
    parser.add_argument(
        "--init_off_on_ratio",
        type=float,
        default=0.5,
        help="Initial ratio between OFF and ON time.",
    )
    parser.add_argument(
        "--iteration",
        type=int,
        default=1,
        help="Number of iterations for the ON state of each PPS.",
    )
    parser.add_argument(
        "--numa_node",
        type=int,
        default=0,
        help="The NUMA node of the cores used for TX and RX.",
    )
    parser.add_argument("--test", action="store_true", help="Only used for debugging.")
    parser.add_argument(
        "--out",
        type=str,
        default="",
        help="The name of the output file, stored in /home/malte/malte/latency if given.",
    )
    parser.add_argument(
        "--payload_size",
        type=int,
        default=PAYLOAD_SIZE,
        help="Payload size of the packets.",
    )
    args = parser.parse_args()

    PAYLOAD_SIZE = args.payload_size

    stream_params = create_stream_params(
        args.max_bit_rate,
        args.on_time,
        args.init_off_on_ratio,
        args.iteration,
        args.test,
    )
    print("\n--- Stream parameters to be used:")
    pprint.pp(stream_params)
    print()

    # Does not work on the blackbox.
    # core_mask = get_core_mask(args.numa_node)
    # print(f"The core mask for RX and TX: {hex(core_mask)}")

    streams = create_streams(stream_params, args.ip_src, args.ip_dst)
    if args.test:
        pprint.pp(streams)
        pprint.pp([s.to_json() for s in streams])

    RX_DELAY_S = sum([s["on_time"] for s in stream_params]) + 3
    # Time after the last TX to wait for the last packet on the RX side.
    RX_DELAY_MS = 3 * 1000

    try:
        client = STLClient()
        client.connect()
        tx_port, rx_port = init_ports(client)
        client.add_streams(streams, ports=[tx_port])

        # Start TX.
        start_ts = time.time()
        client.clear_stats()
        # All cores in the core_mask are used by the tx_port and its adjacent
        # port, which is normally the rx_port.
        # client.start(ports=[tx_port], core_mask=[core_mask], force=True)
        client.start(ports=[tx_port], force=True)

        print(f"The estimated RX delay: {RX_DELAY_MS / 1000} seconds.")
        client.wait_on_traffic(rx_delay_ms=RX_DELAY_MS)
        end_ts = time.time()
        test_dur = end_ts - start_ts
        print(f"Total test duration: {test_dur} seconds")

        # Check RX stats.
        # MARK: All latency results are in usec.
        # err_cntrs_results, latency_results = get_rx_stats(
        #     client, tx_port, rx_port, stream_params
        # )
        err_cntrs_results, latency_results, flow_results = get_rx_stats(
            client, tx_port, rx_port, stream_params
        )
        print("--- The latency results of all streams:")
        print(f"- Number of streams: {len(latency_results)}")
        for index, _ in enumerate(stream_params):
            print(f"- Stream: {index}")
            err_cntrs_results[index]["start_ts"] = start_ts
            err_cntrs_results[index]["end_ts"] = end_ts
            print(err_cntrs_results[index])
            print(latency_results[index])
            print(flow_results[index])

        if args.out:
            savedir_latency = "/home/malte/malte/latency/" + args.out + "_latency.json"
            savedir_error = "/home/malte/malte/error/" + args.out + "_error.json"
            print("Results: ", savedir_latency, ", ", savedir_error)
            save_rx_stats(err_cntrs_results, savedir_error, stream_params)
            save_rx_stats(latency_results, savedir_latency, stream_params)

    except STLError as error:
        print(error)

    finally:
        client.disconnect()
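
# save_rx_stats() is defined elsewhere in this repository. As a rough,
# hypothetical sketch (an assumption, not the original implementation), the
# per-stream result dictionaries could be persisted as a plain JSON dump:
import json


def save_rx_stats_sketch(results, path, stream_params):
    """Dump one result dict per stream index into a JSON file."""
    dump = {str(index): results[index] for index, _ in enumerate(stream_params)}
    with open(path, "w", encoding="utf-8") as f:
        json.dump(dump, f, indent=2)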