def getFeature(self, src_path):
    FILEPATH = src_path
    print("Start: " + FILEPATH)
    print(datetime.datetime.now())
    FILE_EXTRACT_PATH = self.get_extract_path(FILEPATH)
    pcap = pyshark.FileCapture(FILEPATH)
    featureTotal = ""
    featureSet = set()
    featureSetFinal = set()
    packetList = []
    packetListStt = []
    index = 1
    for packet in pcap:
        print("Index packet %d" % index)
        index += 1
        protocol = self.get_protocol(packet)
        if protocol != "other":
            src_ip = self.get_src_ip(packet)
            dst_ip = self.get_dst_ip(packet)
            src_port = self.get_src_port(packet, protocol)
            dst_port = self.get_dst_port(packet, protocol)
            service = self.get_service_http(protocol, dst_port)
            flag = self.get_flag(packet, protocol)
            sequence_number = self.get_seq_number(packet, protocol)
            acknowledgment_number = self.get_ackn_number(packet, protocol)
            packetList.append(
                Packet(src_ip, dst_ip, src_port, dst_port, protocol, service,
                       flag, sequence_number, acknowledgment_number))
    pcap.close()
    ind = 1
    for packet in packetList:
        print("Index packetList %d" % ind)
        ind += 1
        status_conn = self.is_connection_success(ind, packet, packetList)
        packetListStt.append(
            PacketStt(packet.src_ip, packet.dst_ip, packet.src_port,
                      packet.dst_port, packet.protocol, packet.service,
                      packet.flag, packet.sequence_number,
                      packet.acknowledgment_number, status_conn))
    i = 0
    for packet in packetListStt:
        print("Index packetListStt %d" % i)
        i += 1
        featureStrTuple = ""
        featureStr = ""
        featureStrTuple += packet.src_ip + ","
        featureStrTuple += packet.dst_ip + ","
        featureStrTuple += packet.src_port + ","
        featureStrTuple += packet.dst_port + ","
        featureStrTuple += packet.protocol + ","
        featureStrTuple += packet.service + ","
        # featureStrTuple += packet.flag + ","
        # featureStrTuple += packet.sequence_number + ","
        # featureStrTuple += packet.acknowledgment_number + ","
        featureStrTuple += packet.status_conn
        if featureStrTuple not in featureSet:
            featureSet.add(featureStrTuple)
        else:
            continue
        calcu_feature = self.get_calculate_feature(packet, packetListStt)
        featureStr += packet.src_ip + ","
        featureStr += packet.dst_ip + ","
        featureStr += packet.src_port + ","
        featureStr += packet.dst_port + ","
        featureStr += packet.protocol + ","
        featureStr += calcu_feature + ","
        featureStr += packet.service + ","
        # flag_s0
        if packet.status_conn == "False":
            featureStr += "1" + ","
        else:
            featureStr += "0" + ","
        # flag_sf (last field: no trailing comma in either branch)
        if packet.status_conn == "True":
            featureStr += "1"
        else:
            featureStr += "0"
        featureStr += "\n"
        if featureStr not in featureSetFinal:
            featureSetFinal.add(featureStr)
        else:
            continue
        featureTotal += featureStr
    with open(FILE_EXTRACT_PATH, "w+") as f:
        f.write(featureTotal)
    print("Done: " + FILEPATH)
    print(datetime.datetime.now())
def process_pcap(fname, proto, port, outdir):
    """
    Process pcap file
    :param fname: Input pcap file name
    :param proto: Protocol to analyze
    :param port: TCP/UDP port used by this protocol
    :param outdir: Output directory for the generated plots
    :return:
    """
    # global packets_array
    # print('Opening {}...'.format(fname))
    if proto == 'gsm_map':
        filtered_cap = pyshark.FileCapture(fname,
                                           display_filter='gsm_map',
                                           only_summaries=False)
        filtered_cap.apply_on_packets(process_packet_gsm_map, timeout=10000)
        print_results(
            dict_ips, nodes_csv, proto, '{}_{}.pdf'.format(
                os.path.splitext(os.path.basename(fname))[0], 'ips'))
        print_results(
            dict_pcs, nodes_csv, proto, '{}_{}.pdf'.format(
                os.path.splitext(os.path.basename(fname))[0], 'point-codes'))
        # print_results(dict_gts, proto)
    elif proto == 'diameter':
        if port:
            decode_as = {'tcp.port=={}'.format(port): 'diameter'}
            filtered_cap = pyshark.FileCapture(fname,
                                               decode_as=decode_as,
                                               display_filter='diameter',
                                               only_summaries=False)
        else:
            filtered_cap = pyshark.FileCapture(fname,
                                               display_filter='diameter',
                                               only_summaries=False)
        filtered_cap.apply_on_packets(process_packet_diameter, timeout=10000)
        print_results(
            dict_ips, nodes_csv, proto, '{}_{}.pdf'.format(
                os.path.splitext(os.path.basename(fname))[0], 'ips'))
        # print_results(dict_nodes, proto, '{}_{}.pdf'.format(os.path.splitext(os.path.basename(fname))[0], 'nodes'))
    elif proto == 'gtp':
        # This option needs to cover both GTP and GTPv2
        filtered_cap = pyshark.FileCapture(fname,
                                           display_filter='gtp',
                                           only_summaries=False)
        filtered_cap.apply_on_packets(process_packet_gtp, timeout=10000)
        print_results(
            dict_ips, nodes_csv, proto, '{}_{}.pdf'.format(
                os.path.splitext(os.path.basename(fname))[0], 'ips'), outdir)
        # print_results(dict_nodes, proto, '{}_{}.pdf'.format(os.path.splitext(os.path.basename(fname))[0], 'nodes'), outdir)
        print_results(
            dict_vlans, nodes_csv, proto, '{}_{}.pdf'.format(
                os.path.splitext(os.path.basename(fname))[0], 'vlans'), outdir)
    elif proto == 'gtpv2':
        filtered_cap = pyshark.FileCapture(fname,
                                           display_filter='gtpv2',
                                           only_summaries=False)
        filtered_cap.apply_on_packets(process_packet_gtp, timeout=10000)
        print_results(
            dict_ips, nodes_csv, proto, '{}_{}.pdf'.format(
                os.path.splitext(os.path.basename(fname))[0], 'ips'), outdir)
        # print_results(dict_nodes, proto, '{}_{}.pdf'.format(os.path.splitext(os.path.basename(fname))[0], 'nodes'), outdir)
        print_results(
            dict_vlans, nodes_csv, proto, '{}_{}.pdf'.format(
                os.path.splitext(os.path.basename(fname))[0], 'vlans'), outdir)
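# Standalone sketch of the decode_as idiom used in the diameter branch above:
# it tells the underlying tshark to dissect a non-standard port as DIAMETER.
# The file name and port 3870 are assumptions for illustration.
import pyshark

cap = pyshark.FileCapture('capture.pcap',
                          decode_as={'tcp.port==3870': 'diameter'},
                          display_filter='diameter')
for pkt in cap:
    print(pkt.highest_layer)  # should report DIAMETER once decode_as applies
cap.close()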
import pyshark

cap = pyshark.FileCapture('mult2.pcap')
for pkt in cap:
    try:
        print(pkt.bootp.field_names)
        print(pkt.bootp.option_hostname)
        # print(pkt.bootp['id'])
    except AttributeError:
        pass
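# A minimal companion sketch for the field access above: pyshark layers also
# expose get_field_value(), which returns None for a missing field instead of
# raising, so it is easier to use in a loop. Same 'mult2.pcap' as above.
import pyshark

cap = pyshark.FileCapture('mult2.pcap')
for pkt in cap:
    if 'BOOTP' in pkt:
        # Returns None instead of raising when the field is absent.
        print(pkt.bootp.get_field_value('id'))
cap.close()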
def check_app(app, fullpacket=False, force=False):
    '''
    Check application based on app name in Tapioca results
    '''
    dnscacheloaded = False
    targetscacheloaded = False
    largewarned = False

    # load local network from config
    net.set_local()

    # Get pcap file location
    if app.endswith('.pcap'):
        pcapfile = app
        if os.path.exists(pcapfile):
            sys.stdout = Logger('%s.%s' % (pcapfile, report_output))
    else:
        pcapfile = os.path.join('results', app, 'tcpdump.pcap')
        if os.path.exists(pcapfile):
            sys.stdout = Logger(os.path.join('results', app, report_output))

    if os.path.exists(pcapfile):
        pcapdir = os.path.dirname(pcapfile)
        dnspkl = os.path.join(pcapdir, '.dnsmap.pkl')
        targetspkl = os.path.join(pcapdir, '.targets.pkl')

        eprint(color.bright('Checking app %s...' % color.cyan(app)))

        if os.path.exists(dnspkl) and not force:
            eprint('Loading cached DNS info...')
            with open(dnspkl, 'rb') as pklhandle:
                try:
                    net.dnsmap = pickle.load(pklhandle)
                    net.dnsreqs = pickle.load(pklhandle)
                    dnscacheloaded = True
                except:
                    pass

        if not dnscacheloaded:
            if os.path.getsize(pcapfile) > 100000000:
                # Over 100MB
                eprint(
                    color.bright(
                        color.yellow(
                            'Warning: capture size is large. Please be patient.'
                        )))
                largewarned = True

            # Get captured DNS info for IP addresses
            eprint('Getting DNS info...')
            dnspackets = pyshark.FileCapture(
                pcapfile, keep_packets=False, display_filter='dns')
            dnspackets.apply_on_packets(net.get_dns_info, timeout=1000)
            with open(dnspkl, 'wb') as pklhandle:
                pickle.dump(
                    net.dnsmap, pklhandle, protocol=pickle.HIGHEST_PROTOCOL)
                pickle.dump(
                    net.dnsreqs, pklhandle, protocol=pickle.HIGHEST_PROTOCOL)

        # if os.path.exists(targetspkl) and not force:
        #     eprint('Loading cached targets...')
        #     with open(targetspkl, 'rb') as pklhandle:
        #         try:
        #             net.targets = pickle.load(pklhandle)
        #             targetscacheloaded = True
        #         except:
        #             pass

        if not targetscacheloaded:
            if fullpacket:
                packets = pyshark.FileCapture(
                    pcapfile, keep_packets=False)
                # Get hosts contacted
                eprint('Getting hosts contacted...')
                packets.apply_on_packets(
                    net.get_hosts_contacted_fullpacket, timeout=1000)
            else:
                packets = pyshark.FileCapture(
                    pcapfile, keep_packets=False, only_summaries=True)
                # Get hosts contacted
                eprint('Getting hosts contacted...')
                packets.apply_on_packets(net.get_hosts_contacted, timeout=1000)

            # with open(targetspkl, 'wb') as pklhandle:
            #     pickle.dump(
            #         net.targets, pklhandle, protocol=pickle.HIGHEST_PROTOCOL)

        # Print report
        generate_report(app, fullpacket=fullpacket, pcapfile=pcapfile)

    # Reset globals
    net.clear()
def _insert_msql(self, file_path, app_package, app_type):
    import time
    startTime = time.time()
    print("Start inserting", app_package, file_path)
    dbdao = SqlDao()
    appInfo = self.apps.get(app_package, None)
    if appInfo is None:
        if app_type == consts.IOS:
            print('Error!! Can not find', app_package)
    packages = pyshark.FileCapture(file_path, display_filter='http')
    timeStampTwo = time.time()
    print('Get appInfo', timeStampTwo - startTime)
    dns_info = {}
    comunicate_host = set()
    if appInfo is not None:
        app_package = appInfo.package
        app_name = appInfo.name
        app_category = appInfo.category
        app_company = appInfo.company
    else:
        app_name = None
        app_category = None
        app_company = None
    pkgInfos = []
    while True:
        try:
            p = next(packages)
            if hasattr(p, 'http'):
                pkgInfo = self._parse_http_package(p)
                if pkgInfo[ETLConsts.HOST] is None or len(
                        pkgInfo[ETLConsts.HOST]
                ) == 0 or '.' not in pkgInfo[ETLConsts.HOST]:
                    ip = pkgInfo[ETLConsts.DESTINATION]
                    if ip in dns_info and len(dns_info[ip]) == 1:
                        host = dns_info[ip].pop()
                        pkgInfo[ETLConsts.HOST] = host
                        dns_info[ip].add(host)
                pkgInfo['app_name'] = app_name
                pkgInfo['app_category'] = app_category
                pkgInfo['app_company'] = app_company
                pkgInfos.append(pkgInfo)
                if len(pkgInfos) > 500:
                    break
                if time.time() - timeStampTwo > 5:
                    print("Spent more than 5s. Stop")
                    break
            else:
                print(app_name, app_category, app_company)
                print('ERROR WRONG PACKAGE TYPE')
        except StopIteration:
            break
        except:
            print('ERROR', sys.exc_info()[0])
    try:
        packages.close()
    except:
        pass
    timeStampThree = time.time()
    print('Parsing pcaps', timeStampThree - timeStampTwo)
    params = []
    for pkgInfo in pkgInfos:
        app_name = pkgInfo['app_name']
        app_category = pkgInfo['app_category']
        app_company = pkgInfo['app_company']
        params.append(
            (app_package, pkgInfo[ETLConsts.SOURCE],
             pkgInfo[ETLConsts.DESTINATION], pkgInfo[ETLConsts.TIME],
             pkgInfo[ETLConsts.ADD_HEADER], pkgInfo[ETLConsts.HOST],
             pkgInfo[ETLConsts.PATH], pkgInfo[ETLConsts.ACCEPT],
             pkgInfo[ETLConsts.AGENT], pkgInfo[ETLConsts.REFER],
             pkgInfo[ETLConsts.AUTHOR], pkgInfo[ETLConsts.CONTENT_LENGTH],
             pkgInfo[ETLConsts.CONTENT_TYPE], pkgInfo[ETLConsts.METHOD],
             pkgInfo[ETLConsts.SIZE], pkgInfo[ETLConsts.HTTP_TYPE],
             app_name, app_category, app_company, pkgInfo[ETLConsts.RAW]))
        comunicate_host.add(pkgInfo[ETLConsts.HOST])
    dbdao.executeBatch(self.INSERT_PACKAGES, params)
    dbdao.close()
    print('inserting', time.time() - timeStampThree)
    print("Finish", app_package, "Package:", len(params), len(pkgInfos))
def get_trace_controller(self, tracepath):
    try:
        cap = pyshark.FileCapture(tracepath, only_summaries=True)
    except:
        sys.stderr.write("Importing file failed.\n")
        return None
    return cap
def __getContent(self):
    cap = pyshark.FileCapture(
        self.capture,
        display_filter=
        '(tcp or udp) and (not mdns) and (not tls) and (not ssdp) and (not gryphon)'
    )
    srcList = []
    dstList = []
    vendorByMac = {}  # cache so each MAC is looked up only once
    self.udpCounter = 0
    self.udpData = set()
    self.udpTime = []
    for packet in cap:
        transportLayer = packet.transport_layer
        srcMac = str(packet[0].src)
        dstMac = str(packet[0].dst)
        if srcMac != 'ff:ff:ff:ff:ff:ff' and dstMac != 'ff:ff:ff:ff:ff:ff':
            # Resolve the vendor for every packet (not only the first time a
            # MAC is seen) so the bookkeeping below always uses the right bucket.
            if srcMac not in vendorByMac:
                try:
                    vendorByMac[srcMac] = MacLookup().lookup(srcMac)
                except:
                    vendorByMac[srcMac] = 'Unknown'
            srcVendor = vendorByMac[srcMac]
            if dstMac not in vendorByMac:
                try:
                    vendorByMac[dstMac] = MacLookup().lookup(dstMac)
                except:
                    vendorByMac[dstMac] = 'Unknown'
            dstVendor = vendorByMac[dstMac]
            if srcMac not in srcList:
                srcList.append(srcMac)
                devicekey = {
                    srcMac: {
                        'TCP Payload': [],
                        'TCP Payload Length': [],
                        'Window Size': [],
                        'Rogue': False
                    }
                }
                if srcVendor not in self.VendorDeviceDict:
                    self.VendorDeviceDict.update({srcVendor: {}})
                if srcMac not in self.VendorDeviceDict[srcVendor]:
                    self.VendorDeviceDict[srcVendor].update(devicekey)
            if dstMac not in dstList:
                dstList.append(dstMac)
                devicekey = {
                    dstMac: {
                        'TCP Payload': [],
                        'TCP Payload Length': [],
                        'Window Size': [],
                        'Rogue': False
                    }
                }
                if dstVendor not in self.VendorDeviceDict:
                    self.VendorDeviceDict.update({dstVendor: {}})
                if dstMac not in self.VendorDeviceDict[dstVendor]:
                    self.VendorDeviceDict[dstVendor].update(devicekey)
            if transportLayer == 'TCP':
                self.udpCounter = 0
                self.udpTime = []
                self.udpData = []
                try:
                    windowSize = packet[2].window_size_value
                except:
                    continue
                self.VendorDeviceDict[srcVendor][srcMac][
                    'Window Size'].append(windowSize)
                for layer in packet:
                    if 'DATA' in str(layer):
                        try:
                            payload = str(packet.data.data)
                            payloadLen = int(packet.data.len)
                            self.VendorDeviceDict[srcVendor][srcMac][
                                'TCP Payload'].append(payload)
                            self.VendorDeviceDict[srcVendor][srcMac][
                                'TCP Payload Length'].append(payloadLen)
                        except:
                            continue
            if transportLayer == 'UDP':
                self.__udpCheck(srcVendor, packet, srcMac)
        self.pCount = self.pCount + 1
    cap.close()
    self.finish = time.perf_counter()
def print_user():
    count = 0
    capture = pyshark.FileCapture('sample.pcap',
                                  display_filter='http contains user')
    for packet in capture:
        count = count + 1
    print(count)
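# A sketch of the same count done without keeping packets in memory, assuming
# the same 'sample.pcap' and filter as above: keep_packets=False plus
# apply_on_packets() lets pyshark discard each packet after the callback runs,
# which matters on large captures.
import pyshark

count = 0

def _bump(pkt):
    global count
    count += 1

capture = pyshark.FileCapture('sample.pcap',
                              keep_packets=False,
                              display_filter='http contains user')
capture.apply_on_packets(_bump)
capture.close()
print(count)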
def data_data_dump(self):
    cap = pyshark.FileCapture('../data_file/zhanbao.pcap')
    new_order_len = 121  # payload length of a new order, header stripped
    order_confirm_len = 201  # payload length of an order confirmation / successful cancel response, header stripped
    cancel_order_len = 98  # payload length of a cancel request, header stripped
    cancel_order_failed_len = 127  # payload length of a failed-cancel response, header stripped
    match_order_len = 165  # payload length of a fill (trade execution), header stripped
    for i in cap:
        if 'data' in i:  # only packets that carry a data layer
            len_data = len(i.data.data)
            if i.ip.src_host == self.ors_ip and i.ip.dst_host == self.tgw_ip:
                # match on the configured source and destination IPs
                """New order: ORS -> TGW"""
                if len_data % (2 * new_order_len) == 0:  # new order
                    count = 0
                    multi = int(len_data / (2 * new_order_len))
                    while count < multi:
                        self.dump_data_list.append(
                            i.data.data[count * (2 * new_order_len):
                                        (2 * new_order_len) * (count + 1)])
                        self.dump_timestamp.append(i.sniff_timestamp)
                        count += 1
            elif i.ip.src_host == self.tgw_ip and i.ip.dst_host == self.ors_ip:
                """Order confirmation and successful cancel response: TGW -> ORS"""
                if len_data % (2 * order_confirm_len) == 0:
                    # order confirmation / successful cancel response
                    count = 0
                    multi = int(len_data / (2 * order_confirm_len))
                    while count < multi:
                        self.dump_data_list.append(
                            i.data.data[count * (2 * order_confirm_len):
                                        (2 * order_confirm_len) * (count + 1)])
                        self.dump_timestamp.append(i.sniff_timestamp)
                        count += 1
                elif len_data % (2 * cancel_order_len) == 0:  # cancel request
                    """Cancel request: TGW -> ORS"""
                    print(len_data)
                    count = 0
                    multi = int(len_data / (2 * cancel_order_len))
                    while count < multi:
                        self.dump_data_list.append(
                            i.data.data[count * (2 * cancel_order_len):
                                        (2 * cancel_order_len) * (count + 1)])
                        self.dump_timestamp.append(i.sniff_timestamp)
                        count += 1
                elif len_data % (2 * cancel_order_failed_len) == 0:
                    """Failed cancel response: TGW -> ORS"""
                    count = 0
                    multi = int(len_data / (2 * cancel_order_failed_len))
                    while count < multi:
                        self.dump_data_list.append(
                            i.data.data[count * (2 * cancel_order_failed_len):
                                        (2 * cancel_order_failed_len) * (count + 1)])
                        self.dump_timestamp.append(i.sniff_timestamp)
                        count += 1
                elif len_data % (2 * match_order_len) == 0:  # fill
                    """Fill (trade execution): TGW -> ORS"""
                    count = 0
                    multi = int(len_data / (2 * match_order_len))
                    while count < multi:
                        self.dump_data_list.append(
                            i.data.data[count * (2 * match_order_len):
                                        (2 * match_order_len) * (count + 1)])
                        self.dump_timestamp.append(i.sniff_timestamp)
                        count += 1
                else:
                    continue
        else:
            continue
    # print(self.dump_data)
    # print(self.dump_timestamp)
    cap.close()
# encoding: utf-8
import pyshark
import json
import gzip, zlib

# Open a saved capture file
#cap = pyshark.FileCapture('/Users/jiajia/Develop/workspace/PacketProcess/py_capature_pkt/pcap/mysql_complete.pcap')
#cap = pyshark.FileCapture('/Users/jiajia/Develop/workspace/PacketProcess/py_capature_pkt/pcap/pgsql.cap')
#cap = pyshark.FileCapture('/Users/jiajia/Develop/workspace/PacketProcess/py_capature_pkt/pcap/ms-sql-tds-rpc-requests.cap')
#cap = pyshark.FileCapture('/Users/jiajia/Develop/workspace/PacketProcess/py_capature_pkt/pcap/TNS_Oracle1.pcap')
cap = pyshark.FileCapture('/Users/jiajia/Develop/workspace/PacketProcess/py_capature_pkt/pcap/http_pkt.pcap')
cap.load_packets()

# Capture from a network interface instead
#cap = pyshark.LiveCapture(interface='en0')
#cap.sniff(packet_count=1000)

#layer_dict = {}
data = {}
for pkt in cap:
    #layer_list = []
    #data['highest_layer'] = pkt.highest_layer
    #if not pkt.number in ['574']:
    #    continue
    #if not pkt.number in ['839', '841', '857', '859']:
    #    continue
    if pkt.highest_layer != 'HTTP':
        continue
    print("Number:" + pkt.number)
import subprocess
import pyshark

f = pyshark.FileCapture('hackpad.pcap', display_filter='http')
print('done parsing')

secret = '3ed2e01c1d1248125c67ac637384a22d997d9369c74c82abba4cc3b1bfc65f026c957ff0feef61b161cfe3373c2d9b905639aa3688659566d9acc93bb72080f7e5ebd643808a0e50e1fc3d16246afcf688dfedf02ad4ae84fd92c5c53bbd98f08b21d838a3261874c4ee3ce8fbcb96628d5706499dd985ec0c13573eeee03766f7010a867edfed92c33233b17a9730eb4a82a6db51fa6124bfc48ef99d669e21740d12656f597e691bbcbaa67abe1a09f02afc37140b167533c7536ab2ecd4ed37572fc9154d23aa7d8c92b84b774702632ed2737a569e4dfbe01338fcbb2a77ddd6990ce169bb4f48e1ca96d30eced23b6fe5b875ca6481056848be0fbc26bcbffdfe966da4221103408f459ec1ef12c72068bc1b96df045d3fa12cc2a9dcd162ffdf876b3bc3a3ed2373559bcbe3f470a8c695bf54796bfe471cd34b463e9876212df912deef882b657954d7dada47'

for i in range(8 + 256, 8 + 256 * 1000, 2):  # blargh
    c = f[i]['urlencoded-form']
    s = f[i + 1]['data-text-lines']
    if 'Internal Server Error' in str(s):
        continue
    print(i)
    print(c)
    print(s)

# from pwn import xor; print(xor(data,16,prev_block))
'''
3ed2e01c1d1248125c67ac637384a22d In cryptography,
997d9369c74c82abba4cc3b1bfc65f02  a padding oracl
6c957ff0feef61b161cfe3373c2d9b90 e attack is an a
5639aa3688659566d9acc93bb72080f7 ttack which is p
e5ebd643808a0e50e1fc3d16246afcf6 erformed using t
88dfedf02ad4ae84fd92c5c53bbd98f0 he padding of a
8b21d838a3261874c4ee3ce8fbcb9662
8d5706499dd985ec0c13573eeee03766  hitcon{H4
f7010a867edfed92c33233b17a9730eb cked by a de1ici
4a82a6db51fa6124bfc48ef99d669e21 0us pudding '3'}
740d12656f597e691bbcbaa67abe1a09
f02afc37140b167533c7536ab2ecd4ed
'''
# -*- coding=utf-8 -*-
import pyshark

pkt_list = []
cap = pyshark.FileCapture('cap1.pcap', keep_packets=False, display_filter='http')
method_dict = {}


def print_highest_layer(pkt):
    # Main task of this script: analyze the HTTP traffic and count the number
    # of requests per method.
    try:
        # Dictionary layout:
        # key = method, value = count
        counts = method_dict.get(pkt.http.request_method, 0)
        counts += 1
        method_dict[pkt.http.request_method] = counts
    except:
        pass


# Apply the function to every packet
cap.apply_on_packets(print_highest_layer)
print(method_dict)

if __name__ == '__main__':
    # Visualize the counts with matplotlib. The original left this block
    # empty; the bar chart below is a minimal sketch of what its comment
    # announced.
    import matplotlib.pyplot as plt

    plt.bar(list(method_dict.keys()), list(method_dict.values()))
    plt.xlabel('HTTP method')
    plt.ylabel('Request count')
    plt.show()
import pyshark
from collections import defaultdict
from statistics import mean

cap = pyshark.FileCapture('./mpi_exp1.pcapng')

syn_list = []
syn_ack_dict = defaultdict(list)
ack_dict = defaultdict(list)
fin_ack_dict = defaultdict(list)
conn_time = []

# TODO: setup discard of retrans SYN or SYN/ACK packets.
# Probably by only allowing first relative time per stream
for pkt in cap:
    if 'TCP' in pkt:
        if pkt.tcp.flags == '0x00000002':
            # SYN packet, 1 per stream
            syn_list.append(pkt)
        if pkt.tcp.flags == '0x00000012':
            # SYN/ACK packet, 1 per stream
            syn_ack_dict[pkt.tcp.stream].append(pkt)
        if pkt.tcp.flags == '0x00000010':
            # 1st ACK packet in stream
            if pkt.tcp.seq == '1' and pkt.tcp.ack == '1':
                ack_dict[pkt.tcp.stream].append(pkt)
        if pkt.tcp.flags == '0x00000011':
            # Fin/ACK packet, 2 per stream
            fin_ack_dict[pkt.tcp.stream].append(pkt)

for pkt in syn_list:
    ack = ack_dict[pkt.tcp.stream].pop()
    syn_time = float(pkt.tcp.time_relative)
    ack_time = float(ack.tcp.time_relative)
    # The snippet ended here; the two lines below are the natural completion,
    # given the otherwise unused conn_time list and mean import above.
    conn_time.append(ack_time - syn_time)

print(mean(conn_time))
##cap = pyshark.FileCapture('data/2018-07-31-15-15-09-192.168.100.113.pcap')
##device_IP = "192.168.100.113"
##results_file = "192.168.100.113-1"
##analyze_packet(cap,device_IP,results_file)

##cap = pyshark.FileCapture('data/2018-10-03-15-22-32-192.168.100.113.pcap')
##device_IP = "192.168.100.113"
##results_file = "192.168.100.113-2"
##analyze_packet(cap,device_IP,results_file)

for i in range(1, 10):
    file = (
        "data/2018-07-20-17-31-20-192.168.100.108_0000{0}.pcapng".format(i))
    cap = pyshark.FileCapture(file)
    device_IP = "192.168.100.108"
    results_file = ("192.168.100.108-1_{0}".format(i))
    analyze_packet(cap, device_IP, results_file)

for i in range(10, 100):
    file = ("data/2018-07-20-17-31-20-192.168.100.108_000{0}.pcapng".format(i))
    cap = pyshark.FileCapture(file)
    device_IP = "192.168.100.108"
    results_file = ("192.168.100.108-1_{0}".format(i))
    analyze_packet(cap, device_IP, results_file)

##cap = pyshark.FileCapture('data/2019-01-10-14-34-38-192.168.1.197.pcap')
##device_IP = "192.168.1.197"
##results_file = "192.168.1.197-1"
##analyze_packet(cap,device_IP,results_file)
if len(sys.argv) < 4 or not parse_args(sys.argv):
    print(
        "Usage: python3 from_pcap.py <input file> [-out output_file] [-es Elasticsearch_URI] [--debug]"
    )
    exit(1)

pcapfile = sys.argv[1]
subnet = load_config("./subnet.config")
tcp_sessions = {}
udp_sessions = {}
first_udp = {}

if debug:
    print("[*] Reading pcap file...")
packets = pyshark.FileCapture(pcapfile, display_filter="ip && (tcp || udp)")

if debug:
    print("[*] Parsing sessions...")
i = 0
for packet in packets:
    if 'TCP' in packet:
        # Session begin
        if packet.tcp.flags_syn == '1' and packet.tcp.flags_ack == '1' and in_subnet(
                packet.ip.src, subnet) and not in_subnet(packet.ip.dst, subnet):
            tcp_sessions[packet.tcp.stream] = {
                'client': packet.ip.dst,
                'server': packet.ip.src,
                'client_port': packet.tcp.dstport,
                'server_port': packet.tcp.srcport,
                'protocol': packet.highest_layer,
def FeatureExtraction(pcapPath, csvPath):
    caps = rdpcap(pcapPath)
    filteredCaps = pyshark.FileCapture(pcapPath, display_filter="tcp")
    FlowSource = None
    FlowDestination = None
    #print(caps.sessions().keys())
    totalFwdPackets = 0
    totalBwdPackets = 0
    TotalLengthofFwdPackets = 0
    TotalLengthofBwdPackets = 0
    TotalLengthofFwdHeaders = 0
    TotalLengthofBwdHeaders = 0
    flowDuration = 0
    Act_data_pkt_forward = 0
    lengthofFwdPackets = []
    lengthofBwdPackets = []
    lengthofFlowPackets = []
    lengthofFwdHeaders = []
    lengthofBwdHeaders = []
    deltaFwd = []
    deltaBwd = []
    deltaFlow = []
    FwdCaps = []
    bwdCaps = []
    FilterdFwdCaps = []
    FilterdBwdCaps = []
    flowActive = []
    flowIdle = []
    FINFlagCount = 0
    SYNFlagCount = 0
    RSTFlagCount = 0
    PSHFlagCount = 0
    ACKFlagCount = 0
    URGFlagCount = 0
    CWEFlagCount = 0
    ECEFlagCount = 0
    fwdPSHFlags = 0
    bwdPSHFlags = 0
    fwdURGFlags = 0
    bwdURGFlags = 0
    sfLastPacketTS = -1
    sfCount = 0
    sfAcHelper = -1
    threshold = 5000000
    FIN = 0x01
    SYN = 0x02
    RST = 0x04
    PSH = 0x08
    ACK = 0x10
    URG = 0x20
    ECE = 0x40
    CWE = 0x80
    protocol = 6
    flowPacketNumber = 0
    fbulkDuration = 0
    fbulkPacketCount = 0
    fbulkSizeTotal = 0
    fbulkStateCount = 0
    fbulkPacketCountHelper = 0
    fbulkStartHelper = 0
    fbulkSizeHelper = 0
    flastBulkTS = 0
    bbulkDuration = 0
    bbulkPacketCount = 0
    bbulkSizeTotal = 0
    bbulkStateCount = 0
    bbulkPacketCountHelper = 0
    bbulkStartHelper = 0
    bbulkSizeHelper = 0
    blastBulkTS = 0
    bAvgBytesPerBulk = 0
    bAvgPacketsPerBulk = 0
    bAvgBulkRate = 0
    fAvgBytesPerBulk = 0
    fAvgPacketsPerBulk = 0
    fAvgBulkRate = 0
    time.strftime("%I:%M")
    time.strftime("%d/%m/%y")
    print(time.strftime("%I:%M"))
    flag = True
    flagBwd = True
    for i, cap in enumerate(caps):
        if TCP in cap:
            if i == 0:
                FwdSource = cap['IP'].src
                FwdDestination = cap['IP'].dst
                FlowSource = FwdSource
                FlowDestination = FwdDestination
                flowStartTime = cap.time * 1000000
                flowLastSeen = cap.time * 1000000
                startActive = cap.time * 1000000
                endActiveTime = cap.time * 1000000
                x = cap.time * 1000000
                # timeStamp = datetime.datetime.strptime(str(x/1000), "%a %b %d %H:%M:%S %Y")
                if (sfLastPacketTS == -1):
                    sfLastPacketTS = cap.time * 1000000
                    sfAcHelper = cap.time * 1000000
                if ((cap.time - (sfLastPacketTS) / 1000000.0) > 1.0):
                    sfCount += 1
                    lastSFduration = (cap.time - sfAcHelper) * 1000000
                    currentTime = (cap.time * 1000000 - sfLastPacketTS)
                    if (currentTime - endActiveTime > threshold):
                        if (endActiveTime - startActive > 0):
                            flowActive.append(endActiveTime - startActive)
                        flowIdle.append(currentTime - endActiveTime)
                        endActiveTime = currentTime
                        startActive = currentTime
                    else:
                        endActiveTime = currentTime
                    sfAcHelper = cap.time * 1000000
                sfLastPacketTS = cap.time * 1000000
                #forwardBytes+=packet.getPayloadBytes()
            else:
                if (cap['IP'].src == FwdSource
                        and cap['IP'].dst == FwdDestination):
                    FwdCaps.append(cap)
                if (cap['IP'].dst == FwdSource
                        and cap['IP'].src == FwdDestination):
                    bwdCaps.append(cap)
            deltaFlow.append(abs(cap.time * 1000000 - flowLastSeen))
            flowLastSeen = cap.time * 1000000
            flowPacketNumber += 1
    for i, filteredCap in enumerate(filteredCaps):
        lengthofFlowPackets.append(int(filteredCaps[i].length))
        # FwdFlow
        if filteredCap['ip'].src == FwdSource and filteredCap[
                'ip'].dst == FwdDestination:
            if (i == 0):
                FlowSourcePort = filteredCap[
                    filteredCap.transport_layer].srcport
                FlowDestinationPort = filteredCap[
                    filteredCap.transport_layer].dstport
            FilterdFwdCaps.append(filteredCap)
        # BwdFlow
        if filteredCap['ip'].src == FwdDestination and filteredCap[
                'ip'].dst == FwdSource:
            FilterdBwdCaps.append(filteredCap)
    flowIATTotal = sum(deltaFlow)
    flowIATMean = numpy.mean(deltaFlow)
    flowIATStd = numpy.std(deltaFlow, ddof=1)
    flowIATMax = max(deltaFlow)
    flowIATMin = min(deltaFlow)
    flowDuration = (flowLastSeen - flowStartTime)
    for i, filteredCap in enumerate(FilterdFwdCaps):
        lengthofFwdPackets.append(int(FilterdFwdCaps[i].length))
        if int(FilterdFwdCaps[i].length) >= 1:
            Act_data_pkt_forward += 1
    for i, cap in enumerate(FwdCaps):
        if TCP in cap:
            size = lengthofFwdPackets[i]
            if flag == True:
                if (blastBulkTS > fbulkStartHelper):
                    fbulkStartHelper = 0
                if (size <= 0):
                    flag = False
                    lengthofFwdPackets[i] += 1
                if (fbulkStartHelper == 0):
                    fbulkStartHelper = cap.time * 1000000
                    fbulkPacketCountHelper = 1
                    fbulkSizeHelper = size
                    flastBulkTS = cap.time * 1000000
                else:
                    if ((cap.time * 1000000 - flastBulkTS) / 1000000.0 > 1.0):
                        fbulkStartHelper = cap.time * 1000000
                        flastBulkTS = cap.time * 1000000
                        fbulkPacketCountHelper = 1
                        fbulkSizeHelper = size
                    else:
                        fbulkPacketCountHelper += 1
                        fbulkSizeHelper += size
                        if (fbulkPacketCountHelper == 4):
                            fbulkStateCount += 1
                            fbulkPacketCount += fbulkPacketCountHelper
                            fbulkSizeTotal += fbulkSizeHelper
                            fbulkDuration += cap.time * 1000000 - fbulkStartHelper
                        if (fbulkPacketCountHelper > 4):
                            fbulkPacketCount += 1
                            fbulkSizeTotal += size
                            fbulkDuration += cap.time * 1000000 - flastBulkTS
                        flastBulkTS = cap.time * 1000000
            if (i == 0):
                fwdWindowSize = cap[TCP].window
                F = cap['TCP'].flags
                if F & PSH:
                    fwdPSHFlags += 1
                if F & URG:
                    fwdURGFlags += 1
            if i != 0:
                forwardLastSeen = FwdCaps[i - 1].time * 1000000
                deltaFwd.append(abs(cap.time * 1000000 - forwardLastSeen))
            F = cap['TCP'].flags
            if F & FIN:
                FINFlagCount += 1
            if F & SYN:
                SYNFlagCount += 1
            if F & RST:
                RSTFlagCount += 1
            if F & PSH:
                PSHFlagCount += 1
            if F & ACK:
                ACKFlagCount += 1
            if F & URG:
                URGFlagCount += 1
            if F & ECE:
                ECEFlagCount += 1
            if F & CWE:
                CWEFlagCount += 1
            if (sfLastPacketTS == -1):
                sfLastPacketTS = cap.time * 1000000
                sfAcHelper = cap.time * 1000000
            if ((cap.time - (sfLastPacketTS) / 1000000.0) > 1.0):
                sfCount += 1
                lastSFduration = (cap.time - sfAcHelper) * 1000000
                currentTime = (cap.time * 1000000 - sfLastPacketTS)
                if (currentTime - endActiveTime > threshold):
                    if (endActiveTime - startActive > 0):
                        flowActive.append(endActiveTime - startActive)
                    flowIdle.append(currentTime - endActiveTime)
                    endActiveTime = currentTime
                    startActive = currentTime
                else:
                    endActiveTime = currentTime
                sfAcHelper = cap.time * 1000000
            sfLastPacketTS = cap.time * 1000000
            c = cap.show(dump=True)
            word = "len"
            index = c.find(word)
            if c[index + 12:index + 16] != ' ':
                lengthofFwdHeaders.append(int(c[index + 12:index + 16]))
                TotalLengthofFwdHeaders += int(c[index + 12:index + 16])
                totalFwdPackets += 1
            else:
                lengthofFwdHeaders.append(0)
            if i == 0:
                min_seg_size_forward = int(c[index + 12:index + 16])
            else:
                min_seg_size_forward = min(min_seg_size_forward,
                                           int(c[index + 12:index + 16]))
    fwdPacketsPerSecond = totalFwdPackets / (flowDuration / 1000000)
    subFlowFwdPackets = totalFwdPackets / sfCount
    FwdIATTotal = sum(deltaFwd)
    FwdIATMean = numpy.mean(deltaFwd)
    FwdIATStd = numpy.std(deltaFwd, ddof=1)
    FwdIATMax = max(deltaFwd)
    FwdIATMin = min(deltaFwd)
    for i, filteredCap in enumerate(FilterdBwdCaps):
        lengthofBwdPackets.append(int(FilterdBwdCaps[i].length))
    for i, cap in enumerate(bwdCaps):
        if TCP in cap:
            size = lengthofBwdPackets[i]
            if flagBwd == True:
                if (flastBulkTS > bbulkStartHelper):
                    bbulkStartHelper = 0
                if (size <= 0):
                    flagBwd = False
                    lengthofBwdPackets[i] += 1
                if (bbulkStartHelper == 0):
                    bbulkStartHelper = cap.time * 1000000
                    bbulkPacketCountHelper = 1
                    bbulkSizeHelper = size
                    blastBulkTS = cap.time * 1000000
                else:
                    if ((cap.time * 1000000 - blastBulkTS) / 1000000.0 > 1.0):
                        bbulkStartHelper = cap.time * 1000000
                        blastBulkTS = cap.time * 1000000
                        bbulkPacketCountHelper = 1
                        bbulkSizeHelper = size
                    else:
                        bbulkPacketCountHelper += 1
                        bbulkSizeHelper += size
                        if (bbulkPacketCountHelper == 4):
                            bbulkStateCount += 1
                            bbulkPacketCount += bbulkPacketCountHelper
                            bbulkSizeTotal += bbulkSizeHelper
                            bbulkDuration += cap.time * 1000000 - bbulkStartHelper
                        if (bbulkPacketCountHelper > 4):
                            bbulkPacketCount += 1
                            bbulkSizeTotal += size
                            bbulkDuration += cap.time * 1000000 - flastBulkTS
                        flastBulkTS = cap.time * 1000000
            if (i == 0):
                bwdWindowSize = cap[TCP].window
                F = cap['TCP'].flags
                if F & PSH:
                    fwdPSHFlags += 1
                if F & URG:
                    fwdURGFlags += 1
            if i != 0:
                backwardLastSeen = bwdCaps[i - 1].time * 1000000
                deltaBwd.append(abs(cap.time * 1000000 - backwardLastSeen))
            F = cap['TCP'].flags
            if F & FIN:
                FINFlagCount += 1
            if F & SYN:
                SYNFlagCount += 1
            if F & RST:
                RSTFlagCount += 1
            if F & PSH:
                PSHFlagCount += 1
                bwdPSHFlags += 1
            if F & ACK:
                ACKFlagCount += 1
            if F & URG:
                URGFlagCount += 1
                bwdURGFlags += 1
            if F & ECE:
                ECEFlagCount += 1
            if F & CWE:
                CWEFlagCount += 1
            if (sfLastPacketTS == -1):
                sfLastPacketTS = cap.time * 1000000
                sfAcHelper = cap.time * 1000000
            if ((cap.time - (sfLastPacketTS) / 1000000.0) > 1.0):
                sfCount += 1
                lastSFduration = (cap.time - sfAcHelper) * 1000000
                currentTime = (cap.time * 1000000 - sfLastPacketTS)
                if (currentTime - endActiveTime > threshold):
                    if (endActiveTime - startActive > 0):
                        flowActive.append(endActiveTime - startActive)
                    flowIdle.append(currentTime - endActiveTime)
                    endActiveTime = currentTime
                    startActive = currentTime
                else:
                    endActiveTime = currentTime
                sfAcHelper = cap.time * 1000000
            sfLastPacketTS = cap.time * 1000000
            c = cap.show(dump=True)
            word = "len"
            index = c.find(word)
            if c[index + 12:index + 16] != ' ':
                lengthofBwdHeaders.append(int(c[index + 12:index + 16]))
                TotalLengthofBwdHeaders += int(c[index + 12:index + 16])
                totalBwdPackets += 1
    if (fbulkStateCount != 0):
        fAvgBytesPerBulk = fbulkSizeTotal / fbulkStateCount
    if (fbulkStateCount != 0):
        fAvgPacketsPerBulk = fbulkPacketCount / fbulkStateCount
    if (fbulkDuration != 0):
        fAvgBulkRate = fbulkSizeTotal / (fbulkDuration / 1000000)
    if (bbulkStateCount != 0):
        bAvgBytesPerBulk = bbulkSizeTotal / bbulkStateCount
    if (bbulkStateCount != 0):
        bAvgPacketsPerBulk = bbulkPacketCount / bbulkStateCount
    if (bbulkDuration != 0):
        bAvgBulkRate = bbulkSizeTotal / (bbulkDuration / 1000000)
    bwdPacketsPerSecond = totalBwdPackets / (flowDuration / 1000000)
    bwdIATTotal = sum(deltaBwd)
    bwdIATMean = numpy.mean(deltaBwd)
    bwdIATStd = numpy.std(deltaBwd, ddof=1)
    bwdIATMax = max(deltaBwd)
    bwdIATMin = min(deltaBwd)
    subFlowBwdPackets = totalBwdPackets / sfCount
    downUpRatio = totalBwdPackets / totalFwdPackets
    minPacketLength = min(lengthofFlowPackets)
    maxPacketLength = max(lengthofFlowPackets)
    packetLengthMean = numpy.mean(lengthofFlowPackets)
    packetLengthStd = numpy.std(lengthofFlowPackets, ddof=1)
    packetLengthvar = numpy.var(lengthofFlowPackets, ddof=1)
    avgPacketSize = sum(lengthofFlowPackets) / flowPacketNumber
    TotalLengthofFwdPackets = sum(lengthofFwdPackets)
    MaxLengthofFwdPackets = max(lengthofFwdPackets)
    MinLengthofFwdPackets = min(lengthofFwdPackets)
    FwdPacketLengthMean = numpy.mean(lengthofFwdPackets)
    FwdPacketLengthStd = numpy.std(lengthofFwdPackets, ddof=1)
    fwdAvgSegmentSize = TotalLengthofFwdPackets / totalFwdPackets
    subFlowFwdBytes = TotalLengthofFwdPackets / sfCount
    TotalLengthofBwdPackets = sum(lengthofBwdPackets)
    MaxLengthofBwdPackets = max(lengthofBwdPackets)
    MinLengthofBwdPackets = min(lengthofBwdPackets)
    bwdPacketLengthMean = numpy.mean(lengthofBwdPackets)
    bwdPacketLengthStd = numpy.std(lengthofBwdPackets, ddof=1)
    bwdAvgSegmentSize = TotalLengthofBwdPackets / totalBwdPackets
    subFlowBwdBytes = TotalLengthofBwdPackets / sfCount
    FlowID = (FlowSource + '-' + FlowDestination + '-' + FlowSourcePort +
              '-' + FlowDestinationPort + '-' + str(protocol))
    packetdPerSecond = flowPacketNumber / flowDuration
    bytePerSecond = (TotalLengthofFwdHeaders +
                     TotalLengthofBwdHeaders) / flowDuration
    if len(flowActive) != 0:
        activeMean = numpy.mean(flowActive)
        activeStd = numpy.std(flowActive, ddof=1)
        activeMax = max(flowActive)
        activeMin = min(flowActive)
    else:
        activeMax = 0
        activeMean = 0
        activeMin = 0
        activeStd = 0
    if len(flowIdle) != 0:
        idleMean = numpy.mean(flowIdle)
        idleStd = numpy.std(flowIdle, ddof=1)
        idleMax = max(flowIdle)
        idleMin = min(flowIdle)
    else:
        idleMax = 0
        idleMean = 0
        idleMin = 0
        idleStd = 0
    with open(csvPath, 'w') as myfile:
        wr = csv.writer(myfile)
        fieldnames = [
            'Flow ID', 'Source IP', 'Source Port', 'Destination IP',
            'Destination Port', 'Protocol', 'Flow Duration',
            'Total Fwd Packets', 'Total Bwd Packets',
            'Total Length of Fwd Packets', 'Total Length of Bwd Packets',
            'Fwd Packet Length Max', 'Fwd Packet Length Min',
            'Fwd Packet Length Mean', 'Fwd Packet Length Std',
            'Bwd Packet Length Max', 'Bwd Packet Length Min',
            'Bwd Packet Length Mean', 'Bwd Packet Length Std',
            'Fwd Packets/s', 'Bwd Packets/s', 'Flow Bytes/s',
            'Flow Packets/s', 'Flow IAT Mean', 'Flow IAT Std', 'Flow IAT Max',
            'Flow IAT Min', 'Fwd IAT Total', 'Fwd IAT Mean', 'Fwd IAT Std',
            'Fwd IAT Max', 'Fwd IAT Min', 'Bwd IAT Total', 'Bwd IAT Mean',
            'Bwd IAT Std', 'Bwd IAT Max', 'Bwd IAT Min', 'Fwd PSH Flags',
            'Bwd PSH Flags', 'Fwd URG Flags', 'Bwd URG Flags',
            'Fwd Header Length', 'Bwd Header Length', "Pkt Len Min",
            "Pkt Len Max", "Pkt Len Mean", "Pkt Len Std", "Pkt Len Var",
            'FIN Flag Count', 'SYN Flag Count', 'RST Flag Count',
            'PSH Flag Count', 'ACK Flag Count', 'URG Flag Count',
            'CWE Flag Count', 'ECE Flag Count', 'Down/Up Ratio',
            'Average Packet Size', ' Avg Fwd Segment Size',
            ' Avg Bwd Segment Size', 'Fwd Avg Bytes/Bulk',
            ' Fwd Avg Packets/Bulk', ' Fwd Avg Bulk Rate',
            ' Bwd Avg Bytes/Bulk', ' Bwd Avg Packets/Bulk',
            'Bwd Avg Bulk Rate', 'Subflow Fwd Packets', ' Subflow Fwd Bytes',
            ' Subflow Bwd Packets', ' Subflow Bwd Bytes',
            'Init_Win_bytes_forward', ' Init_Win_bytes_backward',
            ' act_data_pkt_fwd', ' min_seg_size_forward', 'Active Mean',
            ' Active Std', ' Active Max', ' Active Min', 'Idle Mean',
            ' Idle Std', ' Idle Max', ' Idle Min'
        ]
        writer = csv.DictWriter(myfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerow({
            'Flow ID': FlowID,
            'Source IP': FlowSource,
            'Source Port': FlowSourcePort,
            'Destination IP': FlowDestination,
            'Destination Port': FlowDestinationPort,
            'Protocol': protocol,
            'Flow Duration': flowDuration,
            'Total Fwd Packets': totalFwdPackets,
            'Total Bwd Packets': totalBwdPackets,
            'Total Length of Fwd Packets': TotalLengthofFwdPackets,
            'Total Length of Bwd Packets': TotalLengthofBwdPackets,
            'Fwd Packet Length Max': MaxLengthofFwdPackets,
            'Fwd Packet Length Min': MinLengthofFwdPackets,
            'Fwd Packet Length Mean': FwdPacketLengthMean,
            'Fwd Packet Length Std': FwdPacketLengthStd,
            'Bwd Packet Length Max': MaxLengthofBwdPackets,
            'Bwd Packet Length Min': MinLengthofBwdPackets,
            'Bwd Packet Length Mean': bwdPacketLengthMean,
            'Bwd Packet Length Std': bwdPacketLengthStd,
            'Flow Bytes/s': bytePerSecond,
            'Flow Packets/s': packetdPerSecond,
            'Flow IAT Mean': flowIATMean,
            'Flow IAT Std': flowIATStd,
            'Flow IAT Max': flowIATMax,
            'Flow IAT Min': flowIATMin,
            'Fwd IAT Total': FwdIATTotal,
            'Fwd IAT Mean': FwdIATMean,
            'Fwd IAT Std': FwdIATStd,
            'Fwd IAT Max': FwdIATMax,
            'Fwd IAT Min': FwdIATMin,
            'Bwd IAT Total': bwdIATTotal,
            'Bwd IAT Mean': bwdIATMean,
            'Bwd IAT Std': bwdIATStd,
            'Bwd IAT Max': bwdIATMax,
            'Bwd IAT Min': bwdIATMin,
            'Fwd PSH Flags': fwdPSHFlags,
            'Bwd PSH Flags': bwdPSHFlags,
            'Fwd URG Flags': fwdURGFlags,
            'Bwd URG Flags': bwdURGFlags,
            'Fwd Header Length': TotalLengthofFwdHeaders,
            'Bwd Header Length': TotalLengthofBwdHeaders,
            'Fwd Packets/s': fwdPacketsPerSecond,
            'Bwd Packets/s': bwdPacketsPerSecond,
            "Pkt Len Min": minPacketLength,
            "Pkt Len Max": maxPacketLength,
            "Pkt Len Mean": packetLengthMean,
            "Pkt Len Std": packetLengthStd,
            "Pkt Len Var": packetLengthvar,
            'FIN Flag Count': FINFlagCount,
            'SYN Flag Count': SYNFlagCount,
            'RST Flag Count': RSTFlagCount,
            'PSH Flag Count': PSHFlagCount,
            'ACK Flag Count': ACKFlagCount,
            'URG Flag Count': URGFlagCount,
            'CWE Flag Count': CWEFlagCount,
            'ECE Flag Count': ECEFlagCount,
            'Down/Up Ratio': downUpRatio,
            'Average Packet Size': avgPacketSize,
            ' Avg Fwd Segment Size': fwdAvgSegmentSize,
            ' Avg Bwd Segment Size': bwdAvgSegmentSize,
            'Fwd Avg Bytes/Bulk': fAvgBytesPerBulk,
            ' Fwd Avg Packets/Bulk': fAvgPacketsPerBulk,
            ' Fwd Avg Bulk Rate': fAvgBulkRate,
            ' Bwd Avg Bytes/Bulk': bAvgBytesPerBulk,
            ' Bwd Avg Packets/Bulk': bAvgPacketsPerBulk,
            'Bwd Avg Bulk Rate': bAvgBulkRate,
            'Subflow Fwd Packets': subFlowFwdPackets,
            ' Subflow Fwd Bytes': subFlowFwdBytes,
            ' Subflow Bwd Packets': subFlowBwdPackets,
            ' Subflow Bwd Bytes': subFlowBwdBytes,
            'Init_Win_bytes_forward': fwdWindowSize,
            ' Init_Win_bytes_backward': bwdWindowSize,
            ' act_data_pkt_fwd': Act_data_pkt_forward,
            ' min_seg_size_forward': min_seg_size_forward,
            'Active Mean': activeMean,
            ' Active Std': activeStd,
            ' Active Max': activeMax,
            ' Active Min': activeMin,
            'Idle Mean': idleMean,
            ' Idle Std': idleStd,
            ' Idle Max': idleMax,
            ' Idle Min': idleMin
        })
    features = numpy.array([
        flowDuration, totalFwdPackets, totalBwdPackets,
        TotalLengthofFwdPackets, TotalLengthofBwdPackets,
        MaxLengthofFwdPackets, MinLengthofFwdPackets, FwdPacketLengthMean,
        FwdPacketLengthStd, MaxLengthofBwdPackets, MinLengthofBwdPackets,
        bwdPacketLengthMean, bwdPacketLengthStd, bytePerSecond,
        packetdPerSecond, flowIATMean, flowIATStd, flowIATMax, flowIATMin,
        FwdIATTotal, FwdIATMean, FwdIATStd, FwdIATMax, FwdIATMin,
        bwdIATTotal, bwdIATMean, bwdIATStd, bwdIATMax, bwdIATMin,
        fwdPSHFlags, bwdPSHFlags, fwdURGFlags, bwdURGFlags,
        TotalLengthofFwdHeaders, TotalLengthofBwdHeaders,
        fwdPacketsPerSecond, bwdPacketsPerSecond, minPacketLength,
        maxPacketLength, packetLengthMean, packetLengthStd, packetLengthvar,
        FINFlagCount, SYNFlagCount, RSTFlagCount, PSHFlagCount, ACKFlagCount,
        URGFlagCount, CWEFlagCount, ECEFlagCount, downUpRatio, avgPacketSize,
        fwdAvgSegmentSize, bwdAvgSegmentSize, fAvgBytesPerBulk,
        fAvgPacketsPerBulk, fAvgBulkRate, bAvgBytesPerBulk,
        bAvgPacketsPerBulk, bAvgBulkRate, subFlowFwdPackets, subFlowFwdBytes,
        subFlowBwdPackets, subFlowBwdBytes, fwdWindowSize, bwdWindowSize,
        Act_data_pkt_forward, min_seg_size_forward, activeMean, activeStd,
        activeMax, activeMin, idleMean, idleStd, idleMax, idleMin
    ])
    return features
# Find hosts attempting a directory traversal
import pyshark

# First read the input source file.
# Next we filter for packets that use HTTP as the application protocol, and of
# these we keep only the HTTP requests whose URI attempts a directory traversal.
input_trace = pyshark.FileCapture(
    "project2.trace",
    display_filter='http and http.request.uri contains "../.."')

# Create an empty set
src_host = set()

# Iterate over these packets and store the source IP address of every host
# attempting a directory traversal.
for packet in input_trace:
    src_host.add(packet.ip.src)

print(sorted(src_host))
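# A short companion sketch: besides who attempted the traversal, it can help
# to see what they asked for. Same capture and filter as above; request_uri is
# the attribute form of the http.request.uri field used in the filter.
import pyshark

trace = pyshark.FileCapture(
    "project2.trace",
    display_filter='http and http.request.uri contains "../.."')
for packet in trace:
    # One (source IP, URI) pair per offending request.
    print(packet.ip.src, packet.http.request_uri)
trace.close()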
import pyshark
import base64

cap = pyshark.FileCapture('somepang.pcap',
                          display_filter='icmp.type==8')  # filter by echo request

data = b''
for packet in cap:
    # decode the hex-encoded ICMP payload and keep its last 2 bytes
    data += bytes.fromhex(packet['icmp'].data)[-2:]

f = open('flag.jpg', 'wb')
f.write(base64.b64decode(data))
print('done')
import pyshark
from binascii import unhexlify

# The snippet originally began inside stream_callback; its header and the
# streams set are reconstructed here and should be read as assumptions.
streams = set()

def stream_callback(pkt):
    streams.add(pkt.tcp.stream)

def decrypt_callback(pkt):
    global send
    global recv
    if hasattr(pkt, 'data'):
        if pkt.tcp.dstport == server:
            send += unhexlify(pkt.data.data)
        else:
            recv += unhexlify(pkt.data.data)

shark = pyshark.FileCapture(
    'misc_chall.pcapng',
    display_filter="tcp.port == 25565 and ip.addr == 123.207.121.32")
shark.apply_on_packets(stream_callback)
print(streams)

for i in streams:
    send = b''
    recv = b''
    shark = pyshark.FileCapture('misc_chall.pcapng',
                                display_filter=f"tcp.stream eq {i}")
    shark.apply_on_packets(decrypt_callback)
    # server, password, and OpenSSLCrypto come from the elided part of the
    # original script.
    decryptor = OpenSSLCrypto('aes-256-cfb', password, recv[:16], 0)
    data = decryptor.update(recv[16:])
plt.xticks(rotation=45)
plt.xlabel("Time")
plt.ylabel("KBytes")
plt.show()


# MAIN
print("")
printGreen("********************************************")
printGreen("************* DEMO LIVECAPTURE *************")
printGreen("********************************************")
'''
file = inputYellow("Enter the capture file name: ")
cap = pyshark.FileCapture(file)
'''
cap = pyshark.FileCapture("prova.pcap")
print("")

# Read the capture file
nums = []
dates = []
count = 0
for pkt in cap:
    dates.append(float(pkt.frame_info.time_epoch))
    nums.append(int(pkt.length) / 1000)
    count = count + 1
    print("\r\033[F\033[K-- Packets examined: " + "#" + str(count) + " " +
          str(int(pkt.length)))

print(f"-- The number of packets is: {len(nums)}")

# Aggregate data over intervals
    if os.path.isfile("{}{}_protocols.csv".format(pcapdir, file)):
        print("Skipping {}; pcap is already analyzed".format(file))
        continue

    # Debug
    fileID += 1
    print("Analyzing {} ({}/{} ({}%))".format(
        file, fileID, totalFiles, round(100 / totalFiles * fileID)))

    # Setting up
    protocolCounters = {}
    protocolBytes = {}
    destinationIPCounters = {}

    # Reading packets in dir
    try:
        all_packets = pyshark.FileCapture(pcapdir + file, only_summaries=True)
    except:
        print("Error: Failed to read pcap file!")
        continue

    # Analyze packets
    print("Analyzing packets that were read...")
    i = 0
    try:
        for packet in all_packets:
            # Read packet data
            packet = str(packet).split(" ")
            protocol = packet[4]
            bytes = int(packet[5])
            s_ip = packet[2]

            # Find destination IP
def extract_attributes(src_file_name,
                       dst_file_name,
                       attacker_ip,
                       split_flags=False,
                       include_validation_attributes=False):
    pcap = pyshark.FileCapture(src_file_name, keep_packets=False)
    first = True
    with open(dst_file_name, "a") as csv_file:
        for packet in pcap:
            entry = {}
            if include_validation_attributes:
                for key in validation_attributes:
                    entry[key] = ''
            for key in attributes:
                if 'flag_' in key and split_flags == False:
                    continue
                entry[key] = ''
            try:
                entry['is_attack'] = 0
                if include_validation_attributes:
                    entry['timestamp'] = packet.sniff_time.strftime(
                        '%m/%d/%Y, %H:%M:%S:%f')
                entry['protocol'] = packet.highest_layer
                if 'ip' in packet:
                    if include_validation_attributes:
                        entry['src_ip'] = packet.ip.src
                        entry['dst_ip'] = packet.ip.dst
                    if packet.ip.src == attacker_ip or packet.ip.dst == attacker_ip:
                        entry['is_attack'] = 1
                    entry['ttl'] = packet.ip.ttl
                    entry['ip_len'] = packet.ip.len
                    if split_flags:
                        entry['ip_flag_df'] = packet.ip.flags_df
                        entry['ip_flag_mf'] = packet.ip.flags_mf
                        entry['ip_flag_rb'] = packet.ip.flags_rb
                    else:
                        entry['ip_flags'] = packet.ip.flags
                    if 'udp' in packet:
                        entry['src_port'] = packet.udp.srcport
                        entry['dst_port'] = packet.udp.dstport
                    elif 'tcp' in packet:
                        entry['src_port'] = packet.tcp.srcport
                        entry['dst_port'] = packet.tcp.dstport
                        if split_flags:
                            entry['tcp_flag_res'] = packet.tcp.flags_res
                            entry['tcp_flag_ns'] = packet.tcp.flags_ns
                            entry['tcp_flag_cwr'] = packet.tcp.flags_cwr
                            entry['tcp_flag_ecn'] = packet.tcp.flags_ecn
                            entry['tcp_flag_urg'] = packet.tcp.flags_urg
                            entry['tcp_flag_ack'] = packet.tcp.flags_ack
                            entry['tcp_flag_push'] = packet.tcp.flags_push
                            entry['tcp_flag_reset'] = packet.tcp.flags_reset
                            entry['tcp_flag_syn'] = packet.tcp.flags_syn
                            entry['tcp_flag_fin'] = packet.tcp.flags_fin
                        else:
                            entry['tcp_flags'] = packet.tcp.flags
                else:
                    continue
                if 'mqtt' in packet:
                    entry['mqtt_messagetype'] = packet.mqtt.msgtype
                    entry['mqtt_messagelength'] = packet.mqtt.len
                    if 'conflags' in packet.mqtt.field_names:
                        if split_flags:
                            entry['mqtt_flag_uname'] = packet.mqtt.conflag_uname
                            entry['mqtt_flag_passwd'] = packet.mqtt.conflag_passwd
                            entry['mqtt_flag_retain'] = packet.mqtt.conflag_retain
                            entry['mqtt_flag_qos'] = packet.mqtt.conflag_qos
                            entry['mqtt_flag_willflag'] = packet.mqtt.conflag_willflag
                            entry['mqtt_flag_clean'] = packet.mqtt.conflag_cleansess
                            entry['mqtt_flag_reserved'] = packet.mqtt.conflag_reserved
                        else:
                            entry['mqtt_flags'] = packet.mqtt.conflags
                writer = csv.DictWriter(csv_file, list(entry.keys()),
                                        delimiter=',')
                if first:
                    writer.writeheader()
                    first = False
                writer.writerow(entry)
            except Exception:
                traceback.print_exc()
                break
    pcap.close()
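# Hypothetical invocation of extract_attributes() above. The file names and
# attacker IP are assumptions; the attributes / validation_attributes globals
# it reads come from the elided surrounding module.
extract_attributes('capture.pcap', 'features.csv', '192.168.1.50',
                   split_flags=True, include_validation_attributes=False)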
def main():
    all_router_multicast = "ff02::1"
    first_iteration = True
    packet_type = "ICMPV6"
    time_window = 2.000000
    path_to_pcap_file = '/root/final_RA.pcap'
    pcap = pyshark.FileCapture(path_to_pcap_file)
    prefix_list = []
    source_IP_list = []
    destination_IP_list = []

    # create logger
    logger = logging.getLogger('RA_IDS_flow_based_time')
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler('RA_IDS_flow_based_time.log')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)

    start_time = pcap[0].sniff_time
    for pkt in pcap:
        current_time = pkt.sniff_time
        elapsed_time = current_time - start_time
        if pkt.highest_layer == packet_type:
            logger.debug('Packet is type ICMPv6')
            if pkt.icmpv6.type == "134":
                logger.debug('Packet is type ICMPv6 Router Advertisement 134')
                var_prefix = str(pkt.icmpv6.opt_prefix)
                prefix_list.append(var_prefix.replace(":", ""))
                if pkt.ipv6.dst == all_router_multicast:
                    logger.debug('Packet Flow to destination {}'.format(
                        all_router_multicast))
                    var_dst = pkt.ipv6.dst
                    destination_IP_list.append(var_dst.replace(":", ""))
                    var_src = pkt.ipv6.src
                    source_IP_list.append(var_src.replace(":", ""))
        else:
            logger.debug('Packet is NOT type ICMPv6')
        if elapsed_time.total_seconds() >= time_window:
            start_time = pkt.sniff_time
            logger.info('---------- Window Starts ----------')
            logger.info('Start time: {}'.format(start_time))
            logger.info('Current time: {}'.format(current_time))
            logger.info('Elapsed time: {}'.format(elapsed_time))
            logger.info(
                "Calculating Shannon's entropy for flow based - source IP")
            logger.info('Source_IP count: {}'.format(len(source_IP_list)))
            entropy_source_IP = numpy_shannon_entropy(source_IP_list)
            logger.info('Entropy for flow based - source IP u: {}\n'.format(
                entropy_source_IP))
            logger.info(
                "Calculating Shannon's entropy for flow based - destination IP")
            logger.info('Destination_IP count: {}'.format(
                len(destination_IP_list)))
            entropy_destination_IP = numpy_shannon_entropy(destination_IP_list)
            logger.info(
                'Entropy for flow based - destination IP u: {}\n'.format(
                    entropy_destination_IP))
            if first_iteration == True:
                logger.debug('In first iteration - calculating EWMA threshold')
                source_IP_threshold = ewma(ewma_lambda, entropy_source_IP,
                                           entropy_source_IP)
                previous_source_IP_threshold = source_IP_threshold
                logger.info(
                    'source_IP_threshold: {}\n'.format(source_IP_threshold))
                first_iteration = False
            else:
                logger.debug(
                    'Not in first iteration - calculating EWMA threshold')
                source_IP_threshold = ewma(ewma_lambda, entropy_source_IP,
                                           previous_source_IP_threshold)
                previous_source_IP_threshold = source_IP_threshold
                logger.info(
                    'source_IP_threshold: {}\n'.format(source_IP_threshold))
            if entropy_source_IP > source_IP_threshold:
                logger.info(
                    'RA Flooding Detected - entropy of source IP > threshold\n')
                logger.info(
                    'entropy_source_IP: {}'.format(entropy_source_IP))
                logger.info(
                    'source_IP_threshold: {}\n'.format(source_IP_threshold))
            # flush lists
            prefix_list = []
            source_IP_list = []
            destination_IP_list = []
            logger.info("---------- Window Ends ----------")
# Identify host(s) performing a traceroute, by recording the TTL values seen
# on bare-UDP flows.
import pyshark

# Input the trace file.
# The filter is meant to accept UDP packets with no application-layer payload.
# Protocols like DNS also use UDP as a transport-level protocol, but we don't
# want to consider those packets: traceroute uses UDP without any application
# payload, so we keep only the UDP traffic that none of the listed application
# protocols claim.
trace_input = pyshark.FileCapture(
    "project2.trace",
    display_filter='udp and not dns and not browser and not mdns and not cups '
    'and not nbns and not auto_rp and not smb_netlogon')

# Declare an empty dictionary. Each entry is of the form
# (<source_ip>, <destination_ip>): <set_of_ttl_values>, i.e. the key is a UDP
# connection endpoint pair and the value is the set of all TTL values seen.
ip_dict = {}

# Iterate over the packets from trace_input, extract the endpoints, and record
# each TTL value observed for that (source, destination) pair.
for packet in trace_input:
    ip_src = packet.ip.src
    ip_dst = packet.ip.dst
    # Get the IP endpoints for the traceroute.
    ip_endpoints = (ip_src, ip_dst)
    ttl_set = set()
    if ip_endpoints in ip_dict:
        ttl_set = ip_dict[ip_endpoints]
    ttl_set.add(int(packet.ip.ttl))
    # Store the TTL set in the dictionary
    ip_dict[ip_endpoints] = ttl_set
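# A minimal follow-up sketch (not part of the original): a traceroute shows up
# as one endpoint pair probed with many distinct TTLs, so rank the pairs by
# how many TTL values they used. The threshold of 5 is an arbitrary assumption.
for (src, dst), ttls in sorted(ip_dict.items(),
                               key=lambda kv: len(kv[1]),
                               reverse=True):
    if len(ttls) >= 5:
        print("{} -> {}: {} distinct TTLs ({}..{})".format(
            src, dst, len(ttls), min(ttls), max(ttls)))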
def perform_analysis(pid, idx, files_len, pcap_file):
    print("P%s (%s/%s): Processing pcap file \"%s\"..." %
          (pid, idx, files_len, pcap_file))
    cap = pyshark.FileCapture(pcap_file, use_json=True)
    Utils.sysUsage("PCAP file loading")
    cap.close()

    base_ts = 0
    try:
        if args.no_time_shift:
            cap[0]
        else:
            base_ts = float(cap[0].frame_info.time_epoch)
    except KeyError:
        print(c.NO_PCKT % pcap_file, file=sys.stderr)
        return

    node_id = Node.NodeId(args.mac_addr, args.ip_addr)
    node_stats = Node.NodeStats(node_id, base_ts, devices)

    print("  P%s: Processing packets..." % pid)
    try:
        for packet in cap:
            node_stats.processPacket(packet)
    except:
        print(
            "  %sP%s: Error: There is something wrong with \"%s\". Skipping file.%s"
            % (RED, pid, pcap_file, END),
            file=sys.stderr)
        return
    del cap

    Utils.sysUsage("Packets processed")

    print("  P%s: Mapping IP to host..." % pid)
    ip_map = IP.IPMapping()
    if args.hosts_dir != "":
        host_file = args.hosts_dir + "/" + os.path.basename(
            pcap_file)[:-4] + "txt"
        ip_map.extractFromFile(pcap_file, host_file)
    else:
        ip_map.extractFromFile(pcap_file)
    ip_map.loadOrgMapping(IP_TO_ORG)
    ip_map.loadCountryMapping(IP_TO_COUNTRY)

    Utils.sysUsage("TShark hosts loaded")

    print("  P%s: Generating CSV output..." % pid)
    de = DataPresentation.DomainExport(node_stats.stats.stats, ip_map,
                                       GEO_DB_CITY, GEO_DB_COUNTRY)
    de.loadDiffIPFor("eth") if args.find_diff else de.loadIPFor("eth")
    de.loadDomains(args.dev, args.lab, args.experiment, args.network,
                   pcap_file, str(base_ts))
    de.exportDataRows(args.out_file)
    print("  P%s: Analyzed data from \"%s\" successfully written to \"%s\"" %
          (pid, pcap_file, args.out_file))

    Utils.sysUsage("Data exported")

    if len(plots) != 0:
        print("  P%s: Generating plots..." % pid)
        pm = DataPresentation.PlotManager(node_stats.stats.stats, plots)
        pm.ipMap = ip_map
        pm.generatePlot(pid, pcap_file, args.fig_dir, GEO_DB_CITY,
                        GEO_DB_COUNTRY)
        Utils.sysUsage("Plots generated")
def __init__(self, pcap_file_name, pcap_parser_engine="scapy"):
    """
    Init function imports libraries based on the parser engine selected

    Return:
    * packetDB ==> Full Duplex Packet Streams
      - Used while finally plotting streams as graph
      - dump packets during report generation
    * lan_hosts ==> Private IP (LAN) list
      - device details
    * destination_hosts ==> Destination Hosts
      - communication details
      - tor identification
      - malicious identification
    """
    # Initialize Data Structures
    memory.packet_db = {}
    memory.lan_hosts = {}
    memory.destination_hosts = {}
    memory.possible_mal_traffic = []
    memory.possible_tor_traffic = []

    # Set Pcap Engine
    self.engine = pcap_parser_engine

    # Import library for pcap parser engine selected
    if pcap_parser_engine == "scapy":
        try:
            from scapy.all import rdpcap
        except:
            logging.error("Cannot import selected pcap engine: Scapy!")
            sys.exit()

        try:
            from scapy.all import load_layer
            global tls_view_feature
            tls_view_feature = True
            logging.info("tls view feature enabled")
        except:
            # Keep the flag defined even when load_layer is unavailable, so
            # the check below cannot raise a NameError.
            tls_view_feature = False
            logging.info("tls view feature not enabled")

        if tls_view_feature:
            load_layer("tls")

        # Scapy sessions and other types use more O(N) iterations so just
        # - use rdpcap + our own iteration (create full duplex streams)
        self.packets = rdpcap(pcap_file_name)
    elif pcap_parser_engine == "pyshark":
        try:
            import pyshark
        except:
            logging.error("Cannot import selected pcap engine: PyShark!")
            sys.exit()
        self.packets = pyshark.FileCapture(pcap_file_name,
                                           include_raw=True,
                                           use_json=True)
        #self.packets.load_packets()
        #self.packets.apply_on_packets(self.analyse_packet_data, timeout=100)

    # Analyse capture to populate data
    self.analyse_packet_data()
def count_packets():
    cap = pyshark.FileCapture('http.cap', keep_packets=False)
    cap.apply_on_packets(counter, timeout=10000)
    return len(packets_array)
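# A minimal sketch of the globals count_packets() relies on, which are not
# shown above; the callback name and list layout are assumptions for
# illustration.
packets_array = []

def counter(pkt):
    # apply_on_packets calls this once per packet; keeping a reference means
    # len(packets_array) reflects the total packet count afterwards.
    packets_array.append(pkt)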
def load_packets(filename):
    pack = pyshark.FileCapture(filename, display_filter='dns')
    return pack
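# A hypothetical usage sketch for load_packets() above: iterate the returned
# capture and print each DNS query name (qry_name is the attribute form of the
# dns.qry.name field). 'queries.pcap' is an assumed file name.
cap = load_packets('queries.pcap')
for pkt in cap:
    if 'DNS' in pkt:
        print(pkt.dns.qry_name)
cap.close()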
import pyshark cap = pyshark.FileCapture("mbti.pcap") for i in cap: if 'ssl' in i: if 'app_data' in dir(i.ssl): data = i.ssl.app_data.replace(':', '').decode('hex') print "APP DATA ", i.ip.src, len(data)
def extract_features(file, time_interval):
    # open the capture file and set up the parameters for the sliding time window
    td = datetime.timedelta(
        seconds=time_interval)  # the timedelta of the interval for window sliding
    cap = pyshark.FileCapture(file)
    startTime = cap[0].sniff_time
    endTime = cap[0].sniff_time + td
    #print(startTime.strftime("%A, %d. %B %Y %I:%M.%S%p"))
    #print(endTime.strftime("%A, %d. %B %Y %I:%M.%S%p"))

    # prepare the feature dictionary and list of the features at every time window
    startCount = 1
    endCount = 1
    startWindow = cap[0].sniff_time + datetime.timedelta(seconds=START_WINDOW)
    endWindow = cap[-1].sniff_time - datetime.timedelta(seconds=END_WINDOW)

    # percentage calculations
    totalCount = 0
    TLSCount = 0
    STUNCount = 0
    TCPCount = 0
    ICMPCount = 0
    UDPCount = 0

    # host number calculations
    UDPHosts = []
    TLSHosts = []
    STUNHosts = []
    TCPHosts = []

    ftrs = Features(startTime, time_interval)
    featureArray = []

    # start processing the packets
    while True:
        # get the next packet from capture
        try:
            currpkt = cap.next()
        except StopIteration:
            break
        try:
            _ = currpkt.ip
        except AttributeError:
            continue
        t = currpkt.sniff_time
        totalCount += 1

        # check the protocol of the packet, increment counts or save hosts
        if 'SSL' in currpkt:
            TLSCount += 1
            host = currpkt.ip.host
            if host not in TLSHosts:
                TLSHosts.append(host)
        if 'TCP' in currpkt:
            TCPCount += 1
            host = currpkt.ip.host
            if host not in TCPHosts:
                TCPHosts.append(host)
        if 'UDP' in currpkt:
            UDPCount += 1
            host = currpkt.ip.host
            if host not in UDPHosts:
                UDPHosts.append(host)
        if 'STUN' in currpkt:
            STUNCount += 1
            host = currpkt.ip.host
            if host not in STUNHosts:
                STUNHosts.append(host)
        if 'HTTP' in currpkt:
            ftrs.HTTPExists = 1
        if 'DTLS' in currpkt:
            ftrs.DTLSExists = 1
        if 'ICMP' in currpkt:
            ftrs.ICMPExists = 1
            ICMPCount += 1

        # check atStart and atEnd conditions
        if t < startWindow:
            if 'DNS' in currpkt:
                ftrs.DNSAtStart = 1
            if 'STUN' in currpkt:
                ftrs.firstSTUNMessageTime = 1
        elif t > endWindow:
            if 'DNS' in currpkt:
                ftrs.DNSAtEnd = 1

        # check the time window
        if endTime < t:
            # update startTime and endTime
            startTime = endTime
            endTime = startTime + td

            # finalize the feature list, prepare a new list
            # calculate numberOfXHosts fields for HMM
            length = len(UDPHosts)
            if length < 11:
                ftrs.numOfUDPHosts = 0
            elif length < 21:
                ftrs.numOfUDPHosts = 1
            else:
                ftrs.numOfUDPHosts = 2
            length = len(TLSHosts)
            if length < 11:
                ftrs.numOfTLSHosts = 0
            elif length < 21:
                ftrs.numOfTLSHosts = 1
            else:
                ftrs.numOfTLSHosts = 2
            length = len(TCPHosts)
            if length < 11:
                ftrs.numOfTCPHosts = 0
            elif length < 21:
                ftrs.numOfTCPHosts = 1
            else:
                ftrs.numOfTCPHosts = 2
            length = len(STUNHosts)
            if length < 11:
                ftrs.numOfSTUNHosts = 0
            elif length < 21:
                ftrs.numOfSTUNHosts = 1
            else:
                ftrs.numOfSTUNHosts = 2

            # calculate percentages (scale the fraction by 100 so that the
            # 1% / 4% thresholds below compare like with like)
            perc = TLSCount / totalCount * 100
            if perc < 1:
                ftrs.TLSMessagePercentage = 0
            elif perc < 4:
                ftrs.TLSMessagePercentage = 1
            else:
                ftrs.TLSMessagePercentage = 2
            perc = STUNCount / totalCount * 100
            if perc < 1:
                ftrs.STUNMessagePercentage = 0
            elif perc < 4:
                ftrs.STUNMessagePercentage = 1
            else:
                ftrs.STUNMessagePercentage = 2
            perc = TCPCount / totalCount * 100
            if perc < 1:
                ftrs.TCPMessagePercentage = 0
            elif perc < 4:
                ftrs.TCPMessagePercentage = 1
            else:
                ftrs.TCPMessagePercentage = 2
            rate = STUNCount / (t - cap[0].sniff_time).total_seconds()
            rate /= 1000
            if rate < 0.1:
                ftrs.STUNMessageRate = 0
            elif rate < 0.2:
                ftrs.STUNMessageRate = 1
            else:
                ftrs.STUNMessageRate = 2
            rate = ICMPCount / (t - cap[0].sniff_time).total_seconds()
            rate /= 1000
            if rate < 0.1:
                ftrs.ICMPMessageRate = 0
            elif rate < 0.2:
                ftrs.ICMPMessageRate = 1
            else:
                ftrs.ICMPMessageRate = 2
            rate = UDPCount / (t - cap[0].sniff_time).total_seconds()
            rate /= 1000
            if rate < 0.1:
                ftrs.UDPDatagramRate = 0
            elif rate < 0.2:
                ftrs.UDPDatagramRate = 1
            else:
                ftrs.UDPDatagramRate = 2

            featureArray.append(ftrs)
            ftrs = Features(startTime, time_interval)
            try:
                # set the features that will be carried on for all iterations
                oldftrs = featureArray[-1]
                if oldftrs.DNSAtStart:
                    ftrs.DNSAtStart = 1
                if oldftrs.DNSAtEnd:
                    ftrs.DNSAtEnd = 1
                if oldftrs.DTLSExists:
                    ftrs.DTLSExists = 1
                if oldftrs.HTTPExists:
                    ftrs.HTTPExists = 1
                if oldftrs.ICMPExists:
                    ftrs.ICMPExists = 1
                if oldftrs.firstSTUNMessageTime:
                    ftrs.firstSTUNMessageTime = 1
            except IndexError:
                pass
            print("Packets {}-{} processed".format(startCount, endCount))
            startCount = currpkt.frame_info.number
            endCount += 1
            #print(a.sniff_time.strftime("%A, %d. %B %Y %I:%M.%S%p"))

    print("Extraction finished.")
    return featureArray
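# Hypothetical driver for extract_features() above; the file name and the
# 10-second window are assumptions, and printing vars(f) assumes the elided
# Features class stores its fields as plain instance attributes.
features = extract_features('call_capture.pcap', 10)
for f in features:
    print(vars(f))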