def __init__(self, filename):
    """Open *filename* as a streamable pcap source.

    The underlying file is kept open, so data appended to it after
    construction can still be read through this class's methods.

    Parameters
    ----------
    filename:
        Path of the pcap file to be streamed.
    """
    # No position cached yet; populated lazily by the stream methods.
    self.file_loc = None
    RawPcapReader.__init__(self, filename)
def process_pcap(file_name):
    """Print source, destination and a repr summary for every HTTP packet.

    Only IPv4/TCP packets whose TCP payload parses as HTTP are reported.
    """
    print('Opening {}...'.format(file_name))
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        frame = Ether(pkt_data)
        # LLC frames will have 'len' instead of 'type'. We disregard those.
        if 'type' not in frame.fields:
            continue
        if frame.type != 0x0800:
            continue  # disregard non-IPv4 packets
        ip_layer = frame[IP]
        if ip_layer.proto != 6:
            continue  # ignore non-TCP packets
        segment = ip_layer[TCP]
        if isinstance(segment.payload, HTTP):
            summary = repr(segment[HTTP])
            print(f'{ip_layer.src} {ip_layer.dst} {summary}')
def process_pcap(file_name):
    """Collect non-local destination IPv4 addresses seen in a pcap file.

    Returns a tuple ``(destinations, ok)`` where *destinations* is a set
    of IP address strings and *ok* is False if reading failed partway
    (the partial result is still returned).
    """
    # print('Opening {}...'.format(file_name))
    ok = True
    destinations = set()
    try:
        for pkt_data, pkt_metadata in RawPcapReader(file_name):
            frame = Ether(pkt_data)
            # LLC frames will have 'len' instead of 'type'. We disregard those.
            if 'type' not in frame.fields:
                continue
            if frame.type != 0x0800:
                continue  # disregard non-IPv4 packets
            ip_layer = frame[IP]
            if not is_ip_local(str(ip_layer.dst)):
                destinations.add(ip_layer.dst)
    except Exception as e:
        # Best effort: report the failure but return what was gathered.
        ok = False
        print(e)
    return destinations, ok
def count_access(file_name, ip_dest):
    """Count TLS-carrying TCP packets sent to *ip_dest* and report sources.

    Scans *file_name* for IPv4/TCP packets destined to *ip_dest* that
    contain a TLS layer, then prints a summary (Spanish) listing the
    distinct source addresses.

    Returns:
        True when at least one matching source IP was found, else False.
    """
    count = 0
    ip_victim = []
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        ethernet_data = Ether(pkt_data)
        if 'type' not in ethernet_data.fields:
            continue  # LLC frame; no EtherType
        if ethernet_data.type != 0x0800:
            continue  # non-IPv4
        ip_data = ethernet_data[IP]
        # BUG FIX: indexing [TCP] on a non-TCP packet raises IndexError, so
        # filter on the protocol number before extracting the TCP layer.
        if ip_data.proto != 6:
            continue
        if ip_data.dst == ip_dest:
            tcp_data = ip_data[TCP]
            if tcp_data.haslayer(TLS):
                count += 1
                if ip_data.src not in ip_victim:
                    ip_victim.append(ip_data.src)
    if len(ip_victim) > 0:
        print(bcolor.WHITE + "----------- Informe de Resultados ---------")
        print(bcolor.WHITE + "Existe al menos {} paquetes analizados que contienen "\
              " posible envio de datos".format(count))
        print(bcolor.WHITE + "Listados de Ips: " + "\n" + "----------------")
        for data in ip_victim:
            print(bcolor.GREEN + data)
        return True
    return False
def process_pcap(file_name):
    """Report the total packet count and the IPv4/TCP ("interesting") count."""
    print('Opening {}...'.format(file_name))
    total = 0
    interesting = 0
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        total += 1
        frame = Ether(pkt_data)
        if 'type' not in frame.fields:
            # LLC frames carry 'len' instead of 'type'; skip them.
            continue
        if frame.type != 0x0800:
            continue  # non-IPv4
        if frame[IP].proto != 6:
            continue  # non-TCP
        interesting += 1
    print('{} contains {} packets ({} interesting)'.format(
        file_name, total, interesting))
def __init__(self, data):
    """Build a per-flow TCP byte-count table from the pcap file *data*.

    Flows are keyed bidirectionally: a packet matching an existing flow in
    either direction is accumulated into that flow's byte total. The table
    is stored in ``self.ft`` as {(src, dst, sport, dport): bytes}.
    """
    self.pkts = 0
    self.flows = 0
    self.ft = {}
    for pkt, metadata in RawPcapReader(data):
        self.pkts += 1
        ether = Ether(pkt)
        if ether.type == 0x86dd:  # IPv6
            ip = ether[IPv6]
            if ip.nh != 6:
                continue  # next-header is not TCP
            flow_size = ip.plen  # IPv6 payload length excludes the fixed header
        elif ether.type == 0x0800:  # IPv4
            ip = ether[IP]
            if ip.proto != 6:
                continue  # not TCP
            flow_size = ip.len - ip.ihl * 4  # total length minus IP header
        else:
            # BUG FIX: frames that are neither IPv4 nor IPv6 previously fell
            # through with `ip` and `flow_size` undefined, raising NameError.
            continue
        tcp = ip[TCP]
        key = (int(ip_address(ip.src)), int(ip_address(ip.dst)),
               tcp.sport, tcp.dport)
        reverse_key = (int(ip_address(ip.dst)), int(ip_address(ip.src)),
                       tcp.dport, tcp.sport)
        # Accumulate into whichever direction of the flow already exists.
        if key in self.ft:
            self.ft[key] += flow_size
        elif reverse_key in self.ft:
            self.ft[reverse_key] += flow_size
        else:
            self.ft[key] = flow_size
def resolvePack(fileName, myAddr): conversitions = [] # c = Conversition(myAddr, targetAddr) # conversitions.append(c) for ( pkt_data, pkt_metadata, ) in RawPcapReader(fileName): ether_pkt = Ether(pkt_data) if 'type' not in ether_pkt.fields: # LLC frames will have 'len' instead of 'type'. # We disregard those continue if ether_pkt.type != 0x0800: # disregard non-IPv4 packets continue ip_pkt = ether_pkt[IP] if ip_pkt.proto != 6: # Ignore non-TCP packet continue tcp_pkt = ip_pkt[TCP] flag = 0 for c in conversitions: if c.myAddr == ip_pkt.src and c.targetAddr == ip_pkt.dst: c.stream += 1 c.upStream += 1 if 'P' in tcp_pkt.flags: c.pStream += 1 if ether_pkt.len >= 1000: c.upBigStream += 1 flag = 1 break elif c.myAddr == ip_pkt.dst and c.targetAddr == ip_pkt.src: c.stream += 1 if 'P' in tcp_pkt.flags: c.pStream += 1 if ether_pkt.len <= 100: c.smallDownStream += 1 flag = 1 break if flag == 0: if ip_pkt.src == myAddr: c = Conversition(ip_pkt.src, ip_pkt.dst) c.stream += 1 c.upStream += 1 if 'P' in tcp_pkt.flags: c.pStream += 1 if ether_pkt.len >= 1000: c.upBigStream += 1 conversitions.append(c) elif ip_pkt.dst == myAddr: c = Conversition(ip_pkt.src, ip_pkt.dst) c.stream += 1 if 'P' in tcp_pkt.flags: c.pStream += 1 if ether_pkt.len <= 100: c.smallDownStream += 1 conversitions.append(c) return conversitions
def pcap2csv(in_pcap, out_csv):
    """Convert *in_pcap* into a CSV summary written to *out_csv*.

    PyShark (summary mode) supplies the human-readable per-packet fields
    while scapy's RawPcapReader supplies the raw bytes; both iterate the
    same file in lock-step and `render_csv_row` emits one CSV line per
    packet (returning falsy for packets it ignores).
    """
    # Open the pcap file with PyShark in "summary-only" mode.
    pcap_pyshark = pyshark.FileCapture(in_pcap, only_summaries=True)
    pcap_pyshark.load_packets()
    pcap_pyshark.reset()
    frame_num = 0
    ignored_packets = 0
    print("Starting to transfer format from pcap to csv...")
    with open(out_csv, 'w') as fh_csv:
        # CSV header line.
        print(
            "Packet_No,Time,Protocol,Packet_Info,Source,Destination,Length,L4_Payload",
            file=fh_csv)
        # Iterate the raw packets alongside the pyshark summaries.
        for packet_scapy, _ in RawPcapReader(in_pcap):
            try:
                packet_pyshark = pcap_pyshark.next_packet()
                frame_num += 1
                if not render_csv_row(packet_pyshark, packet_scapy, fh_csv):
                    ignored_packets += 1
            except StopIteration:
                # pyshark ran out of summaries before scapy ran out of packets.
                break
    print('{} packets read, {} packets ignored.'.format(
        frame_num, ignored_packets))
    print("File ", out_csv, " saved")
def print_timestamp_first_last(file_name):
    """Print the capture times of the first and last packet in a pcap file.

    Timestamps are rebuilt from the per-packet metadata (seconds plus
    microseconds since the epoch) and printed together on one line.

    Args:
        file_name: pcap file path.
    """
    first_timestamp = None
    last_timestamp = None
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        # Metadata carries the capture time as sec + usec since the epoch.
        ts = (datetime.datetime.fromtimestamp(pkt_metadata.sec)
              + datetime.timedelta(microseconds=pkt_metadata.usec))
        if first_timestamp is None:
            first_timestamp = ts
        last_timestamp = ts
    # BUG FIX: an empty capture previously raised UnboundLocalError because
    # first_timestamp/last_timestamp were only bound inside the loop.
    if first_timestamp is None:
        print("{}: no packets found".format(file_name))
        return
    print("{}: First packet captured at {}, last packet captured {}".format(
        file_name, first_timestamp, last_timestamp))
def merge_cap_files (pcap_file_list, out_filename, delete_src = False):
    """Merge several pcap files into *out_filename*, ordered by timestamp.

    Missing input files are tolerated (with a warning); when *delete_src*
    is True each source file is removed after its packets are read.
    """
    existing = [f for f in pcap_file_list if os.path.exists(f)]
    if not existing:
        print('ERROR: DP cores did not produce output files!')
        return
    if len(existing) != len(pcap_file_list):
        print("WARNING: not all DP cores produced output files\n")
    merged = []
    for src in existing:
        merged += RawPcapReader(src)
        if delete_src:
            os.unlink(src)
    # Sort all packets globally by capture timestamp.
    merged.sort(key=__ts_key)
    writer = RawPcapWriter(out_filename, linktype=1)
    writer._write_header(None)
    for raw, meta in merged:
        writer._write_packet(raw, sec=meta[0], usec=meta[1],
                             caplen=meta[2], wirelen=None)
def pcap_count_interesting(file_name):
    """Return ``(total, interesting)`` packet counts for *file_name*.

    A packet is "interesting" when it is an IPv4 frame carrying TCP.
    """
    total = 0
    interesting = 0
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        total += 1
        frame = Ether(pkt_data)
        if 'type' not in frame.fields:
            # LLC frames expose 'len' instead of 'type'; skip them.
            continue
        if frame.type != 0x0800:
            continue  # not IPv4
        if frame[IP].proto != 6:
            continue  # not TCP
        interesting += 1
    return total, interesting
def read_all(self, ipg_usec, min_ipg_usec, speedup, split_mode=None):
    """Load all packets from ``self.pcap_file`` with rebuilt timestamps.

    When *ipg_usec* is None, the original inter-packet gaps are kept
    (scaled down by *speedup*, clamped up to *min_ipg_usec*); otherwise a
    fixed gap of *min_ipg_usec* or *ipg_usec*/speedup is applied.

    Returns ``self.pkts_arr`` ([pkt_data, timestamp] pairs) when
    *split_mode* is None, otherwise splits the packets into two groups by
    'MAC' or 'IP' and returns ``self.pkt_groups``.
    """
    # get the packets
    if split_mode is None:
        pkts = RawPcapReader(self.pcap_file).read_all()
    else:
        # rdpcap gives parsed packets, needed for MAC/IP-based grouping.
        pkts = rdpcap(self.pcap_file)
    if not pkts:
        raise STLError("'%s' does not contain any packets." % self.pcap_file)
    self.pkts_arr = []
    last_ts = 0
    # fix times
    for pkt in pkts:
        if split_mode is None:
            pkt_data, meta = pkt
            # Raw reader metadata: (sec, usec, ...) -> microseconds.
            ts_usec = meta[0] * 1e6 + meta[1]
        else:
            pkt_data = pkt
            ts_usec = pkt.time * 1e6
        if ipg_usec is None:
            # NOTE(review): `locals()` is used to detect whether prev_time
            # was bound on a previous iteration — fragile but CPython-safe;
            # a sentinel initialised before the loop would be clearer.
            loco = locals()
            if 'prev_time' in loco:
                delta_usec = (ts_usec - loco['prev_time']) / float(speedup)
            else:
                delta_usec = 0  # first packet has no predecessor
            if min_ipg_usec and delta_usec < min_ipg_usec:
                delta_usec = min_ipg_usec
            prev_time = ts_usec
            last_ts += delta_usec
        else:
            # user specified ipg
            if min_ipg_usec:
                last_ts += min_ipg_usec
            elif ipg_usec:
                last_ts += ipg_usec / float(speedup)
            else:
                # NOTE(review): this branch fires when *neither* value is
                # truthy, yet the message says "not both" — confirm wording.
                raise STLError(
                    'Please specify either min_ipg_usec or ipg_usec, not both.'
                )
        self.pkts_arr.append([pkt_data, last_ts])
    if split_mode is None:
        return self.pkts_arr
    # we need to split
    self.graph = Graph()
    self.pkt_groups = [[], []]
    if split_mode == 'MAC':
        self.generate_mac_groups()
    elif split_mode == 'IP':
        self.generate_ip_groups()
    else:
        raise STLError('unknown split mode for PCAP')
    return self.pkt_groups
def process_pcap(file_name):
    """Count the packets in *file_name* and print the total."""
    print('Opening {}...'.format(file_name))
    total = sum(1 for _ in RawPcapReader(file_name))
    print('{} contains {} packets'.format(file_name, total))
def process_pcap(file_name):
    """Return the number of packets contained in *file_name*."""
    return sum(1 for _ in RawPcapReader(file_name))
def process_pcap(file_name):
    """Count the packets in *file_name* and print the total (Spanish)."""
    total = sum(1 for _ in RawPcapReader(file_name))
    print("Cantidad de paquetes analizados: {}".format(total))
def pcap_count_packets(file_name):
    """Return the number of packets contained in *file_name*."""
    return sum(1 for _ in RawPcapReader(file_name))
def get_label_array(pcap_file, mal_start=None, mal_end=None):
    """Return a per-packet 0/1 label array; 1 marks the malicious range.

    For the known ping-flood capture the malicious packet range is
    hard-coded; otherwise *mal_start*/*mal_end* delimit the range.

    Args:
        pcap_file: path to the capture to label.
        mal_start: first malicious packet index (inclusive), or None.
        mal_end: end of the malicious range (exclusive), or None.

    Returns:
        numpy array of shape (n_packets,) with 1.0 inside the range.
    """
    if pcap_file == "datasets/eth2dump-pingFloodDDoS1m-0,5h_1.pcap":
        mal_start, mal_end = 2933, 15636
    n_packets = sum(1 for _ in RawPcapReader(pcap_file))
    mal_benign_labels = np.zeros(n_packets)
    # BUG FIX: with both bounds left as None, the slice [None:None] covered
    # the whole array and labelled *every* packet malicious. Only apply the
    # range when at least one bound was actually supplied.
    if mal_start is not None or mal_end is not None:
        mal_benign_labels[mal_start:mal_end] = 1
    return mal_benign_labels
def create_pickle(pcap_file, pickle_out, srv, cli):
    """Extract per-packet TCP fields from *pcap_file* and pickle them.

    Args:
        pcap_file: capture to read.
        pickle_out: destination pickle file path.
        srv: server endpoint as "ip:port".
        cli: client endpoint as "ip:port".

    The pickle stream contains, in order: the client endpoint string, the
    server endpoint string, then the list of per-packet field dicts.
    Returns False (early) if a fragmented IP packet is encountered.
    """
    print('Opening a pcap file for pickling.... %s' % pcap_file)
    count = 0
    (server_ip, server_port) = srv.split(':')
    (client_ip, client_port) = cli.split(':')
    packets_for_analysis = []
    for (raw_data, pkt_metadata) in RawPcapReader(pcap_file):
        count += 1
        ether_pkt = Ether(raw_data)
        if 'type' not in ether_pkt.fields:
            # LLC frames will have 'len' instead of 'type'. We disregard those.
            continue
        if ether_pkt.type != 0x0800:
            continue  # disregard non-IPv4 packets
        ip_pkt = ether_pkt[IP]
        if ip_pkt.proto != 6:
            continue  # ignore non-TCP packets
        tcp_pkt = ip_pkt[TCP]
        # IP fragmentation would invalidate the payload-length math below.
        if (ip_pkt.flags == 'MF') or (ip_pkt.frag != 0):
            print('No support for fragmented IP packets')
            return False
        tcp_payload_len = ip_pkt.len - (ip_pkt.ihl * 4) - (tcp_pkt.dataofs * 4)
        # BUG FIX: the original rebound the loop variable `pkt_data` here,
        # shadowing the raw packet bytes; use a distinct name.
        pkt_info = {
            'src': ip_pkt.src,
            'dest': ip_pkt.dst,
            'ip_flags': ip_pkt.flags,
            'sport': tcp_pkt.sport,
            'dport': tcp_pkt.dport,
        }
        packets_for_analysis.append(pkt_info)
    print('Writing to pickle file:%s' % pickle_out)
    # BUG FIX: pickle requires a binary-mode file ('wb', not 'w'), and the
    # original dumped undefined names `client`/`server` (NameError); dump
    # the endpoint arguments instead.
    with open(pickle_out, 'wb') as pickle_fd:
        pickle.dump(cli, pickle_fd)
        pickle.dump(srv, pickle_fd)
        pickle.dump(packets_for_analysis, pickle_fd)
    print('done.')
def process_pcap(file_name):
    """Dump TruckersMP game messages carried over RakNet/UDP from a pcap.

    For each unfragmented UDP packet, interprets the payload as a RakNet
    datagram; datagrams with id 0x84 are unpacked into messages, and
    messages with id 0x87 are printed as TruckersMP packets (src, dst,
    time, packet id, hex payload).
    """
    print('Opening {}...'.format(file_name))
    for (
        pkt_data,
        pkt_metadata,
    ) in RawPcapReader(file_name):
        ether_pkt = Ether(pkt_data)
        if 'type' not in ether_pkt.fields:
            # LLC frames will have 'len' instead of 'type'.
            # We disregard those
            continue
        # NOTE(review): the IPv4 EtherType check below is commented out, so
        # a non-IPv4 frame reaching ether_pkt[IP] would raise IndexError —
        # confirm this was intentional for the captures being analysed.
        #if ether_pkt.type != 0x0800:
            # disregard non-IPv4 packets
            #continue
        ip_pkt = ether_pkt[IP]
        if ip_pkt.proto != 17:
            # Ignore non-UDP packet
            continue
        if (ip_pkt.flags == 'MF') or (ip_pkt.frag != 0):
            print('No support for fragmented IP packets')
            return False
        udp_pkt = ip_pkt[UDP]
        raknet_packet = bytes(udp_pkt.payload)
        raknet_packet_id = raknet_packet[0]
        # 0x84: RakNet datagram carrying encapsulated messages.
        if raknet_packet_id == 0x84:
            # Skip the 1-byte id + 3-byte datagram sequence number.
            raknet_message = raknet_packet[4:]
            messages = get_raknet_messages(raknet_message)
            for message in messages:
                if len(message) < 2:
                    continue  # too short to carry an id + payload
                raknet_message_id = message[0]
                # 0x87: user-data message wrapping a TruckersMP packet.
                if raknet_message_id == 0x87:
                    raknet_message_payload = message[1:]
                    truckersmp_packet_id = raknet_message_payload[0]
                    truckersmp_packet_payload = raknet_message_payload[1:]
                    #if ip_pkt.src == "172.16.222.46" and truckersmp_packet_id == 0x8c:
                    #    continue
                    #if ip_pkt.src == "172.16.222.46" and truckersmp_packet_id != 0x9c:
                    #    continue
                    print(ip_pkt.src, "->", ip_pkt.dst,
                          datetime.datetime.fromtimestamp(ip_pkt.time),
                          hex(truckersmp_packet_id),
                          truckersmp_packet_payload.hex())
def process_pcap(file_name):
    """Read a pcap and group ACK-bearing packets between kungfu hosts into flows.

    Returns:
        (packets, flows) — the flat packet list and a dict mapping
        ((src_ip, src_port), (dst_ip, dst_port)) to [total_bytes, packets],
        with each flow's packet list sorted by timestamp.
    """
    print('Opening {}...'.format(file_name))
    count = 0
    packets = []
    flows = dict()
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        count += 1
        # Disregard non-IPv4 frames.
        frame = Ether(pkt_data)
        if frame.type != 0x0800:
            continue
        ip_pkt = frame[IP]
        # Only traffic between the known kungfu hosts is of interest.
        if ip_pkt.src not in kungfu_ips or ip_pkt.dst not in kungfu_ips:
            continue
        packet = build_packet(ip_pkt, pkt_metadata)
        if packet.ack == 0:
            continue  # skip packets without an ACK
        packets.append(packet)
        flow_key = ((packet.src_address, packet.src_port),
                    (packet.dst_address, packet.dst_port))
        entry = flows.setdefault(flow_key, [0, []])
        entry[1].append(packet)
        entry[0] += packet.size_in_bytes
    print('Closing {}'.format(file_name))
    # Order each flow's packets by capture time.
    print('Sorting packets...')
    for entry in flows.values():
        entry[1].sort(key=lambda pkt: pkt.timestamp)
    print('Sorted!')
    return packets, flows
def process_pcap(pcap_file, iterations, min_iteration_start, max_iteration_end):
    """Build per-flow statistics (byte totals, timestamps, iteration bins).

    Only packets exchanged between the known kungfu hosts are counted.
    Each flow (keyed by ((src_ip, src_port), (dst_ip, dst_port))) tracks
    its total bytes, min/max packet timestamps, and a per-iteration byte
    histogram filled by ``assign_packet_to_bin``.

    Returns:
        dict mapping flow tuples to their statistics dict.
    """
    pkt_count = 0
    flows = dict()
    packets = RawPcapReader(pcap_file).read_all()
    for (
        pkt_data,
        pkt_metadata,
    ) in tqdm(packets, total=len(packets)):
        # get ethernet frame and disregard non-IPv4 packets
        ether_pkt = Ether(pkt_data)
        if ether_pkt.type != 0x0800:
            continue
        # get ip packet from ethernet frame
        ip_pkt = ether_pkt[IP]
        if (ip_pkt.src in kungfu_ips) and (ip_pkt.dst in kungfu_ips):
            pkt_count += 1
            # extract packet information
            pkt_size = int(ip_pkt.sprintf("%IP.len%"))
            pkt_timestamp = pkt_metadata.sec + (pkt_metadata.usec / 10**6)
            # NOTE(review): .sport/.dport resolve through the transport
            # payload; a non-TCP/UDP packet here would raise — presumably
            # the capture only holds TCP/UDP between these hosts; confirm.
            src_tuple = (pkt_src_ip, pkt_src_port) = ip_pkt.src, ip_pkt.sport
            dst_tuple = (pkt_dst_ip, pkt_dst_port) = ip_pkt.dst, ip_pkt.dport
            flow_tuple = (src_tuple, dst_tuple)
            # instantiate new flow
            if flow_tuple not in flows:
                flows[flow_tuple] = dict()
                flows[flow_tuple]['flow_size_bytes'] = 0
                flows[flow_tuple]['iteration_bins'] = [
                    0 for _ in range(len(iterations))
                ]
                flows[flow_tuple]['min_packet_timestamp'] = float('inf')
                flows[flow_tuple]['max_packet_timestamp'] = -1 * float('inf')
            # update min and max packet timestamps
            flows[flow_tuple]['min_packet_timestamp'] = min(
                pkt_timestamp, flows[flow_tuple]['min_packet_timestamp'])
            flows[flow_tuple]['max_packet_timestamp'] = max(
                pkt_timestamp, flows[flow_tuple]['max_packet_timestamp'])
            # assign packet to this flow
            flows[flow_tuple]['flow_size_bytes'] += pkt_size
            assign_packet_to_bin(pkt_timestamp, pkt_size, iterations,
                                 flows[flow_tuple]['iteration_bins'],
                                 min_iteration_start, max_iteration_end)
            # if pkt_count == 10 ** 4:
            #     break
    return flows
def process_file(file_name):
    """Run each packet through the link/network/transport filter chain.

    Packets passing all three filters are handed to ``process_packet``.
    """
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        ether_pkt = Ether(pkt_data)
        if not apply_link_filters(ether_pkt):
            continue
        ip_pkt = ether_pkt[IP]
        if not apply_network_filters(ip_pkt):
            continue
        tcp_pkt = ip_pkt[TCP]
        if not apply_transport_filters(tcp_pkt):
            continue
        process_packet(pkt_metadata, ether_pkt, ip_pkt, tcp_pkt)
def destinations_contacted(folder, src_address):
    """Write a per-destination packet count for one source address.

    Scans every ``.pcap`` file in *folder* (sorted, useful for windowing),
    counts IPv4 packets whose source is *src_address* by destination, and
    writes "dst::count" lines to ``destination_contacted<src>.log``.

    Args:
        folder: directory containing pcap files.
        src_address: source IP address to analyse.
    """
    counter = Counter()
    output_file_name = "destination_contacted" + src_address + ".log"
    # Ordering files in the directory (useful for window purposes).
    files = sorted(os.listdir(folder))
    # FIX: use a context manager so the log file is closed on all paths
    # (the original leaked the handle if an exception occurred mid-scan).
    with open(output_file_name, 'w') as out_file:
        for filename in files:
            if not filename.endswith(".pcap"):
                print("Not processed {} ...".format(filename))
                continue
            # FIX: os.path.join works whether or not *folder* ends in a
            # separator; plain concatenation required a trailing slash.
            file_name = os.path.join(folder, filename)
            print("Processing {} ...".format(file_name))
            for pkt_data, pkt_metadata in RawPcapReader(file_name):
                ether_pkt = Ether(pkt_data)
                if 'type' not in ether_pkt.fields:
                    # LLC frames will have 'len' instead of 'type'; skip.
                    continue
                if ether_pkt.type != protocol_mapping_l2.get('IPv4'):
                    continue
                ip_pkt = ether_pkt[IP]
                if ip_pkt.src != src_address:
                    continue
                counter[ip_pkt.dst] += 1
        out_file.write("Destinations contacted by {}: \n".format(src_address))
        for dst, n in counter.items():
            out_file.write("{}::{}\n".format(dst, n))
def process_pcap(file_name, target, ports):
    """Feed TCP/UDP packets to/from *target* into the protocol handlers.

    Initialises ``portResultsMap`` for each port of interest, classifies
    each IPv4 packet as inbound (dst == target) or outbound (src ==
    target), dispatches it to ``handleTCPPacket``/``handleUDPPacket``, and
    advances the global ``packetID`` per dispatched packet.

    Returns:
        The result of ``compileRequestResponseModel()``.
    """
    global packetID
    # Reset per-port result buckets before scanning.
    for port in ports:
        portResultsMap[port] = []
    count = 0
    for (
        pkt_data,
        pkt_metadata,
    ) in RawPcapReader(file_name):
        count += 1
        ether_pkt = Ether(pkt_data)
        if 'type' not in ether_pkt.fields:
            # LLC frames will have 'len' instead of 'type'.
            # We disregard those
            continue
        if ether_pkt.type != 0x0800:
            # disregard non-IPv4 packets
            continue
        ip_pkt = ether_pkt[IP]
        direction = None
        # true if inbound false if outbound
        if ip_pkt.src == target:
            direction = False
        elif ip_pkt.dst == target:
            direction = True
        else:
            continue  # packet does not involve the target host
        # Note: fragmentation aborts the whole scan (break, not continue).
        if (ip_pkt.flags == 'MF') or (ip_pkt.frag != 0):
            print('No support for fragmented IP packets')
            break
        # Protocol numbers are the standard defined numbers here:
        # https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
        if ip_pkt.proto == 6:  # TCP
            handleTCPPacket(ip_pkt, direction, ports)
        elif ip_pkt.proto == 17:  # UDP
            handleUDPPacket(ip_pkt, direction, ports)
        # elif ip_pkt.proto == 1:  # ICMP
        #     handleICMPPacket(ip_pkt, direction, ports)
        else:
            continue
        # Only dispatched (TCP/UDP) packets advance the global packet id.
        packetID += 1
    # print('{} contains {} packets ({} interesting)'.
    #       format(file_name, count, interesting_packet_count))
    return compileRequestResponseModel()
def get_specific_packet(self, packet_number):
    """Return the raw bytes of the packet at 1-based *packet_number*.

    Logs progress for every packet scanned; returns None when the file
    holds fewer packets than requested.
    """
    func_name = "PcapFileParser::get_specific_packet - "
    print(func_name + "trying to extract packet number:" + str(packet_number))
    for index, (pkt_data, _meta) in enumerate(
            RawPcapReader(self.pcap_file_name), start=1):
        print(func_name + "extracting packet[" + str(index) + "]")
        if index == packet_number:
            return pkt_data
    print(func_name + "packet number:" + str(packet_number) +
          " does not exist in this pcap file")
    return None
def pcap_count_conn(file_name, client, server):
    """Return (total, interesting) counts for a given client/server pair.

    *client* and *server* are "ip:port" strings; a packet is interesting
    when it is IPv4/TCP and both its addresses and both its ports belong
    to the endpoint pair.
    """
    client_ip, client_port = client.split(':')
    server_ip, server_port = server.split(':')
    endpoint_ips = {client_ip, server_ip}
    endpoint_ports = {int(client_port), int(server_port)}
    total = 0
    interesting = 0
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        total += 1
        frame = Ether(pkt_data)
        if 'type' not in frame.fields:
            # LLC frames carry 'len' instead of 'type'; skip them.
            continue
        if frame.type != 0x0800:
            continue  # not IPv4
        ip_pkt = frame[IP]
        if ip_pkt.proto != 6:
            continue  # not TCP
        if ip_pkt.src not in endpoint_ips or ip_pkt.dst not in endpoint_ips:
            continue  # uninteresting addresses
        tcp_pkt = ip_pkt[TCP]
        if (tcp_pkt.sport not in endpoint_ports
                or tcp_pkt.dport not in endpoint_ports):
            continue  # uninteresting ports
        interesting += 1
    return total, interesting
def test_pcap_aggr1():
    """Aggregate IPv4 source addresses from *testfile* into a Node tree."""
    root = None
    for pkt, _ in RawPcapReader(testfile):
        frame = Ether(pkt)
        if 'type' not in frame.fields:
            continue  # LLC frame; no EtherType
        if frame.type != 0x0800:
            continue  # not IPv4
        ip = frame[IP]
        addr = ip_address(ip.src)
        if root is None:
            root = Node(addr, ip.len)
        else:
            root.add(addr, ip.len)
    gotree(root)
def process_pcap(file_name):
    """Replay a hex-encoded slice of each packet's bytes over UDP.

    For every packet, bytes [144:-4] are hex-encoded and sent to
    (UDP_IP, UDP_PORT). The slice offsets presumably strip the capture's
    headers and a 4-byte trailer — TODO confirm for this capture format.
    """
    print('Opening {}...'.format(file_name))
    count = 0
    # FIX: create the socket once instead of once per packet.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        count += 1
        message = pkt_data[144:-4].hex()
        print(message[0])
        # BUG FIX: sendto() requires bytes; passing the hex *string*
        # raised TypeError on Python 3.
        sock.sendto(message.encode('ascii'), (UDP_IP, UDP_PORT))
def get_specific_packet(self, packet_number):
    """Return (raw_bytes, epoch_seconds) for the 1-based *packet_number*.

    Logs each packet's metadata while scanning; returns None when the
    file holds fewer packets than requested.
    """
    print(self.log_me() + "trying to extract packet number:" + str(packet_number))
    scanned = 0
    for pkt_data, pkt_metadata in RawPcapReader(self.pcap_file_name):
        print(self.log_me() + "packet meta data:" + str(pkt_metadata))
        epoch_seconds = pkt_metadata.sec
        print(self.log_me() + "packet meta seconds since epoc:" + str(epoch_seconds))
        scanned += 1
        if scanned == packet_number:
            return pkt_data, epoch_seconds
    print(self.log_me() + "packet number:" + str(packet_number) +
          " does not exist in this pcap file")
    return None
def process_pcap(file_name):
    """Run process_packet on every 449-byte packet in *file_name*.

    Packets of any other length are skipped; the total packet count is
    printed at the end.
    """
    print('Opening {}...'.format(file_name))
    count = 0
    for pkt_data, pkt_metadata in RawPcapReader(file_name):
        count += 1
        # Only frames of exactly 449 bytes are of interest here.
        if len(pkt_data) != 449:
            continue
        process_packet(pkt_data)
    print('{} contains {} packets'.format(file_name, count))