def analyze(self, pcapFilename):
    # Scan a capture for HL7-looking TCP payloads, copy the matching packets
    # into a dedicated pcap, and display a directed graph of the endpoints
    # (dst:dport -> src:sport). Returns the path of the HL7 dump file.
    hl7Output = BASE_DIR + "/hl7/networkFiles/hl7PacketDump.pcap"
    hl7Dump = PcapWriter((hl7Output), append=True, sync=True)
    G = nx.DiGraph(directed=True)
    print(unquote(pcapFilename))
    # pcapFilename arrives URL-encoded (e.g. from a web route)
    pcapFilename = unquote(pcapFilename)
    packets = rdpcap(pcapFilename)
    networkSession = packets.sessions()
    for session in networkSession:
        for packet in networkSession[session]:
            try:
                # NOTE(review): this matches on the *repr* of the payload
                # ("b'..." / "...r'") — presumably a heuristic for MLLP-framed
                # HL7 (\x0b ... \x1c\x0d); confirm against real traffic.
                if (str(packet[TCP].payload).startswith("b\'")) and str(packet[TCP].payload).endswith("r\'"):
                    hl7Dump.write(packet)
                    # IP.dport resolves through scapy's layer delegation to TCP
                    G.add_edge((str(packet[IP].dst) + ":" + str(packet[IP].dport)), (str(packet[IP].src) + ":" + str(packet[TCP].sport)))
            except:
                # bare except: skips frames without TCP/IP layers (and,
                # unfortunately, hides any other error as well)
                continue
    nx.draw(G, with_labels=True)
    plt.show()  # blocks until the plot window is closed
    hl7Dump.close()
    return hl7Output
def clsMessagesByIp(fileFrom, fileTo):
    """Group packets from *fileFrom* by IP pair (direction-insensitive) and
    write each conversation to its own pcap under the *fileTo* prefix.

    Packets whose source or destination IP resolves to 'null' are dropped.
    The group key is the concatenation of the two IPs in first-seen order.
    """
    grouped = {}
    for pkt in scapy.rdpcap(fileFrom):
        src = MessageConvert.getClsSrcIp(pkt)
        dst = MessageConvert.getClsDesIp(pkt)
        if src == 'null' or dst == 'null':
            continue
        forward = src + dst
        backward = dst + src
        if forward in grouped:
            grouped[forward].append(pkt)
        elif backward in grouped:
            grouped[backward].append(pkt)
        else:
            grouped[forward] = [pkt]
    for key, pkts in grouped.items():
        out = PcapWriter('%s%s.pcap' % (fileTo, key), append=True)
        #t_writer = PcapWriter('/home/wxw/data/cip_datanew/' + key + '.pcap', append=True)
        for pkt in pkts:
            out.write(pkt)
        out.flush()
        out.close()
class WrpcapSink(Sink):
    """Packets received on low input are written to PCAP file

    .. code::

            +----------+
         >>-|          |->>
            |          |
          >-|--[pcap]  |->
            +----------+
    """

    def __init__(self, fname, name=None, linktype=None):
        Sink.__init__(self, name=name)
        self.fname = fname
        self.f = None  # PcapWriter, created lazily in start()
        self.linktype = linktype

    def start(self):
        self.f = PcapWriter(self.fname, linktype=self.linktype)

    def stop(self):
        if self.f:
            self.f.flush()
            self.f.close()

    def push(self, msg):
        # BUG FIX: also guard on self.f — a push arriving before start()
        # (or after a failed start) used to raise AttributeError on None.
        if msg and self.f:
            self.f.write(msg)
def create_pcap_file_from_frames(self, file_name, frames):
    """Write every frame in *frames* to *file_name*, truncating any
    existing capture (append=False).

    The writer is now closed in a ``finally`` block so the file handle is
    not leaked if serializing a frame raises.
    """
    writer = PcapWriter(file_name, append=False)
    try:
        for frame in frames:
            writer.write(frame)
    finally:
        writer.close()
def gen_ipv4_assemble_pcap():
    """Generate ipv4 assemble test case input pcap file."""
    out_path = con.TLDK_TESTCONFIG + "/test_ipv4_assemble_rx.pcap"
    writer = PcapWriter(out_path, append=False)
    for pkt in create_packet(1066, False, 1024):
        writer.write(pkt)
    writer.close()
class WrpcapSink(Sink):
    """Sink that records :py:class:`Packet` objects arriving on the low
    entry into a ``pcap`` file; messages on the high entry are ignored.

    .. note::
        A ``pcap`` file stores a single link type, so every packet pushed
        here **must** share that link type — packets are written as-is and
        never converted.

    .. code::

            +----------+
         >>-|          |->>
            |          |
          >-|--[pcap]  |->
            +----------+

    :param fname: Filename to write packets to.
    :type fname: str
    :param linktype: See :py:attr:`linktype`.
    :type linktype: None or int

    .. py:attribute:: linktype

        Explicit link-type (``DLT_``) for packets, an ``int`` or ``None``
        — same meaning as the :py:func:`wrpcap` ``linktype`` parameter.
        When ``None`` (default) the link type is auto-detected from the
        first packet; the detection result is *not* written back to this
        attribute. Changing it after :py:meth:`PipeEngine.start` has no
        effect.
    """

    def __init__(self, fname, name=None, linktype=None):
        # type: (str, Optional[str], Optional[int]) -> None
        Sink.__init__(self, name=name)
        self.fname = fname
        self.f = None  # type: Optional[PcapWriter]
        self.linktype = linktype

    def start(self):
        # type: () -> None
        self.f = PcapWriter(self.fname, linktype=self.linktype)

    def stop(self):
        # type: () -> None
        if self.f:
            self.f.flush()
            self.f.close()

    def push(self, msg):
        # type: (Packet) -> None
        # Drop empty messages and writes that arrive before start().
        if self.f and msg:
            self.f.write(msg)
def gen_ipv4_checksum_pcap():
    """Generate ipv4 checksum test case input pcap file."""
    writer = PcapWriter(con.TLDK_TESTCONFIG + "/test_ipv4_checksum_rx.pcap",
                        append=False)
    # one batch of packets per payload size 1..1473
    for size in range(1, 1474):
        for pkt in create_packet(size, False):
            writer.write(pkt)
    writer.close()
def sanitize(filepath_in, filepath_out=None, sequential=True, ipv4_mask=0,
             ipv6_mask=0, mac_mask=0, start_ipv4='10.0.0.1',
             start_ipv6='2001:aa::1', start_mac='00:aa:00:00:00:00'):
    """Anonymize MAC/IPv4/IPv6 addresses of every packet in *filepath_in*.

    Writes the rewritten capture to *filepath_out* (default: input name with
    a ``_sanitized_<timestamp>`` suffix) and returns the output basename.
    """
    if not filepath_out:
        # BUG FIX: '%H%m%S' repeated %m (month) where minutes (%M) were
        # intended, producing e.g. 14**01**30 instead of 14:01-style minutes.
        timestamp = datetime.datetime.now().strftime('%y%m%d-%H%M%S')
        filepath_out = os.path.splitext(filepath_in)[
            0] + '_sanitized_' + timestamp + os.path.splitext(filepath_in)[1]

    mac_gen = MACGenerator(sequential=sequential, mask=mac_mask, start_mac=start_mac)
    ip4_gen = IPv4Generator(sequential=sequential, mask=ipv4_mask, start_ip=start_ipv4)
    ip6_gen = IPv6Generator(sequential=sequential, mask=ipv6_mask, start_ip=start_ipv6)

    # BUG FIX: pcap files are binary — text mode breaks decoding on Python 3.
    with open(filepath_in, 'rb') as capfile:
        #open cap file with pcapfile
        cap = savefile.load_savefile(capfile, verbose=False)
        #use scapy's pcapwriter
        pktwriter = PcapWriter(filepath_out, append=True)
        try:
            for pkt in cap.packets:
                #create scapy packet from pcapfile packet raw output
                pkt = Ether(pkt.raw())
                #MAC addresses
                pkt.src = mac_gen.get_mac(pkt.src)
                pkt.dst = mac_gen.get_mac(pkt.dst)
                #IP Address: IPv4 first, IPv6 on IndexError
                try:
                    pkt['IP'].src = ip4_gen.get_ip(pkt['IP'].src)
                    pkt['IP'].dst = ip4_gen.get_ip(pkt['IP'].dst)
                except IndexError:
                    pkt['IPv6'].src = ip6_gen.get_ip(pkt['IPv6'].src)
                    pkt['IPv6'].dst = ip6_gen.get_ip(pkt['IPv6'].dst)
                pktwriter.write(pkt)
        finally:
            pktwriter.close()
    return filepath_out.split('/')[-1]
def gen_ipv6_fragment_pcap():
    """Generate ipv6 fragment test case input pcap file."""
    writer = PcapWriter(con.TLDK_TESTCONFIG + "/test_ipv6_fragment_rx.pcap",
                        append=False)
    # one batch of packets per payload size 1..1453
    for size in range(1, 1454):
        for pkt in create_packet(size, True):
            writer.write(pkt)
    writer.close()
def gen_traffic(self, output_filename, dst_ip_addr, dst_mac,
                src_mac="00:00:00:00:00:01", ip_num=10000, packet_num=500000,
                payload="Normal Traffic pkts"):
    # Generate `packet_num` TCP-ACK packets from up to `ip_num` random source
    # hosts (each bound to one random TTL) towards dst_ip_addr. Writes the
    # capture to output_filename and dumps the ip -> hop-count table
    # (self.src_hosts) as JSON into <name>_ip_hc.txt.
    print("PAcket Number: %d" % packet_num)
    if not output_filename.endswith(".pcap"):
        print("Invalid PCAP filename! Exiting...")
        return
    self.pcap_filename = output_filename
    # side-car filenames derived from the pcap name
    self.ip_hc_filename = str.join(".", output_filename.split(".")[:-1]) + "_ip_hc.txt"
    self.ip_fake_ttl_filename = str.join(".", output_filename.split(".")[:-1]) + "_fake_ttl.txt"
    # show modify request
    request = "Generating " + output_filename + " with\n"
    if src_mac is not None:
        request += " src mac:" + src_mac + "\n"
    if dst_mac is not None:
        request += " dst mac:" + dst_mac + "\n"
    if dst_ip_addr is not None:
        request += " dst ip addr:" + dst_ip_addr + "\n"
    print(request + "\n")
    pcap_writer = PcapWriter(output_filename)
    src_ips = []               # hosts generated so far (int form)
    src_hosts_with_ttl = {}    # host -> the TTL fixed for that host
    for i in range(packet_num):
        if len(self.src_hosts) < ip_num:
            # pick a random src ip
            src_ip = self.__generate_ip()
            # pick a random hc
            ttl = self.__generate_rand_ttl()
        else:
            # host pool is full: reuse an existing host and its fixed TTL
            src_ip = src_ips[random.randint(0, len(src_ips) - 1)]
            ttl = src_hosts_with_ttl[src_ip]
        # calculate ttl according to hc
        pkt = Ether(src=src_mac, dst=dst_mac)/IP(src=self.__ip_int2str(src_ip), dst=dst_ip_addr, ttl=ttl)/TCP(flags=0x10)/payload
        pcap_writer.write(pkt)
        if src_ip not in src_ips:
            # first time we saw this host: register it everywhere
            src_ips.append(src_ip)
            src_hosts_with_ttl[src_ip] = ttl
            self.src_hosts[src_ip] = self.__ttl2hc(ttl)
        if i % 10000 == 0:
            print("%d packets have been produced\n" % i)
    print(str(len(self.src_hosts)) + " source hosts produced")
    print("Writing ip,hc dict into " + self.ip_hc_filename + "...")
    with open(self.ip_hc_filename, "w") as f:
        json.dump(self.src_hosts, f)
        f.close()  # redundant inside `with`, but harmless
    pcap_writer.flush()
    pcap_writer.close()
class NdpProxyPcapWriter:
    """
    Class for writing incoming respective generated packets to a *.pcap file.
    Writing is triggered through the REST api.
    """

    def __init__(self, logger):
        self.logger = logger
        # One timestamped output path per category, fixed at construction time.
        self.pcap_all_path = self._make_pcap_path('all_')
        self.pcap_generated_path = self._make_pcap_path('generated_')
        # The "all" capture uses a raw file handle + pcaplib.Writer, while the
        # "generated" capture uses scapy's PcapWriter. None = capture disabled.
        self.write_pcap_all_handle = None
        self.write_pcap_all_writer = None
        self.write_pcap_generated_writer = None

    def write_pcap_all(self, msg):
        # Record a received packet (msg.data: raw frame bytes — presumably;
        # confirm against caller) when the "all" capture is enabled.
        if self.write_pcap_all_writer:
            self.logger.debug("Writing received packet...")
            self.write_pcap_all_writer.write_pkt(msg.data)
            self.write_pcap_all_handle.flush()

    def write_pcap_generated(self, msg):
        # Record a generated (scapy) packet when that capture is enabled.
        if self.write_pcap_generated_writer:
            self.logger.debug("Writing generated packet...")
            self.write_pcap_generated_writer.write(msg)

    @staticmethod
    def _make_pcap_path(prefix):
        # e.g. <pcap_path>/all_20240101_120000.pcap (UTC timestamp)
        time_string = strftime('%Y%m%d_%H%M%S', gmtime())
        return pcap_path + '/' + prefix + time_string + '.pcap'

    def toggle_write_pcap(self, flags):
        # flags: mapping with truthy 'all' / 'generated' entries (REST payload)
        self._toggle_write_pcap_all(bool(flags['all']))
        self._toggle_write_pcap_generated(bool(flags['generated']))

    def _toggle_write_pcap_all(self, flag):
        if flag:
            # enable (idempotent: keeps an already-open writer)
            if not self.write_pcap_all_writer:
                self.write_pcap_all_handle = open(self.pcap_all_path, 'ab')
                self.write_pcap_all_writer = pcaplib.Writer(self.write_pcap_all_handle)
        else:
            try:
                self.write_pcap_all_handle.close()
                self.write_pcap_all_writer = None
            except AttributeError:
                # handle was never opened — toggled off while already off
                pass

    def _toggle_write_pcap_generated(self, flag):
        if flag:
            # enable (idempotent)
            if not self.write_pcap_generated_writer:
                self.write_pcap_generated_writer = PcapWriter(self.pcap_generated_path, append=True, sync=True)
        else:
            try:
                self.write_pcap_generated_writer.close()
                self.write_pcap_generated_writer = None
            except AttributeError:
                # writer was never created — toggled off while already off
                pass
def remove_a_packet_from_pcap(file='test.pcap'):
    """Drop the first packet of *file* and rewrite the capture in place.

    The whole capture is loaded into memory first, so the original data is
    safe even though the file is deleted before rewriting.
    """
    # acquire all packets from pcap
    pkts = rdpcap(file)
    # ignore the first packet
    pkts = pkts[1:]
    # recreate pcap
    os.remove(file)
    pktdump = PcapWriter(file, append=True, sync=True)
    try:
        # write other packets to pcap
        for pkt in pkts:
            pktdump.write(pkt)
    finally:
        # BUG FIX: close even if a write raises, so the handle isn't leaked
        pktdump.close()
def pcap_truncate(path_in, path_out):
    """Strip TCP/UDP payloads from *path_in*, writing a headers-only
    capture to *path_out*. Non-IP packets pass through untouched."""
    size_mb = os.path.getsize(str(path_in)) / (1024 * 1024)
    print(f"'{path_in.name}' ({size_mb})...")
    packets = rdpcap(str(path_in))
    writer = PcapWriter(str(path_out))
    for pkt in tqdm(packets, desc="Truncating PCAP", leave=False):
        if pkt.haslayer("IP"):
            if pkt.haslayer("TCP"):
                pkt["IP"]["TCP"].remove_payload()
            elif pkt.haslayer("UDP"):
                pkt["IP"]["UDP"].remove_payload()
        writer.write(pkt)
    writer.close()
def save(p):
    # Append packet *p* to the current capture file (Python 2 code — note the
    # print statement). Rotates the filename when _is_need_change says the
    # current file is stale, and after writing moves a finished file to the
    # upload directory.
    global filepath
    global filename
    if _is_need_change(filepath, filename):
        filename = _change_filename(filepath, filename)
        print "modifying filename: {}".format(filename)
    writer = PcapWriter(filepath + filename, append=True)
    writer.write(p)
    writer.flush()
    writer.close()
    # re-check after writing: if rotation is now due, ship the closed file
    # NOTE(review): semantics of _is_need_change (time/size based?) are not
    # visible here — confirm before refactoring this double check.
    if _is_need_change(filepath, filename):
        shutil.move(filepath + filename, toUploadPath + filename)
def sanitize(
    filepath_in,
    filepath_out=None,
    sequential=True,
    ipv4_mask=0,
    ipv6_mask=0,
    mac_mask=0,
    start_ipv4="10.0.0.1",
    start_ipv6="2001:aa::1",
    start_mac="00:aa:00:00:00:00",
):
    """Anonymize MAC/IPv4/IPv6 addresses of every packet in *filepath_in*.

    Writes the rewritten capture to *filepath_out* (default: input name with
    a ``_sanitized_<timestamp>`` suffix) and returns the output basename.
    """
    if not filepath_out:
        # BUG FIX: "%H%m%S" repeated %m (month) where minutes (%M) were
        # intended, producing a wrong timestamp in the generated name.
        timestamp = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
        filepath_out = os.path.splitext(filepath_in)[0] + "_sanitized_" + timestamp + os.path.splitext(filepath_in)[1]

    mac_gen = MACGenerator(sequential=sequential, mask=mac_mask, start_mac=start_mac)
    ip4_gen = IPv4Generator(sequential=sequential, mask=ipv4_mask, start_ip=start_ipv4)
    ip6_gen = IPv6Generator(sequential=sequential, mask=ipv6_mask, start_ip=start_ipv6)

    # BUG FIX: pcap files are binary — text mode breaks decoding on Python 3.
    with open(filepath_in, "rb") as capfile:
        # open cap file with pcapfile
        cap = savefile.load_savefile(capfile, verbose=False)
        # use scapy's pcapwriter
        pktwriter = PcapWriter(filepath_out, append=True)
        try:
            for pkt in cap.packets:
                # create scapy packet from pcapfile packet raw output
                pkt = Ether(pkt.raw())
                # MAC addresses
                pkt.src = mac_gen.get_mac(pkt.src)
                pkt.dst = mac_gen.get_mac(pkt.dst)
                # IP Address: IPv4 first, IPv6 on IndexError
                try:
                    pkt["IP"].src = ip4_gen.get_ip(pkt["IP"].src)
                    pkt["IP"].dst = ip4_gen.get_ip(pkt["IP"].dst)
                except IndexError:
                    pkt["IPv6"].src = ip6_gen.get_ip(pkt["IPv6"].src)
                    pkt["IPv6"].dst = ip6_gen.get_ip(pkt["IPv6"].dst)
                pktwriter.write(pkt)
        finally:
            pktwriter.close()
    return filepath_out.split("/")[-1]
def save_handshake(self, pkts, bss):
    """Append the captured handshake packets for *bss* to
    ./handshakes/<bssid-without-colons>.cap and return that path."""
    out_dir = os.path.join(os.getcwd(), 'handshakes')
    cap_name = '%s.cap' % (bss.replace(':', '').lower())
    self.c_v_path(out_dir)  # make sure the directory exists
    cap_path = os.path.join(out_dir, cap_name)
    writer = PcapWriter(cap_path, append=True, sync=True)
    for pkt in pkts:
        writer.write(pkt)
    writer.close()
    return cap_path
def modify_traffic(self, output_filename, fake_ttl=False, src_mac=None,
                   dst_mac=None, src_ip_addr=None, dst_ip_addr=None):
    # Stream the previously recorded capture (self.pcap_filename) into
    # output_filename, optionally overriding MAC/IP fields and — with
    # fake_ttl — replacing each packet's TTL with the per-source value
    # stored in self.src_hosts_with_fake_ttl. Non-Ether/IP packets are
    # dropped, not copied.
    if fake_ttl is True and len(self.src_hosts_with_fake_ttl) == 0:
        # warning only — processing continues (and would KeyError below);
        # NOTE(review): consider returning early here.
        print("Please extract ip2hc table before modify traffic with fake ttl.")
    # show modify request
    request = "Generating " + output_filename + " with\n"
    if fake_ttl:
        request += " fake ttl\n"
    if src_mac is not None:
        request += " src mac:" + src_mac + "\n"
    if dst_mac is not None:
        request += " dst mac:" + dst_mac + "\n"
    if src_ip_addr is not None:
        request += " src ip addr:" + src_ip_addr + "\n"
    if dst_ip_addr is not None:
        request += " dst ip addr:" + dst_ip_addr + "\n"
    print(request + "\n")
    pcap_reader = PcapReader(self.pcap_filename)
    pcap_writer = PcapWriter(output_filename)
    counter = 0
    while True:
        pkt = pcap_reader.read_packet()
        if pkt is None:
            break  # end of capture
        if not pkt.haslayer('Ether') or not pkt.haslayer('IP'):
            continue
        # ipv4 packets
        counter += 1
        ip_int = self.__ip_str2int(pkt['IP'].src)
        if fake_ttl:
            pkt['IP'].ttl = self.src_hosts_with_fake_ttl[ip_int]
        # 'Ethernet' is scapy's long name for the Ether layer
        if src_mac is not None:
            pkt['Ethernet'].src = src_mac
        if dst_mac is not None:
            pkt['Ethernet'].dst = dst_mac
        if src_ip_addr is not None:
            pkt['IP'].src = src_ip_addr
        if dst_ip_addr is not None:
            pkt['IP'].dst = dst_ip_addr
        pcap_writer.write(pkt)
        if counter % 10000 == 0:
            print("%d packets have been processed\n" % counter)
    pcap_writer.flush()
    pcap_writer.close()
    pcap_reader.close()
class WrpcapSink(Sink):
    """Packets received on low input are written to PCAP file

        +----------+
     >>-|          |->>
        |          |
      >-|--[pcap]  |->
        +----------+
    """
    # (typo fix: docstring previously said "PCA file")

    def __init__(self, fname, name=None):
        Sink.__init__(self, name=name)
        # Writer is opened eagerly, so push()/stop() can rely on it existing.
        self.f = PcapWriter(fname)

    def stop(self):
        self.f.flush()
        self.f.close()

    def push(self, msg):
        self.f.write(msg)
def beacon_histogram():
    # Hop across 2.4 GHz channels 1-13, sniffing 802.11 beacons for ~1 minute
    # each. Per channel: record unique/total beacon counts and average frame
    # size into the module-global `science`, dump the raw packets to a
    # timestamped pcap, then write `science` as JSON at the end.
    import pprint, json
    global beacons, science
    channels = range(1, 14)
    start = time.time()
    gmt = time.strftime('%d.%m.%Y %H:%M:%S', time.gmtime(start))
    f_prefix = time.strftime('%d.%m.%Y-%H:%M:%S', time.gmtime(start))
    print("[*] start time: {} GMT".format(gmt))
    # clear data
    beacons = {}
    for ch in channels:
        print("\t[i] channel: ", ch)
        # NOTE(review): shell=True with interpolated values; if0 is presumably
        # a trusted module-level interface name — confirm.
        subprocess.run("iw dev {} set channel {}".format(if0, ch), shell=True)
        # do_science (external) is expected to populate `beacons` per SSID/BSSID
        pkts = sniff(iface=if0, lfilter=lambda pkt: pkt.haslayer(Dot11Beacon), prn=do_science, timeout=(60 * 1))
        b_uniq = len(beacons.keys())
        b_total = sum(beacons.values())
        p_total = len(pkts)
        if p_total > 0:
            b_size_avg = sum([len(p) for p in pkts]) / float(p_total)
            science[ch] = {
                'b_uniq': b_uniq,
                'b_size_avg': b_size_avg,
                'b_total': b_total
            }
            print(
                "\t[i] unique beacons: {}, size avg: {}, total beacons: {} (sanity:{})"
                .format(b_uniq, b_size_avg, b_total, b_total == p_total))
            n_pcap = '{}-ch{}.pcap'.format(f_prefix, ch)
            f_pcap = PcapWriter(n_pcap)
            f_pcap.write(pkts)
            print("\t[i] wrote packets to: {}\n".format(n_pcap))
            #cleanup
            # NOTE(review): cleanup is kept inside the p_total>0 branch so
            # f_pcap is always bound when closed — confirm this matches the
            # original indentation (outside the branch it would NameError on
            # an empty first channel).
            beacons = {}
            b_size_avg = 0
            f_pcap.close()
    print("[*] results:\n")
    pprint.pprint(science)
    f_report = '{}-science.json'.format(f_prefix)
    f = open(f_report, 'w+')
    f.write(json.dumps(science))
    print("[*] results saved to file: {}".format(f_report))
    end = time.time()
    print("[*] end of experiment, duration: {} seconds".format(end - start))
def scapy_sniff(self):
    """Sniff packets on the interface named in ``iface.setting`` ('None'
    means let scapy choose) and save them to a timestamped pcap under
    sniff_data/."""
    # Read the saved interface choice; use `with` so the handle is always
    # released (the original open/read/close leaked on error).
    with open('iface.setting', 'r') as setting:
        iface = setting.read()
    if iface == 'None':
        data = sniff(prn=lambda x: x.summary())  # scapy sniff, default iface
    else:
        data = sniff(iface=iface, prn=lambda x: x.summary())
    print("Start analyzing packets...")
    out_name = "sniff_data/" + time.strftime('%Y_%m_%d_%H_%M_%S') + ".pcap"
    writer = PcapWriter(out_name, append=True)
    try:
        for pkt in data:
            writer.write(pkt)
        writer.flush()
    finally:
        # close even if a write fails mid-way
        writer.close()
def classify_bysrc(file_name, file_to):
    """Split *file_name* into per-conversation pcaps keyed by the IP pair
    (direction-insensitive, first-seen order), written as
    ``file_to + key + '.pcap'``."""
    packages = scapy.rdpcap(file_name)
    t_results = {}
    for p in packages:
        # Hoisted: get_srcip/get_dstip were each called up to four times per
        # packet. (Assumes they are pure lookups — they take the packet only.)
        src = get_srcip(p)
        dst = get_dstip(p)
        forward = src + dst
        backward = dst + src
        if forward in t_results:
            t_results[forward].append(p)
        elif backward in t_results:
            t_results[backward].append(p)
        else:
            t_results[forward] = [p]
    for key, pkts in t_results.items():
        t_writer = PcapWriter(file_to + key + '.pcap', append=True)
        for p in pkts:
            t_writer.write(p)
        t_writer.flush()
        t_writer.close()
def split_pcap(self, filename, rate):
    """Copy the first *rate* fraction (0.0-1.0) of the packets in *filename*
    into the fixed modbus_pure output capture."""
    reader = scapy.PcapReader(filename)
    packets = []
    while True:
        pkt = reader.read_packet()
        if pkt is None:
            break
        packets.append(pkt)
    reader.close()
    # BUG FIX: was int(length * length), which ignored the `rate` parameter
    # entirely and always selected every packet (the slice just clamped).
    final_len = int(len(packets) * rate)
    t_writer = PcapWriter('/home/wxw/data/Ethernetip/' + 'modbus_pure' + '.pcap',
                          append=True)
    for p in packets[0:final_len]:
        t_writer.write(p)
    t_writer.flush()
    t_writer.close()
def create_pcap_file(self, file_name, frame_size, number_of_frames,
                     incremental_ip_address, src_ip="0.0.0.0",
                     dst_ip="0.0.0.0"):
    """Write *number_of_frames* TCP/IPv4 frames of *frame_size* bytes to
    *file_name*, optionally stepping the destination IP by one per frame."""
    writer = PcapWriter(file_name, append=False)
    for _ in range(number_of_frames):
        ip_id = 0  # fixed IP identification (originally: frame index % 0x10000)
        writer.write(self.create_tcp_ipv4_frame(ip_id, src_ip, dst_ip, frame_size))
        if incremental_ip_address:
            dst_ip = self.increment_ip_addr(dst_ip, 1)
    writer.close()
def write_shuffled_pcap(self, fragments):
    """
    Receives a list of fragmented packets and writes them into a PCAP file
    using the Scapy API, reordered so the last fragment of every frame
    comes first:

    pkt0-frag3 -> pkt1-frag3 -> ... -> pktN-frag3 ->
    pkt0-frag2 -> pkt1-frag2 -> ... -> pktN-frag2 ->
    pkt0-frag1 -> pkt1-frag1 -> ... -> pktN-frag1 ->
    pkt0-frag0 -> pkt1-frag0 -> ... -> pktN-frag0.
    """
    writer = PcapWriter(self.test_config.pcap_file)
    step = self.test_config.frags_per_frame
    # offsets step-1, step-2, ..., 0: one pass per fragment position,
    # highest position first
    for offset in range(step - 1, -1, -1):
        for index in range(offset, len(fragments), step):
            writer.write(fragments[index])
    writer.close()
def noise_remove(self, filename, protocolname, t_lo):
    # Filter *filename* down to the packets whose layer at index *t_lo* (as
    # reported by pyshark's FileCapture) is named *protocolname*, appending
    # the survivors to a fixed output capture. The file is read twice —
    # pyshark for layer names, scapy for the raw packets — relying on both
    # readers seeing the packets in the same order.
    package_two = FileCapture(filename)
    package_pr = scapy.PcapReader(filename)
    package_one = []
    i = 1
    while (True):
        package = package_pr.read_packet()
        if package is None:
            break
        package_one.append(package)
    package_pr.close()
    package_three = []
    length = len(package_one)
    i = 0
    while (i < length):
        # keep the scapy packet when pyshark says layer t_lo matches
        if package_two[i].layers[t_lo].layer_name == protocolname:
            package_three.append(package_one[i])
        i = i + 1
    t_writer = PcapWriter('/home/wxw/data/' + 'modbus_pure' + '.pcap', append=True)
    for p in package_three:
        t_writer.write(p)
    t_writer.flush()
    t_writer.close()
class Module(BaseModule):
    # 802.11 reconnaissance: sniffs live interfaces (or replays a pcap),
    # feeding frames through a queue into per-frame-type handlers that
    # persist APs, clients and connections into the recon database.

    # ref: https://github.com/aircrack-ng/aircrack-ng/blob/master/src/airodump-ng.h#L141
    CHANNEL_HOPS = {
        "2.4G": (1, 7, 13, 2, 8, 3, 14, 9, 4, 10, 5, 11, 6, 12),
        "5G": (36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64,
               100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122,
               124, 126, 128, 132, 134, 136, 138, 140, 142, 144, 149, 151,
               153, 155, 157, 159, 161, 165, 169, 173),
        "mix": (1, 7, 13, 2, 8, 3, 14, 9, 4, 10, 5, 11, 6, 12, 36, 38, 40,
                42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 100, 102,
                104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126,
                128, 132, 134, 136, 138, 140, 142, 144, 149, 151, 153, 155,
                157, 159, 161, 165, 169, 173)
    }

    # Module metadata consumed by the framework; "options" is a live parser.
    META = {
        "id": "discovery/recon",
        "name": "802.11 networks reconnaissance module",
        "author": "Valentín Blanco (https://github.com/valenbg1)",
        "version": "1.1.0",
        "description": "Detects 802.11 APs and clients info and saves it to the recon database for further use.",
        "options": argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter),
        "depends": {}
    }
    META["options"].add_argument("-i", "--ifaces",
                                 help="monitor mode capable WLAN interfaces",
                                 default="wlan0", required=False, nargs="*",
                                 metavar="INTERFACE")
    META["options"].add_argument("-c", "--channel",
                                 help="fix interface to specify channel",
                                 required=False, type=int, metavar="CHANNEL")
    META["options"].add_argument(
        "-r", "--read", dest="input_file",
        help="read a pcap file instead of use interface",
        required=False, metavar="INPUT_FILE")
    META["options"].add_argument(
        "-w", "--write", dest="output_file",
        help="write a pcap file with processed packages",
        required=False, metavar="OUTPUT_FILE")
    META["options"].add_argument(
        "-b", "--band",
        help="Scan on specific band. Use 'mix' for all bands",
        default="2.4G", choices=CHANNEL_HOPS.keys(),
        metavar="<{}>".format("|".join(CHANNEL_HOPS.keys())))

    def __init__(self):
        # caches are (re)built as sets in clear_caches() when run() starts
        self.bssids_cache = None
        self.clients_cache = None
        self.connections_cache = None
        self.iface_current_channel = None
        self.running = False
        self.cmd: Pinecone = None
        self.in_pkcs_queue = Queue()   # frames handed from sniffers to the worker
        self.out_writer: PcapWriter = None  # optional pcap of processed frames

    def sig_int_handler(self, signal, frame):
        # SIGINT: request a controlled shutdown of all worker threads.
        self.running = False
        self.cmd.pfeedback("\n[i] Exiting...\n")

    def sniff(self, iface: str) -> None:
        # Sniffer thread body: enqueue frames until self.running goes False.
        try:
            sniff(iface=iface, prn=self.handle_packet, store=False,
                  stop_filter=lambda p: not self.running)
        except Exception as e:
            self.cmd.perror("[!] Exception while sniffing: {}".format(e))
            self.running = False

    def channel_hopping(self, interfaces: Card, band: str) -> None:
        # Hopper thread body: cycle all interfaces through the band's channels.
        channel_iterator = itertools.cycle(self.CHANNEL_HOPS[band])
        while self.running:
            for interface in interfaces:
                if not self.running:
                    break
                try:
                    self._hop_to_channel(interface, next(channel_iterator))
                except:
                    pass  # best effort: a failed hop just skips that channel
                # ref: https://github.com/aircrack-ng/aircrack-ng/blob/master/src/airodump-ng.h#L40
                sleep(0.250)

    def run(self, args, cmd):
        # Framework entry point: replay a pcap or sniff live interfaces.
        self.cmd = cmd
        self.clear_caches()
        self.running = True
        if args.input_file is not None:
            self._run_on_pcap(args)
        else:
            self._run_on_interface(args)

    def stop(self, cmd):
        pass

    def clear_caches(self) -> None:
        # "Already announced" sets — used only to avoid duplicate feedback.
        self.bssids_cache = set()
        self.clients_cache = set()
        self.connections_cache = set()

    @db_session
    def handle_dot11_header(self, packet: Packet) -> None:
        # Derive BSSID/client from the frame's DS bits and upsert
        # BasicServiceSet / Client / Connection rows.
        now = datetime.now()
        radiotap_pkg = packet[RadioTap]
        dot11_pkg = packet[Dot11]
        if dot11_pkg.sprintf("%type%") != "Control":
            client_mac = None
            dot11_addrs_info = get_dot11_addrs_info(dot11_pkg)
            dot11_ds_bits = dot11_addrs_info["ds_bits"]
            if not dot11_ds_bits:  # no to-DS & no from-DS
                bssid = dot11_addrs_info["bssid"]
                if dot11_addrs_info["sa"] != bssid:
                    client_mac = dot11_addrs_info["sa"]
            elif len(dot11_ds_bits) == 1:  # to-DS or from-DS
                bssid = dot11_addrs_info["bssid"]
                client_mac = dot11_addrs_info["sa"] if "to-DS" in dot11_ds_bits else dot11_addrs_info["da"]
            else:  # to-DS & from-DS
                bssid = dot11_addrs_info["ta"]
            # multicast/broadcast addresses are not real stations
            if is_multicast_mac(bssid):
                bssid = None
            if client_mac and is_multicast_mac(client_mac):
                client_mac = None
            bss = None
            if bssid:
                try:
                    bss = BasicServiceSet[bssid]
                    bss.last_seen = now
                except:
                    bss = BasicServiceSet(bssid=bssid, last_seen=now)
                # Transmission Address match bssid, so packet came from an AP
                # NOTE(review): the comparison is `!=` while this original
                # comment says "match" — possible inversion; confirm.
                if dot11_addrs_info["ta"] != dot11_addrs_info["bssid"]:
                    # get signal strength and update DB if needed
                    current_dbm = radiotap_pkg.dBm_AntSignal
                    if current_dbm and (not bss.max_dbm_power or current_dbm > bss.max_dbm_power):
                        bss.max_dbm_power = current_dbm
                        # TODO: Get GPS fix and update max power position
            if client_mac:
                try:
                    client = Client[client_mac]
                except:
                    client = Client(mac=client_mac)
                if client_mac not in self.clients_cache:
                    self.clients_cache.add(client_mac)
                    self.cmd.pfeedback(
                        "[i] Detected client ({})".format(client))
                if bss:
                    try:
                        Connection[client, bss].last_seen = now
                    except:
                        Connection(client=client, bss=bss, last_seen=now)
                    if (client_mac, bssid) not in self.connections_cache:
                        self.connections_cache.add((client_mac, bssid))
                        self.cmd.pfeedback(
                            "[i] Detected connection between client ({}) and BSS (BSSID: {})"
                            .format(client, bss.bssid))

    @db_session
    def handle_authn_res(self, packet: Packet) -> None:
        # On a successful WEP authentication response, record the
        # authentication algorithm on the BSS.
        authn_packet = packet[Dot11Auth]
        if authn_packet.sprintf(
                "%status%") == "success" and authn_packet.seqnum in {2, 4}:
            bssid = packet[Dot11].addr3
            bss = BasicServiceSet[bssid]
            if bss.encryption_types == "WEP" and authn_packet.algo in WEP_AUTHN_TYPE_IDS:
                bss.authn_types = WEP_AUTHN_TYPE_IDS[authn_packet.algo]

    @db_session
    def handle_probe_req(self, packet: Packet) -> None:
        # Record which ESS a (known) client is probing for.
        now = datetime.now()
        ssid = process_dot11elts(packet[Dot11Elt])["ssid"]
        if ssid:
            try:
                ess = ExtendedServiceSet[ssid]
            except:
                ess = ExtendedServiceSet(ssid=ssid)
            client_mac = packet[Dot11].addr2
            client = Client[client_mac]
            try:
                ProbeReq[client, ess].last_seen = now
            except:
                ProbeReq(client=client, ess=ess, last_seen=now)
            if (client_mac, ssid) not in self.clients_cache:
                self.clients_cache.add((client_mac, ssid))
                self.cmd.pfeedback(
                    "[i] Detected client ({}) probing for ESS ({})".format(
                        client, ess))

    @db_session
    def handle_beacon(self, packet: Packet) -> None:
        # Update the BSS row (channel, encryption, ciphers, authn, ESS) from
        # a beacon frame and announce newly seen APs.
        dot11elts_info = process_dot11elts(packet[Dot11Elt])
        channel = dot11elts_info["channel"]
        encryption_types = ", ".join(dot11elts_info["encryption_types"])
        if channel is None:
            # beacon carried no DS parameter set: assume the channel we tuned
            channel = self.iface_current_channel
        if not encryption_types:
            # no RSN/WPA element: privacy bit alone means WEP, otherwise open
            encryption_types = "WEP" if "privacy" in str(
                packet[Dot11Beacon].cap) else "OPN"
        ssid = dot11elts_info["ssid"]
        ess = None
        if ssid:
            try:
                ess = ExtendedServiceSet[ssid]
            except:
                ess = ExtendedServiceSet(ssid=ssid)
        hides_ssid = ssid == ""
        bssid = packet.addr3
        cipher_types = ", ".join(dot11elts_info["cipher_types"])
        authn_types = ", ".join(dot11elts_info["authn_types"])
        bss = BasicServiceSet[bssid]
        bss.channel = channel
        bss.encryption_types = encryption_types
        if encryption_types == "WEP":
            bss.cipher_types = "WEP"
        else:
            bss.cipher_types = cipher_types
        bss.authn_types = authn_types
        bss.ess = ess
        bss.hides_ssid = hides_ssid
        if bssid not in self.bssids_cache:
            self.bssids_cache.add(bssid)
            ssid = "\"{}\"".format(ssid) if ssid else "<empty>"
            current_dbm = packet[RadioTap].dBm_AntSignal
            self.cmd.pfeedback(
                "[i] Detected AP (SSID: {}, BSSID: {}, ch: {}, enc: ({}), cipher: ({}), authn: ({}), dBm: {})."
                .format(ssid, bss.bssid, bss.channel, bss.encryption_types,
                        bss.cipher_types, bss.authn_types, current_dbm))

    @db_session
    def handle_packet_queue(self) -> None:
        # Worker thread body: drain the queue, optionally mirror frames to
        # the output pcap, then dispatch to the per-frame-type handlers.
        while self.running:
            try:
                packet = self.in_pkcs_queue.get(timeout=1)
            except Empty:
                # Allow re evaluation of self.running for controlled cleanup
                continue
            if self.out_writer:
                self.out_writer.write(packet)
            try:
                if packet.haslayer(Dot11) or packet.haslayer(Dot11FCS):
                    self.handle_dot11_header(packet)
                    if packet.haslayer(Dot11Beacon):
                        self.handle_beacon(packet)
                    elif packet.haslayer(Dot11ProbeReq):
                        self.handle_probe_req(packet)
                    elif packet.haslayer(Dot11Auth):
                        self.handle_authn_res(packet)
            except Exception as e:
                self.cmd.perror(
                    "[!] Exception while handling packet: {}\n{}".format(
                        e, packet.show(dump=True)))

    def handle_packet(self, packet: Packet) -> None:
        # Sniffer callback: forward 802.11 frames to the worker queue.
        try:
            if packet.haslayer(Dot11) or packet.haslayer(Dot11FCS):
                self.in_pkcs_queue.put(packet)
        except Exception as e:
            self.cmd.perror(
                "[!] Exception while handling packet: {}\n{}".format(
                    e, packet.show(dump=True)))

    def _hop_to_channel(self, interface: Card, channel: int) -> None:
        check_chset(interface, channel)
        self.iface_current_channel = channel

    def _run_on_interface(self, args):
        # Live mode: start worker + one sniffer per interface (+ optional
        # channel hopper), then block until SIGINT stops everything.
        interfaces = []
        join_to = []
        if args.output_file:
            self.out_writer = PcapWriter(args.output_file)
        handle_queue_thread = Thread(target=self.handle_packet_queue)
        handle_queue_thread.start()
        join_to.append(handle_queue_thread)
        for iface in args.ifaces:
            interfaces.append(set_monitor_mode(iface))
            sniff_thread = Thread(target=self.sniff, kwargs={"iface": iface})
            sniff_thread.start()
            join_to.append(sniff_thread)
        if args.channel is None:
            hopping_thread = Thread(target=self.channel_hopping,
                                    kwargs={
                                        "interfaces": interfaces,
                                        "band": args.band
                                    })
            hopping_thread.start()
            join_to.append(hopping_thread)
        else:
            # fixed channel requested: tune every interface once, no hopper
            for interface in interfaces:
                check_chset(interface, args.channel)
        prev_sig_handler = signal.signal(signal.SIGINT, self.sig_int_handler)
        self.cmd.pfeedback(
            "[i] Starting reconnaissance, press ctrl-c to stop...\n")
        for th in join_to:
            th.join()
        if self.out_writer:
            self.out_writer.close()
        signal.signal(signal.SIGINT, prev_sig_handler)

    def _run_on_pcap(self, args):
        # Offline mode: feed every packet of the input capture through the
        # same handle_packet() entry point until EOF.
        reader = PcapReader(args.input_file)
        try:
            while True:
                self.handle_packet(reader.read_packet())
        except EOFError:
            pass
        reader.close()
def write(file_name, packets):
    """Append every packet in *packets* to the pcap *file_name*."""
    out = PcapWriter(file_name, append = True)
    for packet in packets:
        out.write(packet)
    out.flush()
    out.close()
def gen_spoofing_attack(self, output_filename, dst_ip_addr, dst_mac,
                        src_mac="00:00:00:00:00:11", syn=True, ack=True,
                        udp=True, packet_num=500000, payload="Spoofing pkts"):
    """Generate *packet_num* spoofed packets — a random mix of the enabled
    SYN/ACK/UDP types — from known source hosts with randomized TTLs, and
    write them to *output_filename*."""
    if len(self.src_hosts_with_fake_ttl) == 0:
        print("Please extract ip2hc table before modify traffic with fake ttl.")
    # convert dict to array
    src_hosts_array = []
    for (src_ip, hc) in self.src_hosts.items():
        src_hosts_array.append(src_ip)
    # show modify request
    request = "Generating " + output_filename + " including\n"
    if syn:
        request += " spoofing SYN packets\n"
    if ack:
        request += " spoofing ACK packets\n"
    if udp:
        request += " spoofing UDP packets\n"
    request += "with\n"
    if src_mac is not None:
        request += " src mac:" + src_mac + "\n"
    if dst_mac is not None:
        request += " dst mac:" + dst_mac + "\n"
    if dst_ip_addr is not None:
        request += " dst ip addr:" + dst_ip_addr + "\n"
    print(request + "\n")
    pkt_types = []
    if syn:
        pkt_types.append("SYN")
    if ack:
        pkt_types.append("ACK")
    if udp:
        pkt_types.append("UDP")
    # BUG FIX: with syn=ack=udp all False, random.randint(0, -1) raised
    # ValueError and `pkt` was unbound — bail out early instead.
    if not pkt_types:
        print("No packet type enabled (syn/ack/udp all False); nothing to generate.")
        return
    pcap_writer = PcapWriter(output_filename)
    for i in range(packet_num):
        # pick a random packet type
        pkt_type = pkt_types[random.randint(0, len(pkt_types) - 1)]
        # pick a random src ip
        src_host = src_hosts_array[random.randint(0, len(src_hosts_array) - 1)]
        # pick a random hc
        rand_ttl = self.__generate_rand_ttl()
        # calculate ttl according to hc
        if pkt_type == "SYN":
            pkt = Ether(src=src_mac, dst=dst_mac)/IP(src=self.__ip_int2str(int(src_host)), dst=dst_ip_addr, ttl=rand_ttl)/TCP(flags=0x02)/payload
        elif pkt_type == "ACK":
            pkt = Ether(src=src_mac, dst=dst_mac)/IP(src=self.__ip_int2str(int(src_host)), dst=dst_ip_addr, ttl=rand_ttl)/TCP(flags=0x10)/payload
        elif pkt_type == "UDP":
            pkt = Ether(src=src_mac, dst=dst_mac)/IP(src=self.__ip_int2str(int(src_host)), dst=dst_ip_addr, ttl=rand_ttl)/UDP()/payload
        pcap_writer.write(pkt)
        if i % 10000 == 0:
            print("%d packets have been produced\n" % i)
    pcap_writer.flush()
    pcap_writer.close()
def saz_to_pcap(sazpath):
    # Convert a Fiddler SAZ archive into a synthetic pcap by replaying each
    # captured HTTP request/response pair over a fabricated TCP session
    # (handshake, data, finshake). Returns the pcap path, or None on failure.
    if not sazpath.lower().endswith(".saz"):
        return None
    if not HAVE_SCAPY:
        log.error("Scapy is required for SAZ to PCAP conversion.")
        return None
    tmpdir = ""
    # NOTE(review): this mkdtemp() directory (holding the pcap) is separate
    # from the extraction tmpdir below and is never cleaned up — confirm
    # the caller removes it.
    pcappath = "%s/%s.pcap" % (tempfile.mkdtemp(), os.path.basename(sazpath))
    fiddler_raw_dir = ""
    pktdump = PcapWriter(pcappath, sync=True)
    try:
        tmpdir = tempfile.mkdtemp()
    except Exception as e:
        log.error("Failed to Create temp dir for SAZ extraction %s" % (e))
        return None
    try:
        z = zipfile.ZipFile(sazpath, "r")
    except Exception as e:
        log.error("Failed to open SAZ file as Zip extraction %s" % (e))
        return None
    try:
        z.extractall(tmpdir)
        z.close()
    except Exception as e:
        log.error("Failed to extract SAZ file to temp dir %s" % (e))
        return None
    if not os.path.isdir("%s/raw/" % (tmpdir)):
        return None
    fiddler_raw_dir = "%s/raw/" % (tmpdir)
    # Preferred path: per-session *_m.xml metadata files are present.
    m_file_list = glob.glob("%s/%s" % (fiddler_raw_dir, "*_m.xml"))
    m_file_list.sort()
    if m_file_list:
        for xml_file in m_file_list:
            # fabricated 5-tuple defaults; some fields may be overridden below
            sport = random.randint(1024, 65535)
            src = "192.168.1.1"
            smac = "00:11:22:aa:bb:cc"
            dport = 80
            dst = "10.1.1.1"
            dmac = "c0:c1:c0:b7:ce:63"
            dom = parse(xml_file)
            m = re.match(r"^(?P<fid>\d+)_m\.xml", os.path.basename(xml_file))
            if m:
                fid = m.group("fid")
            else:
                log.error("Failed to find fiddler ID tag")
                return None
            xmlTags = dom.getElementsByTagName("SessionFlag")
            for xmlTag in xmlTags:
                xmlTag = xmlTag.toxml()
                m = re.match(
                    r"\<SessionFlag N=\x22x-(?:client(?:ip\x22 V=\x22[^\x22]*?(?P<clientip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|port\x22 V=\x22(?P<sport>\d+))|hostip\x22 V=\x22[^\x22]*?(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))\x22",
                    xmlTag,
                )
                # TODO:to enable this we need to track 5 tuples otherwise we have session reuse issues
                # if m and m.group("sport"):
                #     sport = int(m.group("sport"))
                # NOTE(review): src is initialized to "192.168.1.1" above, so
                # `src == None` can never hold — the clientip override is dead
                # code (and `== None` should be `is None`).
                if m and m.group("clientip") and src == None:
                    src = m.group("clientip")
                elif m and m.group("hostip"):
                    dst = m.group("hostip")
            req = open(fiddler_raw_dir + fid + "_c.txt").read()
            # strip the absolute-URI prefix from the request line; recover dport
            m = re.match(
                r"^(?P<verb>[^\r\n\s]+)\s+(?P<host_and_port>https?\:\/\/[^\/\r\n\:]+(\:(?P<dport>\d{1,5}))?)\/", req
            )
            if m and m.group("verb") != "CONNECT":
                req = req.replace(m.group("host_and_port"), "", 1)
                if m.group("dport") and int(m.group("dport")) <= 65535:
                    dport = int(m.group("dport"))
            resp = open(fiddler_raw_dir + fid + "_s.txt").read()
            # synthesize the TCP conversation around the captured payloads
            (seq, ack) = build_handshake(src, dst, sport, dport, pktdump, smac, dmac)
            (seq, ack) = make_pkts(src, dst, sport, dport, seq, ack, req, pktdump, smac, dmac)
            (seq, ack) = make_pkts(dst, src, dport, sport, seq, ack, resp, pktdump, dmac, smac)
            build_finshake(src, dst, sport, dport, seq, ack, pktdump, smac, dmac)
    else:
        # Fallback: no metadata — derive sessions from the *_c.txt files only.
        m_file_list = glob.glob("%s/%s" % (fiddler_raw_dir, "*_c.txt"))
        m_file_list.sort()
        if m_file_list:
            for xml_file in m_file_list:
                sport = random.randint(1024, 65535)
                dport = 80
                src = "192.168.1.1"
                smac = "00:11:22:aa:bb:cc"
                dst = "10.1.1.1"
                dmac = "c0:c1:c0:b7:ce:63"
                m = re.match(r"^(?P<fid>\d+)_c\.txt", os.path.basename(xml_file))
                if m:
                    fid = m.group("fid")
                else:
                    log.error("Failed to find fiddler ID tag")
                    return None
                req = open(fiddler_raw_dir + fid + "_c.txt").read()
                m = re.match(
                    r"^(?P<verb>[^\r\n\s]+)\s+(?P<host_and_port>https?\:\/\/[^\/\r\n\:]+(\:(?P<dport>\d{1,5}))?)\/", req
                )
                if m and m.group("verb") != "CONNECT":
                    req = req.replace(m.group("host_and_port"), "", 1)
                    if m.group("dport") and int(m.group("dport")) <= 65535:
                        dport = int(m.group("dport"))
                resp = open(fiddler_raw_dir + fid + "_s.txt").read()
                (seq, ack) = build_handshake(src, dst, sport, dport, pktdump, smac, dmac)
                (seq, ack) = make_pkts(src, dst, sport, dport, seq, ack, req, pktdump, smac, dmac)
                (seq, ack) = make_pkts(dst, src, dport, sport, seq, ack, resp, pktdump, dmac, smac)
                build_finshake(src, dst, sport, dport, seq, ack, pktdump, smac, dmac)
        else:
            log.error("Unsupported SAZ format")
            return None
    pktdump.close()
    if tmpdir:
        try:
            shutil.rmtree(tmpdir)
        except:
            pass  # best-effort cleanup of the extraction dir
    return pcappath
pcap_name = get_pcap_name(sys.argv[3]) target_pcap = target_path + '/' target_pcap += "%s_by_%s_part_%d.pcap" % (pcap_name, split_type, i + 1) writer = PcapWriter(target_pcap) writer_array.append(writer) # Start to split according to split type if split_type == "mod": split_by_mod(reader) elif split_type == "mod_execution": split_by_mod_without_for_train(reader, train_num, split_num) elif split_type == "random": split_by_random(reader, split_num) elif split_type == "random_execution": split_by_random_without_for_train(reader, train_num, split_num) elif split_type == "ecmp": split_by_ecmp(reader, split_num) elif split_type == "ecmp_execution": split_by_ecmp_without_for_train(reader, train_num, split_num) elif split_type == "host": split_by_host(reader, split_num) elif split_type == "host_execution": split_by_host_without_for_train(reader, train_num, split_num) # Flush and close all parts' writers for i in range(split_num): writer = writer_array[i] writer.flush() writer.close()
class NetworkStack():
    """Emulated network stack, processing log entries into network packets.

    Each log entry is mapped to a canonical TCP 5-tuple; new connections get
    a synthetic SYN handshake, payloads become PSH/ACK segments, and idle or
    finished connections are torn down with a FIN exchange.  All packets are
    written to the PcapWriter held in ``self.pcap``.
    """

    class ConnState():
        """State for a single TCP connection."""

        def __init__(self, logentry, tm, ctx):
            # Endpoints of the connection, as seen in the first log entry.
            self.src_addr = logentry['src_addr']
            self.src_port = logentry['src_port']
            self.dst_addr = logentry['dst_addr']
            self.dst_port = logentry['dst_port']
            # Timestamp of the most recent activity on this connection.
            self.tm = tm
            # Back-reference to the owning NetworkStack (for pcap/mss).
            self._ctx = ctx

        def touch(self, tm):
            """Advance the last-activity timestamp (never move it backwards)."""
            if self.tm < tm:
                self.tm = tm

        def _write_packet(self, pkt):
            # Stamp the packet with this connection's current time (seconds
            # since the Unix epoch) and emit it through the shared writer.
            pkt.time = (self.tm - datetime.datetime(1970, 1, 1)).total_seconds()
            self._ctx.pcap.write(pkt)
            self._ctx.last_packet_tm = self.tm

        def _seq(self, addr, port, inc):
            """Return (seq, ack) for the endpoint (addr, port) and advance
            that endpoint's sequence number by ``inc`` bytes."""
            if self.src_addr == addr and self.src_port == port:
                seq = self.src_seq
                ack = self.dst_seq
                self.src_seq += inc
            else:
                seq = self.dst_seq
                ack = self.src_seq
                self.dst_seq += inc
            return (seq, ack)

        def syn(self):
            """Send a TCP SYN handshake, opening the connection."""
            self.src_seq = random.randint(1024, (2**32) - 1)
            self.dst_seq = random.randint(1024, (2**32) - 1)
            self._write_packet(
                Ether() /
                IP(src=self.src_addr, dst=self.dst_addr) /
                TCP(flags='S', sport=self.src_port, dport=self.dst_port,
                    seq=self.src_seq)
            )
            self.src_seq += 1
            self._write_packet(
                Ether() /
                IP(src=self.dst_addr, dst=self.src_addr) /
                TCP(flags='SA', sport=self.dst_port, dport=self.src_port,
                    seq=self.dst_seq, ack=self.src_seq)
            )
            self.dst_seq += 1
            self._write_packet(
                Ether() /
                IP(src=self.src_addr, dst=self.dst_addr) /
                TCP(flags='A', sport=self.src_port, dport=self.dst_port,
                    seq=self.src_seq, ack=self.dst_seq)
            )

        def fin(self):
            """Send a TCP FIN handshake, closing current connection."""
            self._write_packet(
                Ether() /
                IP(src=self.src_addr, dst=self.dst_addr) /
                TCP(flags="FA", sport=self.src_port, dport=self.dst_port,
                    seq=self.src_seq, ack=self.dst_seq)
            )
            self.src_seq += 1
            self._write_packet(
                Ether() /
                IP(src=self.dst_addr, dst=self.src_addr) /
                TCP(flags='A', sport=self.dst_port, dport=self.src_port,
                    seq=self.dst_seq, ack=self.src_seq)
            )

        def data(self, logentry):
            """Send the entry's payload as MSS-sized TCP data segments."""
            for segment in chunks(logentry['data'], self._ctx.mss):
                seq, ack = self._seq(logentry['src_addr'],
                                     logentry['src_port'], len(segment))
                self._write_packet(
                    Ether() /
                    IP(src=logentry['src_addr'], dst=logentry['dst_addr']) /
                    TCP(flags='PA', sport=logentry['src_port'],
                        dport=logentry['dst_port'], seq=seq, ack=ack) /
                    segment
                )

    def __init__(self, outfile, mtu=1500):
        # linktype=1 is DLT_EN10MB (Ethernet).
        self.pcap = PcapWriter(filename=outfile, linktype=1)
        # MSS = MTU minus 40 bytes of IPv4 + TCP headers.
        self.mss = mtu - 40
        self.connstate = {}
        self.last_packet_tm = datetime.datetime(1970, 1, 1, 0, 0, 0)
        self.last_timeout_tm = datetime.datetime(1970, 1, 1, 0, 0, 0)

    def _make5tuple(self, logentry):
        """Construct a canonical per-connection 5-tuple (direction-independent:
        the lexicographically smaller endpoint always comes first)."""
        if (logentry['src_addr'] < logentry['dst_addr']) or \
           (logentry['src_addr'] == logentry['dst_addr'] and \
            logentry['src_port'] < logentry['dst_port']):
            return "tcp|%s|%d|%s|%d" % (logentry['src_addr'],
                                        logentry['src_port'],
                                        logentry['dst_addr'],
                                        logentry['dst_port'])
        else:
            return "tcp|%s|%d|%s|%d" % (logentry['dst_addr'],
                                        logentry['dst_port'],
                                        logentry['src_addr'],
                                        logentry['src_port'])

    # Note that the chosen data structure for the internal state scales badly
    # for large numbers of connections: O(n) search every minute.  This needs
    # to be rewritten using better data structures for scalability.
    def add(self, logentry):
        """Process a log entry, keeping internal state."""
        tm = parse_timestamp(logentry['timestamp'])
        conn5tuple = self._make5tuple(logentry)
        if conn5tuple not in self.connstate:
            self.connstate[conn5tuple] = NetworkStack.ConnState(logentry, tm, self)
            self.connstate[conn5tuple].syn()
        else:
            self.connstate[conn5tuple].touch(tm)
        self.connstate[conn5tuple].data(logentry)
        # Periodically time out idle connections (doesn't scale!).
        # NOTE(review): timedelta(0, 1, 0) is ONE second (days, seconds,
        # microseconds), although the original comment claimed 60 seconds;
        # the value is kept as-is -- confirm the intended interval.
        # BUGFIX: the original iterated self.connstate while deleting from
        # it (RuntimeError once triggered) and indexed every entry with
        # conn5tuple -- the current key -- instead of the loop variable, so
        # idle connections were never the ones examined or closed.
        if tm > self.last_timeout_tm + datetime.timedelta(0, 1, 0):
            expired = [conn for conn, state in self.connstate.items()
                       if self.last_timeout_tm > state.tm +
                       datetime.timedelta(0, 1, 0)]
            for conn in expired:
                self.connstate[conn].fin()
                del self.connstate[conn]
            self.last_timeout_tm = tm

    def done(self):
        """We are done, all active connections can be closed."""
        for conn in self.connstate:
            self.connstate[conn].touch(self.last_packet_tm)
            self.connstate[conn].fin()
        self.pcap.close()
def _cleanup_dir(tmpdir):
    """Best-effort removal of a temporary extraction directory."""
    if tmpdir:
        try:
            shutil.rmtree(tmpdir)
        except Exception:
            # Deliberate best-effort: a stale temp dir is not fatal.
            pass


def _session_fid(fname, pattern):
    """Extract the numeric fiddler session id from a raw file name.

    Returns the id string, or None (after logging) if the name does not
    match ``pattern``.
    """
    m = re.match(pattern, os.path.basename(fname))
    if m:
        return m.group("fid")
    log.error("Failed to find fiddler ID tag")
    return None


def _replay_session(fiddler_raw_dir, fid, src, dst, sport, dport, smac, dmac,
                    pktdump):
    """Replay one captured request/response pair as a synthetic TCP session.

    Reads ``<fid>_c.txt`` (request) and ``<fid>_s.txt`` (response), strips a
    proxy-style absolute-URI host prefix from the request line (honouring an
    explicit port there, capped at 65535), then writes handshake, data and
    FIN-teardown packets to ``pktdump``.
    """
    # BUGFIX: files were opened via open(...).read() and never closed.
    with open(fiddler_raw_dir + fid + "_c.txt") as f:
        req = f.read()
    m = re.match(
        r"^(?P<verb>[^\r\n\s]+)\s+(?P<host_and_port>https?\:\/\/[^\/\r\n\:]+(\:(?P<dport>\d{1,5}))?)\/",
        req)
    if m and m.group("verb") != "CONNECT":
        req = req.replace(m.group("host_and_port"), "", 1)
        if m.group("dport") and int(m.group("dport")) <= 65535:
            dport = int(m.group("dport"))
    with open(fiddler_raw_dir + fid + "_s.txt") as f:
        resp = f.read()
    (seq, ack) = build_handshake(src, dst, sport, dport, pktdump, smac, dmac)
    (seq, ack) = make_pkts(src, dst, sport, dport, seq, ack, req, pktdump,
                           smac, dmac)
    (seq, ack) = make_pkts(dst, src, dport, sport, seq, ack, resp, pktdump,
                           dmac, smac)
    build_finshake(src, dst, sport, dport, seq, ack, pktdump, smac, dmac)


def saz_to_pcap(sazpath):
    """Convert a Fiddler SAZ archive into a PCAP of synthetic TCP sessions.

    The archive is unzipped to a temp dir; each session in ``raw/`` is
    replayed as handshake + request + response + teardown packets.  Returns
    the path of the generated pcap file, or None on any failure.
    """
    if not sazpath.lower().endswith(".saz"):
        return None
    if not HAVE_SCAPY:
        log.error("Scapy is required for SAZ to PCAP conversion.")
        return None
    # The pcap gets its own temp dir which is intentionally NOT removed:
    # its path is the return value.
    pcappath = "%s/%s.pcap" % (tempfile.mkdtemp(), os.path.basename(sazpath))
    pktdump = PcapWriter(pcappath, sync=True)
    try:
        tmpdir = tempfile.mkdtemp()
    except Exception as e:
        log.error("Failed to Create temp dir for SAZ extraction %s" % (e))
        return None
    try:
        z = zipfile.ZipFile(sazpath, "r")
    except Exception as e:
        log.error("Failed to open SAZ file as Zip extraction %s" % (e))
        _cleanup_dir(tmpdir)  # BUGFIX: tmpdir leaked on all error paths
        return None
    try:
        z.extractall(tmpdir)
        z.close()
    except Exception as e:
        log.error("Failed to extract SAZ file to temp dir %s" % (e))
        _cleanup_dir(tmpdir)
        return None
    fiddler_raw_dir = "%s/raw/" % (tmpdir)
    if not os.path.isdir(fiddler_raw_dir):
        _cleanup_dir(tmpdir)
        return None
    m_file_list = glob.glob("%s/%s" % (fiddler_raw_dir, "*_m.xml"))
    m_file_list.sort()
    if m_file_list:
        # Preferred layout: per-session *_m.xml metadata is present.
        for xml_file in m_file_list:
            sport = random.randint(1024, 65535)
            src = "192.168.1.1"
            smac = "00:11:22:aa:bb:cc"
            dport = 80
            dst = "10.1.1.1"
            dmac = "c0:c1:c0:b7:ce:63"
            fid = _session_fid(xml_file, r"^(?P<fid>\d+)_m\.xml")
            if fid is None:
                _cleanup_dir(tmpdir)
                return None
            dom = parse(xml_file)
            xmlTags = dom.getElementsByTagName('SessionFlag')
            for xmlTag in xmlTags:
                xmlTag = xmlTag.toxml()
                m = re.match(
                    r"\<SessionFlag N=\x22x-(?:client(?:ip\x22 V=\x22[^\x22]*?(?P<clientip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|port\x22 V=\x22(?P<sport>\d+))|hostip\x22 V=\x22[^\x22]*?(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))\x22",
                    xmlTag)
                #TODO:to enable this we need to track 5 tuples otherwise we have session reuse issues
                #if m and m.group("sport"):
                #sport = int(m.group("sport"))
                # NOTE(review): src is initialised to "192.168.1.1" above,
                # so this None-guard can never fire; preserved as-is to
                # keep behaviour -- confirm whether clientip should win.
                if m and m.group("clientip") and src is None:
                    src = m.group("clientip")
                elif m and m.group("hostip"):
                    dst = m.group("hostip")
            _replay_session(fiddler_raw_dir, fid, src, dst, sport, dport,
                            smac, dmac, pktdump)
    else:
        # Fallback layout: no metadata, drive conversion from *_c.txt files.
        c_file_list = glob.glob("%s/%s" % (fiddler_raw_dir, "*_c.txt"))
        c_file_list.sort()
        if not c_file_list:
            log.error("Unsupported SAZ format")
            _cleanup_dir(tmpdir)
            return None
        for c_file in c_file_list:
            sport = random.randint(1024, 65535)
            dport = 80
            src = "192.168.1.1"
            smac = "00:11:22:aa:bb:cc"
            dst = "10.1.1.1"
            dmac = "c0:c1:c0:b7:ce:63"
            fid = _session_fid(c_file, r"^(?P<fid>\d+)_c\.txt")
            if fid is None:
                _cleanup_dir(tmpdir)
                return None
            _replay_session(fiddler_raw_dir, fid, src, dst, sport, dport,
                            smac, dmac, pktdump)
    pktdump.close()
    _cleanup_dir(tmpdir)
    return pcappath
if m and m.group("sport"): sport = int(m.group("sport")) #sport = random.randint(1024, 65535) elif m and m.group("clientip") and src == None: src = m.group("clientip") elif m and m.group("hostip") and dst == None: dst = m.group("hostip") req = open(options.fiddler_raw_dir + fid + "_c.txt").read() m=re.match(r"^[^\r\n\s]+\s+(?P<host_and_port>https?\:\/\/[^\/\r\n\:]+(\:(?P<dport>\d{1,5}))?)\/",req) if m and options.dproxy and m.group("host_and_port"): req = req.replace(m.group("host_and_port"),"",1) if m.group("dport") and int(m.group("dport")) <= 65535: dport = int(m.group("dport")) resp = open(options.fiddler_raw_dir + fid + "_s.txt").read() print "src: %s dst: %s sport: %s dport: %s" % (src, dst, sport, dport) (seq,ack)=build_handshake(src,dst,sport,dport) (seq,ack)=make_poop(src,dst,sport,dport,seq,ack,req) (seq,ack)=make_poop(dst,src,dport,sport,seq,ack,resp) build_finshake(src,dst,sport,dport,seq,ack) if options.tmpdir: try: shutil.rmtree(options.tmpdir) except: print "failed to clean up tmpdir %s you will have to do it" % (options.tmpdir) else: print "fiddler raw dir specified:%s dos not exist" % (options.fiddler_raw_dir) sys.exit(-1) pktdump.close()
class NetworkStack():
    """Emulated network stack, processing log entries into network packets.

    Log entries are keyed by a canonical TCP 5-tuple.  New flows get a
    synthetic SYN handshake, payloads become PSH/ACK segments, entries
    flagged ``eof`` (and idle flows) are closed with a FIN exchange.  All
    packets go to the PcapWriter in ``self.pcap``.
    """

    class ConnState():
        """State for a single TCP connection."""

        def __init__(self, logentry, tm, ctx):
            # Endpoints as seen in the first log entry for this flow.
            self.src_addr = logentry['src_addr']
            self.src_port = logentry['src_port']
            self.dst_addr = logentry['dst_addr']
            self.dst_port = logentry['dst_port']
            # Timestamp of the most recent activity.
            self.tm = tm
            # Back-reference to the owning NetworkStack (pcap writer, MSS).
            self._ctx = ctx

        def touch(self, tm):
            """Advance the last-activity timestamp (never backwards)."""
            if self.tm < tm:
                self.tm = tm

        def _write_packet(self, pkt):
            # Stamp the packet with this connection's time (epoch seconds)
            # and emit it via the shared writer.
            pkt.time = (self.tm - datetime.datetime(1970, 1, 1)).total_seconds()
            self._ctx.pcap.write(pkt)
            self._ctx.last_packet_tm = self.tm

        def _seq(self, addr, port, inc):
            """Return (seq, ack) for the endpoint (addr, port) and advance
            that endpoint's sequence number by ``inc`` bytes."""
            if self.src_addr == addr and self.src_port == port:
                seq = self.src_seq
                ack = self.dst_seq
                self.src_seq += inc
            else:
                seq = self.dst_seq
                ack = self.src_seq
                self.dst_seq += inc
            return (seq, ack)

        def syn(self):
            """Send a TCP SYN handshake, opening the connection."""
            self.src_seq = random.randint(1024, (2**32) - 1)
            self.dst_seq = random.randint(1024, (2**32) - 1)
            self._write_packet(Ether() /
                               IP(src=self.src_addr, dst=self.dst_addr) /
                               TCP(flags='S', sport=self.src_port,
                                   dport=self.dst_port, seq=self.src_seq))
            self.src_seq += 1
            self._write_packet(Ether() /
                               IP(src=self.dst_addr, dst=self.src_addr) /
                               TCP(flags='SA', sport=self.dst_port,
                                   dport=self.src_port, seq=self.dst_seq,
                                   ack=self.src_seq))
            self.dst_seq += 1
            self._write_packet(Ether() /
                               IP(src=self.src_addr, dst=self.dst_addr) /
                               TCP(flags='A', sport=self.src_port,
                                   dport=self.dst_port, seq=self.src_seq,
                                   ack=self.dst_seq))

        def fin(self):
            """Send a TCP FIN handshake, closing current connection."""
            self._write_packet(Ether() /
                               IP(src=self.src_addr, dst=self.dst_addr) /
                               TCP(flags="FA", sport=self.src_port,
                                   dport=self.dst_port, seq=self.src_seq,
                                   ack=self.dst_seq))
            self.src_seq += 1
            self._write_packet(Ether() /
                               IP(src=self.dst_addr, dst=self.src_addr) /
                               TCP(flags='A', sport=self.dst_port,
                                   dport=self.src_port, seq=self.dst_seq,
                                   ack=self.src_seq))

        def data(self, logentry):
            """Send the entry's payload as MSS-sized TCP data segments."""
            for segment in chunks(logentry['data'], self._ctx.mss):
                seq, ack = self._seq(logentry['src_addr'],
                                     logentry['src_port'], len(segment))
                self._write_packet(
                    Ether() /
                    IP(src=logentry['src_addr'], dst=logentry['dst_addr']) /
                    TCP(flags='PA', sport=logentry['src_port'],
                        dport=logentry['dst_port'], seq=seq, ack=ack) /
                    segment)

    def __init__(self, outfile, mtu=1500):
        # linktype=1 is DLT_EN10MB (Ethernet).
        self.pcap = PcapWriter(filename=outfile, linktype=1)
        # MSS = MTU minus 40 bytes of IPv4 + TCP headers.
        self.mss = mtu - 40
        self.connstate = {}
        self.last_packet_tm = datetime.datetime(1970, 1, 1, 0, 0, 0)
        self.last_timeout_tm = datetime.datetime(1970, 1, 1, 0, 0, 0)

    def _make5tuple(self, logentry):
        """Construct a canonical per-connection 5-tuple (direction-independent:
        the lexicographically smaller endpoint always comes first)."""
        if (logentry['src_addr'] < logentry['dst_addr']) or \
           (logentry['src_addr'] == logentry['dst_addr'] and \
            logentry['src_port'] < logentry['dst_port']):
            return "tcp|%s|%d|%s|%d" % (
                logentry['src_addr'], logentry['src_port'],
                logentry['dst_addr'], logentry['dst_port'])
        else:
            return "tcp|%s|%d|%s|%d" % (
                logentry['dst_addr'], logentry['dst_port'],
                logentry['src_addr'], logentry['src_port'])

    # Note that the chosen data structure for the internal state scales badly
    # for large numbers of connections: O(n) search every minute.  This needs
    # to be rewritten using better data structures for scalability.
    def add(self, logentry):
        """Process a log entry, keeping internal state."""
        tm = parse_timestamp(logentry['timestamp'])
        conn5tuple = self._make5tuple(logentry)
        if logentry['eof']:
            # Explicit end-of-flow marker: tear down the connection if we
            # are tracking it.
            if conn5tuple in self.connstate:
                self.connstate[conn5tuple].fin()
                del self.connstate[conn5tuple]
        else:
            if conn5tuple not in self.connstate:
                self.connstate[conn5tuple] = NetworkStack.ConnState(
                    logentry, tm, self)
                self.connstate[conn5tuple].syn()
            else:
                self.connstate[conn5tuple].touch(tm)
            self.connstate[conn5tuple].data(logentry)
        # Periodically time out idle connections (should not happen).
        # NOTE(review): timedelta(0, 1, 0) is ONE second (days, seconds,
        # microseconds), although the original comment said 60s; the value
        # is kept as-is -- confirm the intended interval.
        # BUGFIX: the original iterated self.connstate while deleting from
        # it (RuntimeError once triggered) and indexed every entry with
        # conn5tuple -- the current key -- instead of the loop variable, so
        # idle connections were never the ones examined or closed.
        if tm > self.last_timeout_tm + datetime.timedelta(0, 1, 0):
            expired = [conn for conn, state in self.connstate.items()
                       if self.last_timeout_tm > state.tm +
                       datetime.timedelta(0, 1, 0)]
            for conn in expired:
                self.connstate[conn].fin()
                del self.connstate[conn]
            self.last_timeout_tm = tm

    def done(self):
        """We are done, all active connections can be closed."""
        for conn in self.connstate:
            self.connstate[conn].touch(self.last_packet_tm)
            self.connstate[conn].fin()
        self.pcap.close()
src = m.group("clientip") elif m and m.group("hostip") and dst == None: dst = m.group("hostip") req = open(options.fiddler_raw_dir + fid + "_c.txt").read() m = re.match( r"^[^\r\n\s]+\s+(?P<host_and_port>https?\:\/\/[^\/\r\n\:]+(\:(?P<dport>\d{1,5}))?)\/", req) if m and options.dproxy and m.group("host_and_port"): req = req.replace(m.group("host_and_port"), "", 1) if m.group("dport") and int(m.group("dport")) <= 65535: dport = int(m.group("dport")) resp = open(options.fiddler_raw_dir + fid + "_s.txt").read() print "src: %s dst: %s sport: %s dport: %s" % (src, dst, sport, dport) (seq, ack) = build_handshake(src, dst, sport, dport) (seq, ack) = make_poop(src, dst, sport, dport, seq, ack, req) (seq, ack) = make_poop(dst, src, dport, sport, seq, ack, resp) build_finshake(src, dst, sport, dport, seq, ack) if options.tmpdir: try: shutil.rmtree(options.tmpdir) except: print "failed to clean up tmpdir %s you will have to do it" % ( options.tmpdir) else: print "fiddler raw dir specified:%s dos not exist" % ( options.fiddler_raw_dir) sys.exit(-1) pktdump.close()