def main(self):
    pcap = PcapFile(self.options.pcap_path)
    info = pcap.info()
    Formatter.print_table(
        [(k, unicode(v, errors='replace')) for k, v in info.iteritems()],
        headers=['Key', 'Value'])
def main(self):
    pcap = PcapFile(self.options.pcap_path)
    info = pcap.info()
    data = list()
    for k, v in info.items():
        if isinstance(v, bytes):
            v = v.decode('utf-8')
        data.append((k, v))
    Formatter.print_table(data, headers=['Key', 'Value'])
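The first variant targets Python 2 (unicode()/iteritems()); the second is its Python 3 equivalent, decoding any bytes values before display. Outside of a command class, the same information can be pulled directly. A minimal standalone sketch, assuming tshark is installed and 'sample.pcap' is a placeholder path:

from steelscript.wireshark.core.pcap import PcapFile

# 'sample.pcap' is a placeholder; point at any capture file
pcap = PcapFile('sample.pcap')
info = pcap.info()   # dict of capture summary fields

# info() also populates the summary attributes used in the snippets below
print(info['Number of packets'])
print(pcap.starttime, pcap.endtime)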
def run(self):
    criteria = self.job.criteria
    pcapfilename = get_pcap_file(criteria)
    pcapfile = PcapFile(pcapfilename)
    pcapfile.info()
    self.data = [['Start time', str(pcapfile.starttime)],
                 ['End time', str(pcapfile.endtime)],
                 ['Number of packets', pcapfile.numpackets]]
    return True
def main(self):
    columns = self.options.columns.split(',')
    pcap = PcapFile(self.options.pcap_path)
    data = pcap.query(columns)
    if self.options.join == 'INNER':
        # Keep only rows where every requested field resolved
        data = [row for row in data if None not in row]
    if not data:
        print('No rows found matching your input')
        return
    max_rows = int(self.options.max_rows)
    data_out = data[:max_rows]
    Formatter.print_table(data_out, headers=columns)
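The None filter matters because a packet that lacks a requested field yields an empty cell; mixing TCP and UDP fields, for instance, guarantees holes. A small sketch of the same filtering outside the command class ('mixed.pcap' is a placeholder):

from steelscript.wireshark.core.pcap import PcapFile

pcap = PcapFile('mixed.pcap')   # placeholder path
rows = pcap.query(['ip.src', 'tcp.srcport', 'udp.srcport'])

# UDP packets carry no tcp.srcport, so those rows contain None;
# an 'INNER' join keeps only rows where every field resolved
inner = [row for row in rows if None not in row]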
def run(self):
    criteria = self.job.criteria
    pcapfilename = criteria.pcapfilename
    if not pcapfilename:
        raise ValueError("No pcap file specified")
    elif not os.path.exists(pcapfilename):
        raise ValueError("No such file: %s" % pcapfilename)
    if not hasattr(settings, 'TSHARK_PATH'):
        raise ValueError('Please set local_settings.TSHARK_PATH '
                         'to the proper path to the tshark executable')
    pcapfile = PcapFile(pcapfilename)
    pcapfile.info()
    self.data = [['Start time', str(pcapfile.starttime)],
                 ['End time', str(pcapfile.endtime)],
                 ['Number of packets', pcapfile.numpackets]]
    return True
def analyze(self, jobs=None):
    criteria = self.job.criteria
    if jobs:
        job = list(jobs.values())[0]
        if job.status == Job.ERROR:
            raise AnalysisException("%s for getting pcap file failed: %s"
                                    % (job, job.message))
        criteria.entire_pcap = True
        self.filename = job.data()['filename'][0]
    else:
        self.filename = criteria.pcapfilename

    pcap = PcapFile(self.filename)

    try:
        pcap_info = pcap.info()
    except ValueError:
        raise AnalysisException("No packets in %s" % self.filename)

    logger.debug("%s: File info %s" % (self.__class__.__name__, pcap_info))

    self.pkt_num = int(pcap_info['Number of packets'])
    min_pkt_num = self.table.options.split_threshold

    wt = Table.from_ref(self.table.options.related_tables['wireshark'])

    depjobs = {}
    if self.pkt_num < min_pkt_num:
        # No need to split the pcap file
        criteria.pcapfilename = self.filename
        criteria.entire_pcap = True
        job = Job.create(table=wt, criteria=criteria,
                         update_progress=False, parent=self.job)
        depjobs[job.id] = job
        logger.debug("%s starting single job" % self.__class__.__name__)
        return QueryContinue(self.collect, depjobs)

    self.output_dir = os.path.join(SPLIT_DIR, self.file_handle)
    self.split_pcap()

    split_files = os.listdir(self.output_dir)

    if not split_files:
        raise AnalysisException('No pcap file found after splitting %s'
                                % self.filename)

    for split in split_files:
        # use wireshark table
        ws_criteria = copy.copy(criteria)
        ws_criteria.pcapfilename = os.path.join(self.output_dir, split)

        # for ease of removing the split directory in collect func
        ws_criteria.output_dir = self.output_dir

        job = Job.create(table=wt, criteria=ws_criteria,
                         update_progress=False, parent=self.job)
        depjobs[job.id] = job

    logger.debug("%s starting multiple jobs" % self.__class__.__name__)
    return QueryContinue(self.collect, jobs=depjobs)
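The split_pcap() helper itself is not shown above. One way to implement it is with Wireshark's editcap utility, which writes a new capture file every N packets. A hypothetical sketch (the chunk size, helper body, and output naming are assumptions, not taken from the code above):

import os
import subprocess

def split_pcap(self, pkts_per_file=10000):
    # Hypothetical helper: split self.filename into chunks of
    # pkts_per_file packets each using Wireshark's editcap tool
    if not os.path.exists(self.output_dir):
        os.makedirs(self.output_dir)
    outfile = os.path.join(self.output_dir, 'split.pcap')
    # 'editcap -c N infile outfile' writes one numbered file per chunk
    subprocess.check_call(['editcap', '-c', str(pkts_per_file),
                           self.filename, outfile])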
def run(self):
    criteria = self.job.criteria

    table = self.table
    columns = table.get_columns(synthetic=False)

    pcapfilename = get_pcap_file(criteria)
    pcapfile = PcapFile(pcapfilename)

    fieldnames = []
    basecolnames = []    # list of columns
    # dict by field name of the base (or first) column to use this field
    fields = {}
    for tc in columns:
        tc_options = tc.options
        if tc_options.field in fields:
            # Asking for the same field name twice doesn't work, but
            # is useful when aggregating and choosing a different operation
            # like "min", or "max". Will populate these columns later
            continue
        fields[tc_options.field] = tc.name
        fieldnames.append(tc_options.field)
        basecolnames.append(tc.name)

    if criteria.entire_pcap:
        starttime = None
        endtime = None
    else:
        starttime = criteria.starttime
        endtime = criteria.endtime

    data = pcapfile.query(fieldnames,
                          starttime=starttime,
                          endtime=endtime,
                          filterexpr=criteria.wireshark_filterexpr,
                          use_tshark_fields=True)

    # Can be a list of 0 elements or None
    if not data:
        self.data = None
        return True

    df = pandas.DataFrame(data, columns=basecolnames)
    # At this point we have a dataframe with one column for each
    # unique field (the first column to reference the field)

    if table.rows > 0:
        df = df[:table.rows]

    logger.info("Data returned (first 3 rows...):\n%s", df[:3])

    # Convert the data into the right format
    for tc in columns:
        if tc.name not in basecolnames:
            continue
        tc_options = tc.options
        if tc.datatype == "time":
            df[tc.name] = pandas.DatetimeIndex(df[tc.name])

    colnames = [col.name for col in columns]
    # reindex adds any duplicate-field columns not yet populated (as NaN)
    self.data = df.reindex(columns=colnames).values.tolist()

    return True
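The time-column pass converts raw timestamp values into pandas datetimes so downstream consumers can sort and resample them. A minimal illustration of the same conversion on its own (column names invented for the example):

import pandas

df = pandas.DataFrame({'time': ['2013-04-24 22:09:30', '2013-04-24 22:10:26'],
                       'pkts': [100, 45]})

# Same conversion as in run(): coerce the time column to datetime64
df['time'] = pandas.DatetimeIndex(df['time'])
print(df.dtypes)   # time -> datetime64[ns]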
def gen_data_frame(path_str):
    pcap = PcapFile(path_str)
    pcap.info()
    pdf = pcap.query(
        [
            # 'frame.time_epoch',
            'frame.time_delta',
            # 'frame.pkt_len',
            # 'frame.len',
            # 'frame.cap_len',
            # 'frame.marked',
            'ip.src',
            'ip.dst',
            'ip.len',
            'ip.flags',
            # 'ip.flags.rb',
            # 'ip.flags.df',
            # 'ip.flags.mf',
            # 'ip.frag_offset',  # generates unexpected behaviour in
            #                    # steelscript-wireshark
            'ip.ttl',
            # 'ip.proto',
            # 'ip.checksum_good',
            'tcp.srcport',
            'tcp.dstport',
            'tcp.len',
            # 'tcp.nxtseq',
            # 'tcp.hdr_len',
            # 'tcp.flags.cwr',
            # 'tcp.flags.urg',
            # 'tcp.flags.push',
            # 'tcp.flags.syn',
            # 'tcp.window_size',
            # 'tcp.checksum',
            # 'tcp.checksum_good',
            # 'tcp.checksum_bad',
            # 'udp.length',
            # 'udp.checksum_coverage',
            # 'udp.checksum',
            # 'udp.checksum_good',
            # 'udp.checksum_bad',
        ],
        # starttime=pcap.starttime,
        as_dataframe=True)
    print('=======')
    print('pdf len: ' + repr(len(pdf)))
    return pdf
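With the DataFrame in hand, ordinary pandas operations apply; for example, ranking source addresses by total IP bytes ('./capture.pcap' is a placeholder path):

pdf = gen_data_frame('./capture.pcap')   # placeholder path

# ip.len may come back as strings depending on query options;
# cast defensively before aggregating
pdf['ip.len'] = pdf['ip.len'].astype(int)

# Top talkers by total IP bytes
top = (pdf.groupby('ip.src')['ip.len']
          .sum()
          .sort_values(ascending=False)
          .head(10))
print(top)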
from steelscript.packets.core.pcap import PCAPReader, PCAPWriter
from steelscript.packets.core.inetpkt import Ethernet
from steelscript.packets.query.pcap_query import PcapQuery
from steelscript.wireshark.core.pcap import PcapFile

import pandas as pd

pcap_file = PcapFile('./syn_attack.pcap')
print(pcap_file.info())

f_read = open('./syn_attack.pcap', 'rb')
rdr = PCAPReader(f_read)
pkt_type_ethernet = 1

paquetes = rdr.pkts()

'''
Time
First packet: 2013-04-24 22:09:30
Last packet : 2013-04-24 22:10:26
'''
print("Time")
print(" First packet:", str(pd.to_datetime(paquetes[0][0], unit='s')))
print(" Last packet :", str(pd.to_datetime(paquetes[-1][0], unit='s')))

'''
Statistics
Packets : 145