def main(self):
    """Export packets from a capture job or trace clip to a pcap file.

    Requires --jobname or --clipname to select the packet source, and
    either --timerange or both --start/--end to bound the export.
    Optional --filters entries of the form key=value are applied as
    NetShark filters.  Exits via self.parser.error() on bad options.
    """
    if self.options.jobname:
        export_name = self.options.jobname
        source = self.netshark.get_capture_job_by_name(export_name)
    elif self.options.clipname:
        export_name = self.options.clipname
        source = self.netshark.get_trace_clip_by_description(export_name)
    else:
        # BUG FIX: previously this fell through with export_name and
        # source unbound, raising NameError below.  Report a usage
        # error instead, mirroring the time-options handling.
        self.parser.error('Select either --jobname or --clipname')

    filename = self.options.filename
    if not filename:
        filename = '%s_export.pcap' % export_name

    if self.options.timerange:
        timefilter = TimeFilter.parse_range(self.options.timerange)
    elif self.options.start_time and self.options.end_time:
        start = string_to_datetime(float(self.options.start_time))
        end = string_to_datetime(float(self.options.end_time))
        timefilter = TimeFilter(start, end)
    else:
        self.parser.error(
            'Select either --timerange or --start and --end times')

    if self.options.filters:
        # Each filter is given as key=value on the command line.
        kvs = [f.split('=') for f in self.options.filters]
        filters = [NetSharkFilter(r'%s="%s"' % (k, v)) for k, v in kvs]
    else:
        filters = None

    with self.netshark.create_export(source, timefilter,
                                     filters=filters) as e:
        # Single-argument parenthesized print works on both Py2 and Py3.
        print('beginning download to file %s' % filename)
        e.download(filename, overwrite=self.options.overwrite)
def run(self):
    """Download a pcap export of the configured capture job.

    Resolves the NetShark device and capture job from the job criteria,
    makes room in the local pcap cache, runs the export, and returns a
    one-row DataFrame naming the downloaded file.
    """
    crit = self.job.criteria
    device = DeviceManager.get_device(crit.netshark_device)

    self.export_name = str(path_to_class(device,
                                         crit.netshark_source_name))
    capture_job = device.get_capture_job_by_name(self.export_name)
    tf = TimeFilter(crit.starttime, crit.endtime)
    cache_key = Job._compute_handle(self.table, crit)

    # Make sure the pcap cache directory exists before writing into it.
    if not os.path.exists(PCAP_DIR):
        os.mkdir(PCAP_DIR)

    # Evict oldest pcaps until the cache fits under the size limit.
    while self.all_pcap_size > settings.PCAP_SIZE_LIMIT:
        self.delete_oldest_pcap()

    self.filename = add_pcap_dir('%s.pcap' % cache_key)

    # An empty filter list is passed to create_export as None.
    bpf_filters = [BpfFilter(expr)
                   for expr in self.table.options.filters] or None

    with device.create_export(
            capture_job, tf,
            filters=bpf_filters,
            wait_for_data=self.table.options.wait_for_data,
            wait_duration=self.table.options.wait_duration) as export:
        self.download(export)

    return QueryComplete(pandas.DataFrame([dict(filename=self.filename)]))
def test_create_clip(self):
    """Exercise clip creation both directly and via job.add_clip()."""
    shark = self.shark
    first_iface = shark.get_interfaces()[0]
    capture_job = shark.create_job(first_iface, 'test_create_clip', '300M')

    # Time filter spanning the last 24 hours.
    time_filters = [
        TimeFilter(datetime.datetime.now() - datetime.timedelta(1),
                   datetime.datetime.now()),
    ]

    new_clip = shark.create_clip(capture_job, time_filters,
                                 description='test_clip')
    new_clip.delete()

    # Also create a clip from the job via the context-manager API.
    with capture_job.add_clip(time_filters, 'test_add_clip') as clip:
        pass
def create_view(self, src, columns, filters=None, start_time=None,
                end_time=None, name=None, charts=None, sync=True,
                sampling_time_msec=None):
    """
    Create a new view on this NetShark and register it locally.

    :param src: identifies the source of packets to be analyzed.
        It may be any packet source object.
    :param columns: specifies what information is extracted from
        packets and presented in this view.  It should be a list of
        :py:class:`Key <steelscript.netshark.core.types.Key>` and
        :py:class:`Value <steelscript.netshark.core.types.Value>`
        objects.
    :param filters: an optional list of filters that can be used to
        limit which packets from the packet source are processed by
        this view.  The list is not modified by this call.
    :param start_time: optional view start; requires ``end_time``.
    :param end_time: optional view end; requires ``start_time``.
    :param name: optional title for the view.
    :param charts: accepted for API compatibility; not used here.
    :param sync: passed through to view creation.
    :param sampling_time_msec: optional sampling interval.
    :raises ValueError: if only one of start_time/end_time is given.
    :returns: :class:`View4`
    """
    # Exactly-one-of-two is an error (XOR of the None checks).
    if (start_time is None) != (end_time is None):
        raise ValueError('must specify both start and end times')

    # BUG FIX: build a local list instead of appending the TimeFilter
    # to the caller-supplied `filters` list, which mutated the
    # caller's argument as a side effect.
    all_filters = list(filters) if filters is not None else []
    if start_time is not None:
        all_filters.append(TimeFilter(start_time, end_time))

    filterobjs = [filt.bind(self) for filt in all_filters]

    view = self.classes.View._create(
        self, src, columns, filterobjs, name=name, sync=sync,
        sampling_time_msec=sampling_time_msec)

    self._add_view(view)

    return view
def main(self):
    """Report total bytes per capture job across one or more NetSharks.

    Builds an optional time filter from --timerange or
    --starttime/--endtime, queries each NetShark (from the command
    line or a CSV file) for per-job byte counts, and prints a table
    sorted by total bytes, descending.
    """
    if self.options.timerange is not None:
        try:
            timefilter = TimeFilter.parse_range(self.options.timerange)
        except ValueError:
            print("Could not parse time filter expression.")
            return
    elif (self.options.starttime is not None or
          self.options.endtime is not None):
        timeparser = TimeParser()
        if self.options.starttime is None:
            # Open-ended start: use the earliest representable time.
            start_time = datetime.min
        else:
            try:
                start_time = timeparser.parse(self.options.starttime)
            except ValueError:
                print("Could not parse start timestamp")
                return
        if self.options.endtime is None:
            end_time = datetime.now()
        else:
            try:
                end_time = timeparser.parse(self.options.endtime)
            except ValueError:
                print("Could not parse end timestamp")
                return
        timefilter = TimeFilter(start_time, end_time)
    else:
        timefilter = None

    filters = [NetSharkFilter(f) for f in self.options.filters]
    if timefilter is not None:
        filters.append(timefilter)

    if self.options.file is None:
        sharks_info = [[self.options.host, self.options.username,
                        self.options.password]]
    else:
        sharks_info = self.get_csv_sharks_info(self.options.file)

    out_table = []
    for host, username, password in sharks_info:
        shark = NetShark(host, auth=UserAuth(username, password))
        # BUG FIX: query each NetShark once and reuse the result;
        # previously get_jobs_bytes() was called a second time inside
        # the loop below, doubling the queries to every device.
        jobs_bytes = self.get_jobs_bytes(shark, filters)
        if not jobs_bytes:
            print("(No data returned from NetShark {0}.)".format(host))
        else:
            for job_name, job_bytes in jobs_bytes:
                out_table.append([host, job_name, job_bytes])

    if not out_table:
        print("No data found by any NetShark.")
    else:
        # Sort by the byte count column, largest first.
        out_table_sorted = sorted(out_table, reverse=True,
                                  key=operator.itemgetter(2))
        heads = ["NetShark", "Job", "Total bytes"]
        Formatter.print_table(out_table_sorted, heads)
def run(self):
    """Build (or look up) a NetShark view for this table and fetch its data.

    Translates the table's column and criteria configuration into
    NetShark Key/Value columns and filters, creates a view (reusing a
    persistent view by hashed title when requested), waits for a
    non-live view to finish processing, then retrieves and parses the
    rows.  Returns QueryComplete(self.data) on success, or False when
    no NetShark device is selected.
    """
    criteria = self.job.criteria

    self.timeseries = False  # if key column called 'time' is created
    self.column_names = []

    # Resolution comes in as a time_delta
    resolution = timedelta_total_seconds(criteria.resolution)

    default_delta = 1000000000  # one second
    self.delta = int(default_delta * resolution)  # sample size interval

    if criteria.netshark_device == '':
        logger.debug('%s: No netshark device selected' % self.table)
        self.job.mark_error("No NetShark Device Selected")
        return False

    shark = DeviceManager.get_device(criteria.netshark_device)

    logger.debug("Creating columns for NetShark table %d" % self.table.id)

    # Create Key/Value Columns
    columns = []
    for tc in self.table.get_columns(synthetic=False):
        tc_options = tc.options
        if (tc.iskey and tc.name == 'time'
                and tc_options.extractor == 'sample_time'):
            # don't create column, use the sample time for timeseries
            self.timeseries = True
            self.column_names.append('time')
            continue
        elif tc.iskey:
            c = Key(tc_options.extractor,
                    description=tc.label,
                    default_value=tc_options.default_value)
        else:
            if tc_options.operation:
                try:
                    operation = getattr(Operation, tc_options.operation)
                except AttributeError:
                    # Unknown operation name: fall back to sum and warn.
                    operation = Operation.sum
                    print('ERROR: Unknown operation attribute '
                          '%s for column %s.'
                          % (tc_options.operation, tc.name))
            else:
                operation = Operation.none

            c = Value(tc_options.extractor,
                      operation,
                      description=tc.label,
                      default_value=tc_options.default_value)

        self.column_names.append(tc.name)
        columns.append(c)

    # Identify Sort Column
    sortidx = None
    if self.table.sortcols is not None:
        sortcol = Column.objects.get(table=self.table,
                                     name=self.table.sortcols[0])
        sort_name = sortcol.options.extractor
        for i, c in enumerate(columns):
            if c.field == sort_name:
                sortidx = i
                break

    # Initialize filters
    criteria = self.job.criteria

    filters = []

    if hasattr(criteria, 'netshark_filterexpr'):
        logger.debug('calculating netshark filter expression ...')
        filterexpr = self.job.combine_filterexprs(
            exprs=criteria.netshark_filterexpr,
            joinstr="&")
        if filterexpr:
            logger.debug('applying netshark filter expression: %s'
                         % filterexpr)
            filters.append(NetSharkFilter(filterexpr))

    if hasattr(criteria, 'netshark_bpf_filterexpr'):
        # TODO evaluate how to combine multiple BPF filters
        # this will just apply one at a time
        filterexpr = criteria.netshark_bpf_filterexpr
        logger.debug('applying netshark BPF filter expression: %s'
                     % filterexpr)
        filters.append(BpfFilter(filterexpr))

    # Map report resolution to the view sampling interval: 1s -> 1000ms
    # samples, 1ms -> 1ms samples (limited to 1s total duration),
    # anything else defaults to 1000ms samples.
    resolution = criteria.resolution
    if resolution.seconds == 1:
        sampling_time_msec = 1000
    elif resolution.microseconds == 1000:
        sampling_time_msec = 1
        if criteria.duration > parse_timedelta('1s'):
            msg = ("Cannot run a millisecond report with a duration "
                   "longer than 1 second")
            raise ValueError(msg)
    else:
        sampling_time_msec = 1000

    # Get source type from options
    logger.debug("NetShark Source: %s" %
                 self.job.criteria.netshark_source_name)

    source = path_to_class(shark,
                           self.job.criteria.netshark_source_name)

    live = source.is_live()
    persistent = criteria.get('netshark_persistent', False)

    if live and not persistent:
        raise ValueError("Live views must be run with persistent set")

    view = None
    if persistent:
        # First, see a view by this title already exists
        # Title is the table name plus a criteria hash including
        # all criteria *except* the timeframe
        h = hashlib.md5()
        h.update('.'.join([c.name for c in
                           self.table.get_columns()]))
        for k, v in criteria.iteritems():
            if criteria.is_timeframe_key(k):
                continue
            h.update('%s:%s' % (k, v))

        title = '/'.join(['steelscript-appfwk', str(self.table.id),
                          self.table.namespace, self.table.name,
                          h.hexdigest()])
        view = NetSharkViews.find_by_name(shark, title)
        logger.debug("Persistent view title: %s" % title)
    else:
        # Only assign a title for persistent views
        title = None

    timefilter = TimeFilter(start=criteria.starttime,
                            end=criteria.endtime)

    if not view:
        # Not persistent, or not yet created...
        if not live:
            # Cannot attach time filter to a live view,
            # it will be added later at get_data() time
            if criteria.starttime and criteria.endtime:
                filters.append(timefilter)
                logger.info("Setting netshark table %d timeframe to %s"
                            % (self.table.id, str(timefilter)))
            else:
                # if times are set to zero, don't add to filter
                # this will process entire timeframe of source instead
                logger.info("Not setting netshark table %d timeframe"
                            % self.table.id)

        # Create it
        with lock:
            logger.debug("%s: Creating view for table %s"
                         % (str(self), str(self.table)))
            view = shark.create_view(source, columns, filters=filters,
                                     sync=False, name=title,
                                     sampling_time_msec=sampling_time_msec)

        if not live:
            done = False
            logger.debug("Waiting for netshark table %d to complete"
                         % self.table.id)
            # Poll until the view has processed all packets, pushing
            # progress back to the job record as we go.
            while not done:
                time.sleep(0.5)
                with lock:
                    s = view.get_progress()
                    self.job.mark_progress(s)
                    self.job.save()
                    done = view.is_ready()

    logger.debug("Retrieving data for timeframe: %s" % timefilter)

    # Retrieve the data
    with lock:
        getdata_kwargs = {}
        if sortidx:
            getdata_kwargs['sortby'] = sortidx

        if self.table.options.aggregated:
            getdata_kwargs['aggregated'] = self.table.options.aggregated
        else:
            getdata_kwargs['delta'] = self.delta

        if live:
            # For live views, attach the time frame to the get_data()
            getdata_kwargs['start'] = (datetime_to_nanoseconds(
                criteria.starttime))
            getdata_kwargs['end'] = (datetime_to_nanoseconds(
                criteria.endtime))

        self.data = view.get_data(**getdata_kwargs)

        if not persistent:
            view.close()

    # Truncate to the configured row limit, if any.
    if self.table.rows > 0:
        self.data = self.data[:self.table.rows]

    self.parse_data()

    logger.info("NetShark Report %s returned %s rows"
                % (self.job, len(self.data)))

    return QueryComplete(self.data)