def post(self, request, device_type):
    """Render the column-help page for a NetProfiler or NetShark device.

    Validates the posted form, queries the selected device for its
    available columns / extractor fields, and renders them via help.html.
    Raises Http404 for any other device_type.
    """
    if device_type == 'netprofiler':
        device = 'NetProfiler'
        form = NetProfilerInputForm(request.POST)
    elif device_type == 'netshark':
        device = 'NetShark'
        form = NetSharkInputForm(request.POST)
    else:
        raise Http404

    results = None
    if form.is_valid():
        data = form.cleaned_data
        if device_type == 'netprofiler':
            profiler = DeviceManager.get_device(data['device'])
            cols = profiler.search_columns(realms=[data['realm']],
                                           centricities=[data['centricity']],
                                           groupbys=[data['groupby']])
            # two stable sorts: alphabetical by key, then key-columns first
            cols.sort(key=operator.attrgetter('key'))
            cols.sort(key=operator.attrgetter('iskey'), reverse=True)
            results = [(c.iskey, c.key, c.label, c.id) for c in cols]
        elif device_type == 'netshark':
            shark = DeviceManager.get_device(data['device'])
            results = [(f.id, f.description, f.type)
                       for f in shark.get_extractor_fields()]
            results.sort(key=operator.itemgetter(0))

    return render_to_response('help.html',
                              {'device': device,
                               'form': form,
                               'results': results},
                              context_instance=RequestContext(request))
def test_get_devices_with_unknown_module(self):
    """A saved device whose module cannot be loaded raises DeviceModuleNotFound."""
    bad_dev = copy.copy(self.dev)
    bad_dev['id'] = 2
    bad_dev['module'] = 'unknown_module'
    Device(**bad_dev).save()
    with self.assertRaises(DeviceModuleNotFound):
        DeviceManager.get_device(2)
def run(self):
    """Fetch the NetProfiler device list into a DataFrame on self.data.

    Returns False (after marking the job errored) when no netprofiler
    device is selected; True on success.
    """
    criteria = self.job.criteria
    if criteria.netprofiler_device == '':
        logger.debug('%s: No netprofiler device selected' % (self.table))
        self.job.mark_error("No NetProfiler Device Selected")
        return False
    profiler = DeviceManager.get_device(criteria.netprofiler_device)
    columns = [col.name for col in self.table.get_columns(synthetic=False)]
    # This returns an array of rows, one row per device
    # Each row is a dict containing elements such as:
    # id, ipaddr, name, type, type_id, and version
    with lock:
        devicedata = profiler.api.devices.get_all()
    # Convert to a DataFrame to make it easier to work with
    df = pandas.DataFrame(devicedata)
    for col in columns:
        if col not in df:
            # BUG FIX: `col` is already a string here -- the original
            # `col.name` raised AttributeError instead of this KeyError.
            raise KeyError("Devices table has no column '%s'" % col)
    # .ix was removed from pandas; .loc is the label-based equivalent
    df = df.loc[:, columns]
    self.data = df
    logger.info("DeviceTable job %s returning %d devices" %
                (self.job, len(self.data)))
    return True
def netshark_source_name_choices(form, id, field_kwargs, params):
    """ Query netshark for available capture jobs / trace clips. """
    netshark_device = form.get_field_value('netshark_device', id)
    if netshark_device == '':
        label = 'Source'
        choices = [('', '<No netshark device>')]
    else:
        netshark = DeviceManager.get_device(netshark_device)
        # source_type = form.get_field_value('shark_source_type', id)
        source_type = 'job'
        if source_type == 'job':
            label = 'Capture Job'
            choices = [('jobs/' + job.name, job.name)
                       for job in netshark.get_capture_jobs()]
        elif source_type == 'clip':
            # Not tested
            label = 'Trace Clip'
            choices = [(clip, clip) for clip in netshark.get_clips()]
        else:
            raise KeyError('Unknown source type: %s' % source_type)
    field_kwargs['label'] = label
    field_kwargs['choices'] = choices
def add_widgets_to_live_report(report, template_id, widget_query_ids,
                               netprofiler_name=None):
    """Attach one widget per (widget_id, query_id) pair to a live report.

    Uses the named netprofiler device, or the first enabled one when
    no name is given.
    """
    if netprofiler_name:
        netprofiler_id = Device.objects.filter(name=netprofiler_name)[0].id
    else:
        netprofiler_id = Device.objects.\
            filter(enabled=True, module='netprofiler')[0].id
    profiler = DeviceManager.get_device(netprofiler_id)
    live_report = LiveReport(profiler, template_id)
    for widget_id, query_suffix in widget_query_ids.items():
        # match the live-report query whose id ends with the given suffix
        query = [q for q in live_report.queries
                 if q.id.endswith(query_suffix)][0]
        table = NetProfilerLiveTable.create(
            'live-{0}-{1}'.format(template_id, widget_id),
            netprofiler_id=netprofiler_id,
            template_id=template_id,
            query_id=query.id,
            widget_id=widget_id,
            cacheable=False)
        if query.is_time_series:
            widget_cls = yui3.TimeSeriesWidget
            table.add_column('time', 'Time', datatype='time', iskey=True)
        else:
            widget_cls = yui3.TableWidget
        title = 'Template %s Widget %s' % (template_id, widget_id)
        report.add_widget(widget_cls, table, title, width=12)
def run(self):
    """Export packets from a netshark capture job into a local pcap file."""
    criteria = self.job.criteria
    netshark = DeviceManager.get_device(criteria.netshark_device)
    self.export_name = str(
        path_to_class(netshark, criteria.netshark_source_name))
    source = netshark.get_capture_job_by_name(self.export_name)
    timefilter = TimeFilter(criteria.starttime, criteria.endtime)
    handle = Job._compute_handle(self.table, criteria)
    # make sure the pcap directory exists before writing into it
    if not os.path.exists(PCAP_DIR):
        os.mkdir(PCAP_DIR)
    # evict oldest captures until we are back under the size cap
    while self.all_pcap_size > settings.PCAP_SIZE_LIMIT:
        self.delete_oldest_pcap()
    self.filename = add_pcap_dir('%s.pcap' % handle)
    bpf = [BpfFilter(filt) for filt in self.table.options.filters] or None
    with netshark.create_export(
            source, timefilter, filters=bpf,
            wait_for_data=self.table.options.wait_for_data,
            wait_duration=self.table.options.wait_duration) as export:
        self.download(export)
    return QueryComplete(pandas.DataFrame([dict(filename=self.filename)]))
def appresponse_source_choices(form, id_, field_kwargs, params):
    """ Query AppResponse for available capture jobs / files."""
    # most of these results will be cached by the underlying AR object
    ar_id = form.get_field_value('appresponse_device', id_)
    with lock:
        if ar_id == '':
            choices = [('', '<No AppResponse Device>')]
        else:
            ar = DeviceManager.get_device(ar_id)
            choices = []
            for job in ar.capture.get_jobs():
                if job.status == 'RUNNING':
                    choices.append((SourceProxy(job).path, job.name))
            choices.extend((SourceProxy(clip).path, clip.name)
                           for clip in ar.clips.get_clips())
            if params['include_files']:
                choices.extend((SourceProxy(f).path, f.id)
                               for f in ar.fs.get_files())
            if params['include_msa_files_only']:
                # MSA-only mode discards everything collected so far
                choices = [(SourceProxy(f).path, f.id)
                           for f in ar.fs.get_files(force=True)
                           if f.is_msa()]
    field_kwargs['label'] = 'Source'
    field_kwargs['choices'] = choices
def run(self):
    """Pull every row of the configured Sharepoint list into self.data."""
    criteria = self.job.criteria
    if criteria.sharepoint_device == '':
        logger.debug('%s: No sharepoint device selected' % self.table)
        self.job.mark_error("No Sharepoint Device Selected")
        return False
    sp = DeviceManager.get_device(criteria.sharepoint_device)
    site = sp.get_site_object(self.table.options.site_url)
    sp_list = site.lists[self.table.options.list_name]
    fields = [tc.name for tc in self.table.get_columns(synthetic=False)]
    # one row per list item, projected onto the table's columns
    self.data = [[getattr(row, f) for f in fields] for row in sp_list.rows]
    logger.info("SharepointTable job %s returning %s data" %
                (self.job, len(self.data)))
    return True
def analyze(self, jobs):
    """Spin off one dependent query per (netshark, capture job) pair."""
    criteria = self.job.criteria
    base_table = Table.from_ref(
        self.table.options.related_tables['basetable'])
    depjobs = {}
    # For every (shark, job), we spin off a new job to grab the data, then
    # merge everything into one dataframe at the end.
    for dev in Device.objects.filter(module='netshark', enabled=True):
        shark = DeviceManager.get_device(dev.id)
        for capjob in shark.get_capture_jobs():
            # Start with criteria from the primary table -- this gives us
            # endtime, duration and netshark_filterexpr.
            crit = copy.copy(criteria)
            crit.netshark_device = dev.id
            crit.netshark_source_name = 'jobs/' + capjob.name
            crit.resolution = datetime.timedelta(0, 1)
            crit.aggregated = True
            newjob = Job.create(table=base_table, criteria=crit)
            depjobs[newjob.id] = newjob
    return QueryContinue(self.collect, depjobs)
def analyze(self, jobs):
    """Spin off one dependent query per (AppResponse, capture job) pair."""
    criteria = self.job.criteria
    base_table = Table.from_ref(
        self.table.options.related_tables['basetable']
    )
    depjobs = {}
    # For every (ar, job), we spin off a new job to grab the data, then
    # merge everything into one dataframe at the end.
    for dev in Device.objects.filter(module='appresponse', enabled=True):
        ar = DeviceManager.get_device(dev.id)
        for capjob in ar.capture.get_jobs():
            # Start with criteria from the primary table -- this gives us
            # endtime, duration and filterexpr.
            crit = copy.copy(criteria)
            crit.appresponse_device = dev.id
            crit.appresponse_source = 'jobs/' + capjob.id
            crit.granularity = datetime.timedelta(0, 1)
            newjob = Job.create(table=base_table, criteria=crit)
            depjobs[newjob.id] = newjob
    return QueryContinue(self.collect, depjobs)
def run(self):
    """Fetch the data behind one widget of a NetProfiler live report."""
    # For each of the widget, get all the data
    profiler = DeviceManager.get_device(self.table.options.netprofiler_id)
    lr = LiveReport(profiler, template_id=self.table.options.template_id)

    # Figure out columns by querying the widget
    # cols = lr.get_columns(self.table.options.widget_id)

    # Find the query object
    query_idx = lr.get_query_names().index(self.table.options.query_id)
    query = lr.queries[query_idx]

    # refresh the columns of the table
    self._refresh_columns(profiler, report=lr, query=query)
    data = lr.get_data(index=query_idx)

    # ephemeral columns are identified by label, the rest by key
    col_names = []
    for col in query.columns:
        col_names.append(col.label if col.ephemeral else col.key)

    return QueryComplete(pd.DataFrame(columns=col_names, data=data))
def add_widgets_to_live_report(report, template_id, widget_query_ids,
                               netprofiler_name=None):
    """Attach one widget per (widget_id, query_id) pair to a live report.

    Uses the named netprofiler device, or the first enabled one when
    no name is given.
    """
    if netprofiler_name:
        netprofiler_id = Device.objects.filter(name=netprofiler_name)[0].id
    else:
        netprofiler_id = Device.objects.\
            filter(enabled=True, module='netprofiler')[0].id
    profiler = DeviceManager.get_device(netprofiler_id)
    lr = LiveReport(profiler, template_id)
    # .items() instead of py2-only .iteritems(), matching the other
    # copy of this helper in the codebase; behavior is identical.
    for wid, qid in widget_query_ids.items():
        q = [q for q in lr.queries if q.id.endswith(qid)][0]
        t = NetProfilerLiveTable.create('live-{0}-{1}'.format(
            template_id, wid),
            netprofiler_id=netprofiler_id,
            template_id=template_id,
            query_id=q.id,
            widget_id=wid,
            cacheable=False)
        if q.is_time_series:
            widget_cls = yui3.TimeSeriesWidget
            t.add_column('time', 'Time', datatype='time', iskey=True)
        else:
            widget_cls = yui3.TableWidget
        widget_title = 'Template %s Widget %s' % (template_id, wid)
        report.add_widget(widget_cls, t, widget_title, width=12)
def netshark_source_name_choices(form, id, field_kwargs, params):
    """ Query netshark for available capture jobs / trace clips. """
    netshark_device = form.get_field_value('netshark_device', id)
    if netshark_device == '':
        choices = [('', '<No netshark device>')]
    else:
        netshark = DeviceManager.get_device(netshark_device)
        choices = [(job.source_path, job.name)
                   for job in netshark.get_capture_jobs()]
        choices += [(clip.source_path, 'Clip: ' + clip.description)
                    for clip in netshark.get_clips()]
        if params['include_files']:
            choices += [(f.source_path, 'File: ' + f.path)
                        for f in netshark.get_files()]
        if params['include_interfaces']:
            choices += [(iface.source_path, 'If: ' + iface.description)
                        for iface in netshark.get_interfaces()]
    field_kwargs['label'] = 'Source'
    field_kwargs['choices'] = choices
def run(self):
    """Fetch the NetProfiler device list into a DataFrame on self.data.

    Returns False (after marking the job errored) when no netprofiler
    device is selected; True on success.
    """
    criteria = self.job.criteria
    if criteria.netprofiler_device == '':
        logger.debug('%s: No netprofiler device selected' % (self.table))
        self.job.mark_error("No NetProfiler Device Selected")
        return False
    profiler = DeviceManager.get_device(criteria.netprofiler_device)
    columns = [col.name for col in self.table.get_columns(synthetic=False)]
    # This returns an array of rows, one row per device
    # Each row is a dict containing elements such as:
    # id, ipaddr, name, type, type_id, and version
    with lock:
        devicedata = profiler.api.devices.get_all()
    # Convert to a DataFrame to make it easier to work with
    df = pandas.DataFrame(devicedata)
    for col in columns:
        if col not in df:
            # BUG FIX: `col` is already a string here -- the original
            # `col.name` raised AttributeError instead of this KeyError.
            raise KeyError("Devices table has no column '%s'" % col)
    # .ix was removed from pandas; .loc is the label-based equivalent
    df = df.loc[:, columns]
    self.data = df
    logger.info("DeviceTable job %s returning %d devices" %
                (self.job, len(self.data)))
    return True
def netshark_source_name_choices(form, id_, field_kwargs, params):
    """ Query netshark for available capture jobs / trace clips. """
    netshark_device = form.get_field_value('netshark_device', id_)
    if netshark_device == '':
        choices = [('', '<No netshark device>')]
    else:
        netshark = DeviceManager.get_device(netshark_device)
        choices = []
        choices.extend((job.source_path, job.name)
                       for job in netshark.get_capture_jobs())
        choices.extend((clip.source_path, 'Clip: ' + clip.description)
                       for clip in netshark.get_clips())
        if params['include_files']:
            choices.extend((f.source_path, 'File: ' + f.path)
                           for f in netshark.get_files())
        if params['include_interfaces']:
            choices.extend((iface.source_path, 'If: ' + iface.description)
                           for iface in netshark.get_interfaces())
    field_kwargs['label'] = 'Source'
    field_kwargs['choices'] = choices
def run(self):
    """Export packets from a netshark capture job into a local pcap file."""
    criteria = self.job.criteria
    shark = DeviceManager.get_device(criteria.netshark_device)
    self.export_name = str(
        path_to_class(shark, criteria.netshark_source_name))
    capture_job = shark.get_capture_job_by_name(self.export_name)
    tf = TimeFilter(criteria.starttime, criteria.endtime)
    handle = Job._compute_handle(self.table, criteria)
    # check if pcaps directory exists, if not make the directory
    if not os.path.exists(PCAP_DIR):
        os.mkdir(PCAP_DIR)
    # delete old captures until total size is under the configured limit
    while self.all_pcap_size > settings.PCAP_SIZE_LIMIT:
        self.delete_oldest_pcap()
    self.filename = add_pcap_dir('%s.pcap' % handle)
    bpf_filters = ([BpfFilter(f) for f in self.table.options.filters]
                   or None)
    with shark.create_export(
            capture_job, tf, filters=bpf_filters,
            wait_for_data=self.table.options.wait_for_data,
            wait_duration=self.table.options.wait_duration) as export:
        self.download(export)
    return QueryComplete(pandas.DataFrame([dict(filename=self.filename)]))
def _prepare_report_args(self):
    """Assemble the argument bundle for a NetProfiler report run.

    Returns an Args namespace object carrying profiler, columns,
    sortcol, timefilter, datafilter, trafficexpr, resolution, limit
    and centricity; returns False (after marking the job errored)
    when no netprofiler device is selected.
    """
    # ad-hoc namespace for the collected report arguments
    class Args(object):
        pass
    args = Args()
    criteria = self.job.criteria
    if criteria.netprofiler_device == '':
        logger.debug('%s: No netprofiler device selected' % self.table)
        self.job.mark_error("No NetProfiler Device Selected")
        return False
    args.profiler = DeviceManager.get_device(criteria.netprofiler_device)
    args.columns = [
        col.name for col in self.table.get_columns(synthetic=False)
    ]
    # only the first sort column (if any) is used
    args.sortcol = None
    if self.table.sortcols is not None:
        args.sortcol = self.table.sortcols[0]
    args.timefilter = TimeFilter(start=criteria.starttime,
                                 end=criteria.endtime)
    logger.info("Running NetProfiler table %d report for timeframe %s" %
                (self.table.id, str(args.timefilter)))
    # datafilter is an optional comma-separated criteria field
    if ('datafilter' in criteria) and (criteria.datafilter is not None):
        args.datafilter = criteria.datafilter.split(',')
    else:
        args.datafilter = None
    args.trafficexpr = TrafficFilter(
        self.job.combine_filterexprs(
            exprs=criteria.netprofiler_filterexpr))
    # Incoming criteria.resolution is a timedelta
    logger.debug('NetProfiler report got criteria resolution %s (%s)' %
                 (criteria.resolution, type(criteria.resolution)))
    if criteria.resolution != 'auto':
        # map the timedelta's seconds onto a profiler resolution name
        rsecs = int(timedelta_total_seconds(criteria.resolution))
        args.resolution = Report.RESOLUTION_MAP[rsecs]
    else:
        args.resolution = 'auto'
    logger.debug('NetProfiler report using resolution %s (%s)' %
                 (args.resolution, type(args.resolution)))
    args.limit = (self.table.options.limit if hasattr(
        self.table.options, 'limit') else None)
    # interface-centric tables use 'int' centricity, host-centric 'hos'
    if getattr(self.table.options, 'interface', False):
        args.centricity = 'int'
    else:
        args.centricity = 'hos'
    return args
def test_get_devices(self):
    """get_device returns an instance wired with host/port/credentials."""
    with patch("dev_pkg.new_device_instance", MockDevice):
        device = DeviceManager.get_device(1)
        expected = self.dev
        self.assertEqual(device.host, expected['host'])
        self.assertEqual(device.port, expected['port'])
        self.assertEqual(device.auth.username, expected['username'])
        self.assertEqual(device.auth.password, expected['password'])
def post(self, request, data_type):
    """Render the AppResponse help page for column or source metadata.

    Raises Http404 for any data_type other than 'columns' or 'sources'.
    """
    if data_type not in ['columns', 'sources']:
        raise Http404
    device = 'AppResponse'
    if data_type == 'columns':
        form = AppResponseColumnsInputForm(request.POST)
    else:
        form = AppResponseInputForm(request.POST)
    results = None
    if form.is_valid():
        data = form.cleaned_data
        ar = DeviceManager.get_device(data['device'])
        if data_type == 'columns':
            rawcols = ar.reports.sources[data['source']]['columns']
            for col in rawcols.values():
                if 'synthesized' in col:
                    synth = col['synthesized']
                    if isinstance(synth, dict):
                        # flatten the dict into "k: v, k: v" display text;
                        # .items() instead of py2-only .iteritems()
                        col['synthesized'] = \
                            (', '.join(['{}: {}'.format(k, v)
                                        for k, v in synth.items()]))
            colkeys = [
                'id', 'field', 'label', 'metric', 'type', 'unit',
                'description', 'synthesized', 'grouped_by'
            ]
            # list() so dict views work with the DataFrame constructor
            coldf = pandas.DataFrame(list(rawcols.values()),
                                     columns=colkeys)
            coldf.fillna('---', inplace=True)
            coldf['iskey'] = coldf['grouped_by'].apply(
                lambda x: True if x is True else '---')
            coldf.sort_values(by='id', inplace=True)
            results = list(coldf.to_records(index=False))
        else:
            colkeys = [
                'name', 'filters_on_metrics', 'granularities', 'groups'
            ]
            coldf = pandas.DataFrame(list(ar.reports.sources.values()),
                                     columns=colkeys)
            coldf['groups'] = coldf['name'].apply(
                lambda x: ', '.join(report_source_to_groups[x]))
            coldf.sort_values(by='name', inplace=True)
            results = list(coldf.to_records(index=False))
    serialized_sources = json.dumps(report_sources)
    return render_to_response('help.html', {
        'device': device,
        'report_sources': serialized_sources,
        'data_type': data_type,
        'form': form,
        'results': results
    }, context_instance=RequestContext(request))
def _prepare_report_args(self):
    """Assemble the argument bundle for a NetProfiler report run.

    Returns an Args namespace object carrying profiler, columns,
    sortcol, timefilter, datafilter, trafficexpr, resolution, limit
    and centricity; returns False (after marking the job errored)
    when no netprofiler device is selected.
    """
    # ad-hoc namespace for the collected report arguments
    class Args(object):
        pass
    args = Args()
    criteria = self.job.criteria
    if criteria.netprofiler_device == '':
        logger.debug('%s: No netprofiler device selected' % self.table)
        self.job.mark_error("No NetProfiler Device Selected")
        return False
    args.profiler = DeviceManager.get_device(criteria.netprofiler_device)
    args.columns = [col.name
                    for col in self.table.get_columns(synthetic=False)]
    # only the first sort column (if any) is used
    args.sortcol = None
    if self.table.sortcols is not None:
        args.sortcol = self.table.sortcols[0]
    args.timefilter = TimeFilter(start=criteria.starttime,
                                 end=criteria.endtime)
    logger.info("Running NetProfiler table %d report for timeframe %s" %
                (self.table.id, str(args.timefilter)))
    # datafilter is an optional comma-separated criteria field
    if ('datafilter' in criteria) and (criteria.datafilter is not None):
        args.datafilter = criteria.datafilter.split(',')
    else:
        args.datafilter = None
    args.trafficexpr = TrafficFilter(
        self.job.combine_filterexprs(exprs=criteria.netprofiler_filterexpr)
    )
    # Incoming criteria.resolution is a timedelta
    logger.debug('NetProfiler report got criteria resolution %s (%s)' %
                 (criteria.resolution, type(criteria.resolution)))
    if criteria.resolution != 'auto':
        # map the timedelta's seconds onto a profiler resolution name
        rsecs = int(timedelta_total_seconds(criteria.resolution))
        args.resolution = Report.RESOLUTION_MAP[rsecs]
    else:
        args.resolution = 'auto'
    logger.debug('NetProfiler report using resolution %s (%s)' %
                 (args.resolution, type(args.resolution)))
    args.limit = (self.table.options.limit
                  if hasattr(self.table.options, 'limit') else None)
    # interface-centric tables use 'int' centricity, host-centric 'hos'
    if getattr(self.table.options, 'interface', False):
        args.centricity = 'int'
    else:
        args.centricity = 'hos'
    return args
def run(self):
    """Execute a CLI command on the selected device in enable mode."""
    dev_record = self.job.criteria.dev
    command = self.job.criteria.command
    device = DeviceManager.get_device(dev_record.id)
    output = device.cli.exec_command(command, mode=CLIMode.ENABLE)
    return QueryComplete([dict(dev_name=dev_record.name, output=output)])
def run(self):
    """Return the steelhead flow summary, one record per flow category."""
    sh = DeviceManager.get_device(self.job.criteria.steelhead_device)
    flows = Model.get(sh, feature='flows')
    res = flows.show_flows('all')
    # tag each summary record with its category key;
    # .items() instead of py2-only .iteritems() for py3 compatibility
    for k, v in res['flows_summary'].items():
        v['category'] = k
    # list() so the result is a concrete list on py3 as well
    return QueryComplete(list(res['flows_summary'].values()))
def post(self, request, data_type):
    """Render the AppResponse help page for column or source metadata.

    Raises Http404 for any data_type other than 'columns' or 'sources'.
    """
    if data_type not in ['columns', 'sources']:
        raise Http404
    device = 'AppResponse'
    if data_type == 'columns':
        form = AppResponseColumnsInputForm(request.POST)
    else:
        form = AppResponseInputForm(request.POST)
    results = None
    if form.is_valid():
        data = form.cleaned_data
        ar = DeviceManager.get_device(data['device'])
        if data_type == 'columns':
            rawcols = ar.reports.sources[data['source']]['columns']
            for col in rawcols.values():
                if 'synthesized' in col:
                    synth = col['synthesized']
                    if isinstance(synth, dict):
                        # flatten the dict into "k: v, k: v" display text;
                        # .items() instead of py2-only .iteritems()
                        col['synthesized'] = \
                            (', '.join(['{}: {}'.format(k, v)
                                        for k, v in synth.items()]))
            colkeys = ['id', 'field', 'label', 'metric', 'type', 'unit',
                       'description', 'synthesized', 'grouped_by']
            # list() so dict views work with the DataFrame constructor
            coldf = pandas.DataFrame(list(rawcols.values()),
                                     columns=colkeys)
            coldf.fillna('---', inplace=True)
            coldf['iskey'] = coldf['grouped_by'].apply(
                lambda x: True if x is True else '---')
            coldf.sort_values(by='id', inplace=True)
            results = list(coldf.to_records(index=False))
        else:
            colkeys = ['name', 'filters_on_metrics', 'granularities',
                       'groups']
            coldf = pandas.DataFrame(list(ar.reports.sources.values()),
                                     columns=colkeys)
            coldf['groups'] = coldf['name'].apply(
                lambda x: ', '.join(report_source_to_groups[x]))
            coldf.sort_values(by='name', inplace=True)
            results = list(coldf.to_records(index=False))
    serialized_sources = json.dumps(report_sources)
    return render_to_response('help.html',
                              {'device': device,
                               'report_sources': serialized_sources,
                               'data_type': data_type,
                               'form': form,
                               'results': results},
                              context_instance=RequestContext(request))
def run(self):
    """Run an SCC service report and return its data as a DataFrame.

    Returns False (after marking the job errored) when no SCC device
    is selected; otherwise QueryComplete with the projected data (or
    None when the report produced no frame).
    """
    criteria = self.job.criteria
    if criteria.scc_device == '':
        logger.debug('%s: No scc device selected' % (self.table))
        self.job.mark_error("No SCC Device Selected")
        return False
    columns = [col.name for col in self.table.get_columns(synthetic=False)]
    scc = DeviceManager.get_device(criteria.scc_device)
    # obtain the report class definition
    report_cls = get_scc_report_class(self.service, self.resource)
    # instantiate a report object
    report_obj = report_cls(scc)
    # Build criteria kwargs
    kwargs = {}
    for name in set(report_obj.required_fields +
                    report_obj.non_required_fields):
        # criteria has attrs as starttime, endtime
        # which maps to start_time and end_time
        # referenced in a SCC service
        if name in ['start_time', 'end_time']:
            name_in_criteria = name.replace('_', '')
        else:
            name_in_criteria = name
        if hasattr(criteria, name_in_criteria):
            kwargs[name] = getattr(criteria, name_in_criteria)
    report_obj.run(**kwargs)
    df = self.extract_dataframe(report_obj.data)
    if df is not None:
        for col in columns:
            if col not in df:
                raise KeyError("Table %s has no column '%s'" %
                               (self.job.table.name, col))
        # .ix was removed from pandas; .loc is the label-based equivalent
        df = df.loc[:, columns]
        self.data = df
        logger.info("SCC job %s returning %d rows of data" %
                    (self.job, len(self.data)))
    else:
        self.data = None
    return QueryComplete(self.data)
def netprofiler_live_templates(form, id, field_kwargs):
    """Query netprofiler for available live templates. """
    netprofiler_device = form.get_field_value('netprofiler_device', id)
    if netprofiler_device == '':
        choices = [('', '<No netprofiler device>')]
    else:
        netprofiler = DeviceManager.get_device(netprofiler_device)
        templates = netprofiler.api.templates.get_live_templates()
        choices = [(t['id'], t['name']) for t in templates]
    field_kwargs['choices'] = choices
    field_kwargs['label'] = 'Live Template'
def post(self, request, device_type):
    """Render the column-help page for a NetProfiler or NetShark device.

    Raises Http404 for any other device_type.
    """
    if device_type == 'netprofiler':
        device = 'NetProfiler'
        form = NetProfilerInputForm(request.POST)
    elif device_type == 'netshark':
        device = 'NetShark'
        form = NetSharkInputForm(request.POST)
    else:
        raise Http404

    results = None
    if form.is_valid():
        data = form.cleaned_data
        if device_type == 'netprofiler':
            profiler = DeviceManager.get_device(data['device'])
            found = profiler.search_columns(
                realms=[data['realm']],
                centricities=[data['centricity']],
                groupbys=[data['groupby']])
            # two stable sorts: alphabetical by key, key-columns first
            found.sort(key=operator.attrgetter('key'))
            found.sort(key=operator.attrgetter('iskey'), reverse=True)
            results = [(c.iskey, c.key, c.label, c.id) for c in found]
        elif device_type == 'netshark':
            shark = DeviceManager.get_device(data['device'])
            results = [(f.id, f.description, f.type)
                       for f in shark.get_extractor_fields()]
            results.sort(key=operator.itemgetter(0))

    return render_to_response('help.html', {
        'device': device,
        'form': form,
        'results': results
    }, context_instance=RequestContext(request))
def run(self):
    """Collect steelhead bandwidth stats for all three directions."""
    sh = DeviceManager.get_device(self.job.criteria.steelhead_device)
    stats = Model.get(sh, feature='stats')
    duration = self.job.criteria.duration
    results = []
    for direction in ['lan-to-wan', 'wan-to-lan', 'bi-directional']:
        entry = stats.show_stats_bandwidth('all', direction, duration)
        entry['direction'] = direction
        results.append(entry)
    return QueryComplete(results)
def run(self):
    """Flatten a template's widget configuration into a DataFrame."""
    criteria = self.job.criteria
    profiler = DeviceManager.get_device(criteria.netprofiler_device)
    widget_config = profiler.api.templates.get_config(criteria.template_id)
    recs = []
    for w in widget_config:
        # merge identity, widget-level and config-level fields
        rec = {'template_id': str(criteria.template_id)}
        rec.update((k, w[k]) for k in ['widget_id', 'title'])
        rec.update((k, w['config'][k])
                   for k in ['widget_type', 'visualization', 'datasource'])
        recs.append(rec)
    return QueryComplete(pd.DataFrame(recs))
def netprofiler_application_choices(form, id, field_kwargs, params):
    """Populate the Application choice field from the netprofiler's apps."""
    netprofiler_device = form.get_field_value('netprofiler_device', id)
    if netprofiler_device == '':
        choices = [('', '<No netprofiler device>')]
    else:
        netprofiler = DeviceManager.get_device(netprofiler_device)
        # fetch all applications, then keep only the name for both the
        # stored value and the display label
        choices = [(app['name'], app['name'])
                   for app in get_netprofiler_apps(netprofiler)]
    field_kwargs['label'] = 'Application'
    field_kwargs['choices'] = choices
def netprofiler_hostgroup_types(form, id, field_kwargs, params):
    """ Query netprofiler for all hostgroup types. """
    netprofiler_device = form.get_field_value('netprofiler_device', id)
    if netprofiler_device == '':
        choices = [('', '<No netprofiler device>')]
    else:
        netprofiler = DeviceManager.get_device(netprofiler_device)
        choices = [(hgt['name'], hgt['name'])
                   for hgt in netprofiler.api.host_group_types.get_all()]
    field_kwargs['label'] = 'HostGroupType'
    field_kwargs['choices'] = choices
def run(self):
    """Flatten a template's widget configuration into a DataFrame."""
    criteria = self.job.criteria
    profiler = DeviceManager.get_device(criteria.netprofiler_device)
    widget_config = profiler.api.templates.get_config(criteria.template_id)
    recs = []
    for w in widget_config:
        dict0 = {'template_id': str(criteria.template_id)}
        dict1 = dict((k, w[k]) for k in ['widget_id', 'title'])
        dict2 = dict(
            (k, w['config'][k])
            for k in ['widget_type', 'visualization', 'datasource'])
        # .items() instead of py2-only .iteritems(), matching the
        # dict.items() usage elsewhere in this file
        recs.append(
            dict((k, v) for d in [dict0, dict1, dict2]
                 for k, v in d.items()))
    return QueryComplete(pd.DataFrame(recs))
def run(self):
    """Invoke a configured model method on a steelhead and stringify results.

    The model class, feature, method name and positional args all come
    from the table options; scalar results are wrapped into a list.
    """
    obj_class = self.table.options.obj_class
    feature = self.table.options.feature
    method = self.table.options.method
    args = self.table.options.args
    sh = DeviceManager.get_device(self.job.criteria.steelhead_device)
    obj = obj_class.get(sh, feature=feature)
    res = getattr(obj, method)(*args)
    if not isinstance(res, list):
        res = [res]
    # stringify every value for display;
    # .items() instead of py2-only .iteritems() for py3 compatibility
    # (replacing values of existing keys during iteration is safe)
    for e in res:
        for k, v in e.items():
            e[k] = str(v)
    return QueryComplete(res)
def run(self):
    """Stub Solarwinds query: validates device selection, no data yet.

    Returns False (after marking the job errored) when no solarwinds
    device is selected; True otherwise.
    """
    criteria = self.job.criteria
    if criteria.solarwinds_device == '':
        logger.debug('%s: No solarwinds device selected' % self.table)
        self.job.mark_error("No Solarwinds Device Selected")
        return False
    sw = DeviceManager.get_device(criteria.solarwinds_device)
    # TODO add queries
    self.data = None
    # BUG FIX: len(self.data) raised TypeError while self.data is None;
    # report a length of 0 until real queries are added
    logger.info("SolarwindsTable job %s returning %s data" %
                (self.job,
                 len(self.data) if self.data is not None else 0))
    return True
def run(self):
    """Summarize every capture job on every enabled netshark device."""
    res = []
    for sk in Device.objects.filter(enabled=True, module='netshark'):
        sk_dev = DeviceManager.get_device(sk.id)
        for job in sk_dev.get_capture_jobs():
            config = job.data['config']
            status = job.data['status']
            start = str(nsec_string_to_datetime(job.packet_start_time))
            end = str(nsec_string_to_datetime(job.packet_end_time))
            # truncate long BPF filters for display
            bpf_filter = config.get('bpf_filter', '')
            if len(bpf_filter) > self.MAX_LENGTH:
                bpf_filter = bpf_filter[:self.MAX_LENGTH - 2] + '...'
            pkts_dropped = job.get_stats()['packets_dropped']
            pkts_written = job.get_stats()['packets_written']
            res.append(dict(
                netshark=sk.name,
                job_id=job.data['id'],
                job_name=config['name'],
                interface=config['interface_name'],
                state=status['state'],
                size=status['packet_size'],
                start_time=start,
                end_time=end,
                bpf_filter=bpf_filter,
                dpi_enabled=str(job.dpi_enabled),
                index_enabled=str(job.index_enabled),
                last_sec_dropped=pkts_dropped['last_second'],
                last_min_dropped=pkts_dropped['last_minute'],
                last_hr_dropped=pkts_dropped['last_hour'],
                last_sec_written=pkts_written['last_second'],
                last_min_written=pkts_written['last_minute'],
                last_hr_written=pkts_written['last_hour']))
    return QueryComplete(pandas.DataFrame(res))
def run(self):
    """Summarize every capture job on every enabled netshark device."""
    rows = []
    sharks = Device.objects.filter(enabled=True, module='netshark')
    for shark in sharks:
        shark_dev = DeviceManager.get_device(shark.id)
        for job in shark_dev.get_capture_jobs():
            cfg = job.data['config']
            # truncate long BPF filters for display
            bpf_filter = cfg.get('bpf_filter', '')
            if len(bpf_filter) > self.MAX_LENGTH:
                bpf_filter = bpf_filter[:self.MAX_LENGTH - 2] + '...'
            pkts_dropped = job.get_stats()['packets_dropped']
            pkts_written = job.get_stats()['packets_written']
            rows.append(dict(
                netshark=shark.name,
                job_id=job.data['id'],
                job_name=cfg['name'],
                interface=cfg['interface_name'],
                state=job.data['status']['state'],
                size=job.data['status']['packet_size'],
                start_time=str(
                    nsec_string_to_datetime(job.packet_start_time)),
                end_time=str(
                    nsec_string_to_datetime(job.packet_end_time)),
                bpf_filter=bpf_filter,
                dpi_enabled=str(job.dpi_enabled),
                index_enabled=str(job.index_enabled),
                last_sec_dropped=pkts_dropped['last_second'],
                last_min_dropped=pkts_dropped['last_minute'],
                last_hr_dropped=pkts_dropped['last_hour'],
                last_sec_written=pkts_written['last_second'],
                last_min_written=pkts_written['last_minute'],
                last_hr_written=pkts_written['last_hour']))
    return QueryComplete(pandas.DataFrame(rows))
def netshark_source_choices(form, id_, field_kwargs, params):
    """Query netshark for available capture jobs / trace clips."""
    # simplified clone from base netshark datasource that allows for
    # custom field names
    netshark_device = form.get_field_value(params['field'], id_)
    if netshark_device == '':
        choices = [('', '<No netshark device>')]
    else:
        netshark = DeviceManager.get_device(netshark_device)
        choices = [(job.source_path, job.name)
                   for job in netshark.get_capture_jobs()]
        choices += [(clip.source_path, 'Clip: ' + clip.description)
                    for clip in netshark.get_clips()]
    field_kwargs['choices'] = choices
def appresponse_source_choices(form, id_, field_kwargs, params):
    """ Query AppResponse for available capture jobs / files."""
    ar_id = form.get_field_value('appresponse_device', id_)
    if ar_id == '':
        choices = [('', '<No AppResponse Device>')]
    else:
        ar = DeviceManager.get_device(ar_id)
        # only running capture jobs are offered as sources
        choices = [(SourceProxy(job).path, job.name)
                   for job in ar.capture.get_jobs()
                   if job.status == 'RUNNING']
        if params['include_files']:
            choices += [(SourceProxy(f).path, f.id)
                        for f in ar.fs.get_files()]
    field_kwargs['label'] = 'Source'
    field_kwargs['choices'] = choices
def netshark_msa_file_choices(form, id_, field_kwargs, params):
    """Query netshark for available MSA files."""
    netshark_device = form.get_field_value('netshark_device', id_)
    if netshark_device == '':
        choices = [('', '<No netshark device>')]
    else:
        netshark = DeviceManager.get_device(netshark_device)
        # TODO - this enables automatic refresh
        # whenever the report or criteria get reloaded, but
        # it will cause some extra load when the report runs
        # since each time a TableForm gets created all the files
        # will be re-queried
        choices = [(f.source_path, 'MSA File: ' + f.path)
                   for f in netshark.get_files(force_refetch=True)
                   if hasattr(f, 'list_linked_files')]
    field_kwargs['choices'] = choices
def netprofiler_hostgroups(form, id, field_kwargs, params):
    """ Query netprofiler for groups within a given hostgroup. """
    netprofiler_device = form.get_field_value('netprofiler_device', id)
    if netprofiler_device == '':
        choices = [('', '<No netprofiler device>')]
    else:
        netprofiler = DeviceManager.get_device(netprofiler_device)
        # the hostgroup type comes either from params or from the form
        if params is not None and 'hostgroup_type' in params:
            type_name = params['hostgroup_type']
        else:
            type_name = form.get_field_value('hostgroup_type', id)
        hgt = HostGroupType.find_by_name(netprofiler, type_name)
        choices = [(group, group) for group in hgt.groups.keys()]
    field_kwargs['label'] = 'HostGroup'
    field_kwargs['choices'] = choices
def run(self):
    """Fetch the data behind one widget of a NetProfiler live report."""
    # For each of the widget, get all the data
    profiler = DeviceManager.get_device(self.table.options.netprofiler_id)
    lr = LiveReport(profiler, template_id=self.table.options.template_id)

    # Figure out columns by querying the widget
    # cols = lr.get_columns(self.table.options.widget_id)

    # Find the query object
    query_idx = lr.get_query_names().index(self.table.options.query_id)
    query = lr.queries[query_idx]

    # refresh the columns of the table
    self._refresh_columns(profiler, report=lr, query=query)
    data = lr.get_data(index=query_idx)

    # ephemeral columns are identified by label, the rest by key
    col_names = [c.label if c.ephemeral else c.key for c in query.columns]

    return QueryComplete(pd.DataFrame(columns=col_names, data=data))
def run(self):
    """Run an AppResponse report for this job and return a DataFrame.

    Builds a DataDef from the table's columns and criteria, runs the
    report against the selected AppResponse device, renames the result
    columns back to table column names, and coerces numeric/time columns
    so that unavailable markers like '#N/D' become None.
    """
    criteria = self.job.criteria
    ar = DeviceManager.get_device(criteria.appresponse_device)

    if self.table.options.source == 'packets':
        source_name = criteria.appresponse_source

        # BUG FIX: str.lstrip() removes a *set of characters*, not a
        # literal prefix, so any id beginning with a character from the
        # prefix was silently truncated.  Slice off the prefix instead.
        if source_name.startswith(SourceProxy.JOB_PREFIX):
            job_id = source_name[len(SourceProxy.JOB_PREFIX):]
            source = SourceProxy(ar.capture.get_job_by_id(job_id))
        else:
            file_id = source_name[len(SourceProxy.FILE_PREFIX):]
            source = SourceProxy(ar.fs.get_file_by_id(file_id))
    else:
        source = SourceProxy(name=self.table.options.source)

    # Map extractor field name -> table column name, and build the
    # Key/Value extractor list for the data definition.
    col_extractors, col_names = [], {}
    for col in self.table.get_columns(synthetic=False):
        col_names[col.options.extractor] = col.name
        if col.iskey:
            col_extractors.append(Key(col.options.extractor))
        else:
            col_extractors.append(Value(col.options.extractor))

    # If the data source is of file type and entire PCAP is set True,
    # then set start/end times to None.
    # NOTE(review): `source` is always a SourceProxy at this point, so
    # this isinstance check against File looks like it can never match;
    # confirm intended behavior against SourceProxy/File semantics.
    if isinstance(source, File) and criteria.entire_pcap:
        start = None
        end = None
    else:
        start = datetime_to_seconds(criteria.starttime)
        end = datetime_to_seconds(criteria.endtime)

    granularity = criteria.granularity.total_seconds()

    data_def = DataDef(source=source,
                       columns=col_extractors,
                       granularity=str(granularity),
                       start=start,
                       end=end)

    report = Report(ar)
    report.add(data_def)
    report.run()

    df = report.get_dataframe()
    # Rename from extractor field names back to table column names
    df.columns = map(lambda x: col_names[x], df.columns)

    def to_int(x):
        # Non-numeric placeholders (e.g. '#N/D') become None
        return x if str(x).isdigit() else None

    def to_float(x):
        # Accepts at most one decimal point; anything else becomes None
        return x if str(x).replace('.', '', 1).isdigit() else None

    # Numerical columns can be returned as '#N/D' when not available.
    # Convert them to None to help sorting.
    for col in self.table.get_columns(synthetic=False):
        if col.datatype == Column.DATATYPE_FLOAT:
            df[col.name] = df[col.name].apply(to_float)
        elif col.datatype == Column.DATATYPE_INTEGER:
            df[col.name] = df[col.name].apply(to_int)
        elif col.datatype == Column.DATATYPE_TIME:
            if granularity < 1:
                # Sub-second epoch values arrive as strings and must be
                # converted to float for proper handling
                df[col.name] = df[col.name].apply(float)

    if self.table.options.sort_col_name:
        df.sort(columns=self.table.options.sort_col_name,
                ascending=self.table.options.ascending,
                inplace=True)

    return QueryComplete(df)
def run(self):
    """Main execution method.

    Builds NetShark Key/Value column definitions and filters from the
    table configuration and job criteria, then resolves the capture
    source.  Marks the job as errored and returns False when no netshark
    device is selected in the criteria.
    """
    criteria = self.job.criteria

    # Set True later if a key column named 'time' is created
    self.timeseries = False
    self.column_names = []

    # Resolution comes in as a time_delta
    resolution = timedelta_total_seconds(criteria.resolution)
    default_delta = 1000000000   # one second, in nanoseconds
    # Sample size interval, in nanoseconds
    self.delta = int(default_delta * resolution)

    if criteria.netshark_device == '':
        logger.debug('%s: No netshark device selected' % self.table)
        self.job.mark_error("No NetShark Device Selected")
        return False

    #self.fake_run()
    #return True

    shark = DeviceManager.get_device(criteria.netshark_device)

    logger.debug("Creating columns for NetShark table %d" % self.table.id)

    # Create Key/Value Columns from the table's non-synthetic columns
    columns = []
    for tc in self.table.get_columns(synthetic=False):
        tc_options = tc.options
        if (tc.iskey and tc.name == 'time'
                and tc_options.extractor == 'sample_time'):
            # Don't create a column; use the sample time for timeseries
            self.timeseries = True
            self.column_names.append('time')
            continue
        elif tc.iskey:
            c = Key(tc_options.extractor,
                    description=tc.label,
                    default_value=tc_options.default_value)
        else:
            if tc_options.operation:
                try:
                    operation = getattr(Operation, tc_options.operation)
                except AttributeError:
                    # Unknown operation name: fall back to sum
                    operation = Operation.sum
                    print ('ERROR: Unknown operation attribute '
                           '%s for column %s.'
                           % (tc_options.operation, tc.name))
            else:
                operation = Operation.none
            c = Value(tc_options.extractor,
                      operation,
                      description=tc.label,
                      default_value=tc_options.default_value)
            self.column_names.append(tc.name)
        columns.append(c)

    # Identify the sort column: find the index of the column whose
    # extractor matches the table's first sort column
    sortidx = None
    if self.table.sortcols is not None:
        sortcol = Column.objects.get(table=self.table,
                                     name=self.table.sortcols[0])
        sort_name = sortcol.options.extractor
        for i, c in enumerate(columns):
            if c.field == sort_name:
                sortidx = i
                break

    # Initialize filters from the job criteria
    criteria = self.job.criteria
    filters = []
    filterexpr = self.job.combine_filterexprs(
        exprs=criteria.netshark_filterexpr,
        joinstr="&"
    )
    if filterexpr:
        filters.append(NetSharkFilter(filterexpr))

    tf = TimeFilter(start=criteria.starttime,
                    end=criteria.endtime)
    filters.append(tf)

    logger.info("Setting netshark table %d timeframe to %s"
                % (self.table.id, str(tf)))

    # Get source type from options.  The lock serializes access to the
    # shared netshark connection.
    try:
        with lock:
            source = path_to_class(shark,
                                   self.job.criteria.netshark_source_name)
    except RvbdHTTPException, e:
        source = None
        raise e
def run(self):
    """Run a NetProfiler service-by-location report for this job.

    Polls the profiler until the report completes, updates the job
    criteria to the actual query time window, creates ephemeral string
    columns for each key in the result, and optionally maps service
    health states to color names.  Returns False on missing device,
    otherwise a QueryComplete with the resulting DataFrame (or None
    when the report produced no rows).
    """
    criteria = self.job.criteria

    if criteria.netprofiler_device == '':
        logger.debug('%s: No netprofiler device selected' % self.table)
        self.job.mark_error("No NetProfiler Device Selected")
        return False

    profiler = DeviceManager.get_device(criteria.netprofiler_device)
    report = ServiceLocationReport(profiler)

    timefilter = TimeFilter(start=criteria.starttime,
                            end=criteria.endtime)
    logger.info(
        'Running NetProfilerServiceByLocTable %d report for timeframe %s'
        % (self.table.id, str(timefilter)))

    with lock:
        report.run(timefilter=timefilter, sync=False)

    # Poll until the profiler reports completion, relaying progress.
    logger.info("Waiting for report to complete")
    while True:
        time.sleep(0.5)
        with lock:
            status = report.status()
        self.job.mark_progress(progress=int(status['percent']))
        if status['status'] == 'completed':
            break

    # Retrieve the data
    with lock:
        rows = report.get_data()
        query = report.get_query_by_index(0)

    # Rewrite the criteria window to the times the query actually covered
    tzinfo = criteria.starttime.tzinfo
    criteria.starttime = (datetime.datetime
                          .utcfromtimestamp(query.actual_t0)
                          .replace(tzinfo=tzinfo))
    criteria.endtime = (datetime.datetime
                        .utcfromtimestamp(query.actual_t1)
                        .replace(tzinfo=tzinfo))
    self.job.safe_update(actual_criteria=criteria)

    if len(rows) == 0:
        return QueryComplete(None)

    # Add ephemeral columns: one for location, one per service key
    Column.create(self.job.table, 'location', 'Location',
                  ephemeral=self.job, datatype='string')
    for key in rows[0].keys():
        if key != 'location':
            Column.create(self.job.table, key, key,
                          ephemeral=self.job, datatype='string',
                          formatter='rvbd.formatHealth')

    frame = pandas.DataFrame(rows)

    if self.job.table.options.rgb:
        # Translate service health constants into display color names
        state_map = {Service.SVC_NOT_AVAILABLE: 'gray',
                     Service.SVC_DISABLED: 'gray',
                     Service.SVC_INIT: 'gray',
                     Service.SVC_NORMAL: 'green',
                     Service.SVC_LOW: 'yellow',
                     Service.SVC_MED: 'yellow',
                     Service.SVC_HIGH: 'red',
                     Service.SVC_NODATA: 'gray'}
        frame = frame.replace(state_map.keys(), state_map.values())

    return QueryComplete(frame)
def run(self):
    """Main execution method.

    Runs a NetProfiler SingleQueryReport built from the table's columns,
    realm/groupby/centricity options and the job criteria, polls until
    completion while reporting progress, then stores the result rows
    (truncated to the table's row limit) in self.data.  Also updates the
    job criteria to the actual time window the query covered.  Returns
    False when no netprofiler device is selected, True on success.
    """
    criteria = self.job.criteria

    if criteria.netprofiler_device == '':
        logger.debug('%s: No netprofiler device selected' % self.table)
        self.job.mark_error("No NetProfiler Device Selected")
        return False

    #self.fake_run()
    #return True

    profiler = DeviceManager.get_device(criteria.netprofiler_device)
    report = steelscript.netprofiler.core.report.SingleQueryReport(profiler)

    columns = [col.name for col in self.table.get_columns(synthetic=False)]

    sortcol = None
    if self.table.sortcols is not None:
        sortcol = self.table.sortcols[0]

    tf = TimeFilter(start=criteria.starttime,
                    end=criteria.endtime)

    logger.info("Running NetProfiler table %d report for timeframe %s"
                % (self.table.id, str(tf)))

    # Optional comma-separated data filter from the criteria
    if ('datafilter' in criteria) and (criteria.datafilter is not None):
        datafilter = criteria.datafilter.split(',')
    else:
        datafilter = None

    trafficexpr = TrafficFilter(
        self.job.combine_filterexprs(exprs=criteria.netprofiler_filterexpr)
    )

    # Incoming criteria.resolution is a timedelta
    logger.debug('NetProfiler report got criteria resolution %s (%s)'
                 % (criteria.resolution, type(criteria.resolution)))
    if criteria.resolution != 'auto':
        # Map the requested resolution (seconds) to a NetProfiler
        # resolution keyword
        rsecs = int(timedelta_total_seconds(criteria.resolution))
        resolution = steelscript.netprofiler.core.report.Report.RESOLUTION_MAP[rsecs]
    else:
        resolution = 'auto'

    logger.debug('NetProfiler report using resolution %s (%s)'
                 % (resolution, type(resolution)))

    # Lock serializes access to the shared profiler connection
    with lock:
        centricity = 'int' if self.table.options.interface else 'hos'
        report.run(realm=self.table.options.realm,
                   groupby=profiler.groupbys[self.table.options.groupby],
                   centricity=centricity,
                   columns=columns,
                   timefilter=tf,
                   trafficexpr=trafficexpr,
                   data_filter=datafilter,
                   resolution=resolution,
                   sort_col=sortcol,
                   sync=False
                   )

    # Poll until the report completes, relaying progress to the job
    done = False
    logger.info("Waiting for report to complete")
    while not done:
        time.sleep(0.5)
        with lock:
            s = report.status()
        self.job.safe_update(progress=int(s['percent']))
        done = (s['status'] == 'completed')

    # Retrieve the data
    with lock:
        query = report.get_query_by_index(0)
        self.data = query.get_data()
    tz = criteria.starttime.tzinfo

    # Update criteria to the time window the query actually covered
    criteria.starttime = (datetime.datetime
                          .utcfromtimestamp(query.actual_t0)
                          .replace(tzinfo=tz))
    criteria.endtime = (datetime.datetime
                        .utcfromtimestamp(query.actual_t1)
                        .replace(tzinfo=tz))
    self.job.safe_update(actual_criteria=criteria)

    # Honor the table's row limit, if any
    if self.table.rows > 0:
        self.data = self.data[:self.table.rows]

    logger.info("Report %s returned %s rows"
                % (self.job, len(self.data)))
    return True
def run(self):
    """Main execution method.

    Runs a NetProfiler MultiQueryReport from the table's template id,
    builds a DataFrame from the first query's data using the report
    legend as column headers, then keeps only the columns the table
    declares, storing the result in self.data.  Updates the job criteria
    to the actual query time window.  Returns False when no netprofiler
    device is selected, True on success.
    """
    criteria = self.job.criteria

    if criteria.netprofiler_device == '':
        logger.debug('%s: No netprofiler device selected' % self.table)
        self.job.mark_error("No NetProfiler Device Selected")
        return False

    profiler = DeviceManager.get_device(criteria.netprofiler_device)
    report = steelscript.netprofiler.core.report.MultiQueryReport(profiler)

    tf = TimeFilter(start=criteria.starttime,
                    end=criteria.endtime)

    logger.info("Running NetProfilerTemplateTable table %d report "
                "for timeframe %s" % (self.table.id, str(tf)))

    # NOTE(review): sibling data sources read
    # criteria.netprofiler_filterexpr; this block reads
    # criteria.profiler_filterexpr -- possibly a stale attribute name,
    # confirm against the criteria definition before changing.
    trafficexpr = TrafficFilter(
        self.job.combine_filterexprs(exprs=criteria.profiler_filterexpr)
    )

    # Incoming criteria.resolution is a timedelta
    logger.debug('NetProfiler report got criteria resolution %s (%s)'
                 % (criteria.resolution, type(criteria.resolution)))
    if criteria.resolution != 'auto':
        # Map the requested resolution (seconds) to a NetProfiler
        # resolution keyword
        rsecs = int(timedelta_total_seconds(criteria.resolution))
        resolution = steelscript.netprofiler.core.report.Report.RESOLUTION_MAP[rsecs]
    else:
        resolution = 'auto'

    logger.debug('NetProfiler report using resolution %s (%s)'
                 % (resolution, type(resolution)))

    # Lock serializes access to the shared profiler connection
    with lock:
        res = report.run(template_id=self.table.options.template_id,
                         timefilter=tf,
                         trafficexpr=trafficexpr,
                         resolution=resolution)

    if res is True:
        logger.info("Report template complete.")
        self.job.safe_update(progress=100)

    # Retrieve the data
    with lock:
        query = report.get_query_by_index(0)
        data = query.get_data()
        headers = report.get_legend()

    tz = criteria.starttime.tzinfo

    # Update criteria to the time window the query actually covered
    criteria.starttime = (datetime.datetime
                          .utcfromtimestamp(query.actual_t0)
                          .replace(tzinfo=tz))
    criteria.endtime = (datetime.datetime
                        .utcfromtimestamp(query.actual_t1)
                        .replace(tzinfo=tz))

    self.job.safe_update(actual_criteria=criteria)

    # Create dataframe with all of the default headers
    df = pandas.DataFrame(data, columns=[h.key for h in headers])

    # Now filter down to the columns requested by the table
    columns = [col.name for col in self.table.get_columns(synthetic=False)]
    self.data = df[columns]

    logger.info("Report %s returned %s rows"
                % (self.job, len(self.data)))
    return True
def collect(self, jobs=None):
    """Upload captured PCAPs to a netshark and build an MSA file.

    For each finished download job in *jobs*, uploads its local PCAP to
    the upload directory on the netshark selected by
    criteria.netshark_device_upload, then creates a multisegment (MSA)
    file from the uploaded traces and kicks off a timeskew calculation.
    Returns a QueryComplete with a single HTML status string.
    """
    logger.info('%s: MSADownload.collect: %s' % (self, jobs))
    c = self.job.criteria
    s = DeviceManager.get_device(c.netshark_device_upload)

    # Default upload dir is per-user and stamped with the end time
    if self.table.options.msa_folder is None:
        upload_dir = ('/%s/msa-%s' % (s.auth.username,
                                      datetime_to_seconds(c.endtime)))
    else:
        upload_dir = self.table.options.msa_folder

    # check upload dir and delete if option set
    try:
        s.create_dir(upload_dir)
    except RvbdHTTPException:
        # Directory already exists: recreate it only when the
        # overwrite_folder option is set
        if self.table.options.overwrite_folder:
            logger.info('MSA Directory %s already created, '
                        'deleting and creating a new one.'
                        % upload_dir)
            d = s.get_dir(upload_dir)
            d.remove()
            s.create_dir(upload_dir)
        else:
            logger.info('MSA Directory %s already created, skipping'
                        % upload_dir)

    # upload pcaps to upload dir, overwriting if needed
    filepaths = []
    for jid, job in jobs.iteritems():
        localfile = job.data()['filename'][0]
        # example naming: "1-source-6-jobs_00001E3"
        fname = '{}-{}-{}'.format(
            job.criteria.segment,
            job.criteria.netshark_device,
            job.criteria.netshark_source_name.replace('/', '_')
        )
        remotefile = os.path.join(upload_dir, fname)
        # Remove any stale remote copy before uploading
        check_netshark_file(s, remotefile, remove=True)
        logger.info('Uploading new PCAP %s to %s'
                    % (localfile, remotefile))
        s.upload_trace_file(remotefile, localfile)
        filepaths.append((remotefile, localfile))

    # create msa from those files
    filepaths.sort()
    flist = [s.get_file(rfile) for rfile, _ in filepaths]
    msafile = os.path.join(upload_dir, 'msa_file.pvt')
    check_netshark_file(s, msafile, remove=True)

    # create the aggregated file and initiate a timeskew calculation
    msa = s.create_multisegment_file(msafile, flist)

    # pick arbitrary number 10,000 for number of pkts
    # to use for timeskew calculation
    msa.calculate_timeskew(10000)

    # Build the HTML status message returned to the widget
    result = ['<strong>Done</strong> - PCAPs successfully downloaded '
              'from source NetSharks and uploaded for analysis']
    result.append('')
    for remote, local in filepaths:
        result.append(
            '<strong>Uploaded file path:</strong> %s' % remote
        )
        result.append('')

    config = pprint.pformat(msa.get_info()).splitlines()
    result.append('<strong>MSA file created with config:</strong> '
                  '<br>'
                  '<pre>'
                  '{}'
                  '</pre>'.format('<br>'.join(config)))

    return QueryComplete(['<br>'.join(result)])
def run(self):
    """Main execution method.

    Creates (or reuses, when persistent) a NetShark view built from the
    table's columns and the job criteria, waits for non-live views to
    finish, retrieves the data (applying sort, aggregation or delta, and
    for live views the time window), then parses it and returns a
    QueryComplete.  Returns False when no netshark device is selected.
    """
    criteria = self.job.criteria

    # Set True later if a key column named 'time' is created
    self.timeseries = False
    self.column_names = []

    # Resolution comes in as a time_delta
    resolution = timedelta_total_seconds(criteria.resolution)
    default_delta = 1000000000   # one second, in nanoseconds
    # Sample size interval, in nanoseconds
    self.delta = int(default_delta * resolution)

    if criteria.netshark_device == '':
        logger.debug('%s: No netshark device selected' % self.table)
        self.job.mark_error("No NetShark Device Selected")
        return False

    shark = DeviceManager.get_device(criteria.netshark_device)

    logger.debug("Creating columns for NetShark table %d" % self.table.id)

    # Create Key/Value Columns from the table's non-synthetic columns
    columns = []
    for tc in self.table.get_columns(synthetic=False):
        tc_options = tc.options
        if (tc.iskey and tc.name == 'time'
                and tc_options.extractor == 'sample_time'):
            # don't create column, use the sample time for timeseries
            self.timeseries = True
            self.column_names.append('time')
            continue
        elif tc.iskey:
            c = Key(tc_options.extractor,
                    description=tc.label,
                    default_value=tc_options.default_value)
        else:
            if tc_options.operation:
                try:
                    operation = getattr(Operation, tc_options.operation)
                except AttributeError:
                    # Unknown operation name: fall back to sum
                    operation = Operation.sum
                    print ('ERROR: Unknown operation attribute '
                           '%s for column %s.'
                           % (tc_options.operation, tc.name))
            else:
                operation = Operation.none
            c = Value(tc_options.extractor,
                      operation,
                      description=tc.label,
                      default_value=tc_options.default_value)
            self.column_names.append(tc.name)
        columns.append(c)

    # Identify the sort column: index of the column whose extractor
    # matches the table's first sort column
    sortidx = None
    if self.table.sortcols is not None:
        sortcol = Column.objects.get(table=self.table,
                                     name=self.table.sortcols[0])
        sort_name = sortcol.options.extractor
        for i, c in enumerate(columns):
            if c.field == sort_name:
                sortidx = i
                break

    # Initialize filters from the job criteria
    criteria = self.job.criteria

    filters = []

    if hasattr(criteria, 'netshark_filterexpr'):
        logger.debug('calculating netshark filter expression ...')
        filterexpr = self.job.combine_filterexprs(
            exprs=criteria.netshark_filterexpr,
            joinstr="&"
        )
        if filterexpr:
            logger.debug('applying netshark filter expression: %s'
                         % filterexpr)
            filters.append(NetSharkFilter(filterexpr))

    if hasattr(criteria, 'netshark_bpf_filterexpr'):
        # TODO evaluate how to combine multiple BPF filters
        # this will just apply one at a time
        filterexpr = criteria.netshark_bpf_filterexpr
        logger.debug('applying netshark BPF filter expression: %s'
                     % filterexpr)
        filters.append(BpfFilter(filterexpr))

    # Choose the view sampling time: 1ms sampling is only allowed for
    # durations up to one second
    resolution = criteria.resolution
    if resolution.seconds == 1:
        sampling_time_msec = 1000
    elif resolution.microseconds == 1000:
        sampling_time_msec = 1
        if criteria.duration > parse_timedelta('1s'):
            msg = ("Cannot run a millisecond report with a duration "
                   "longer than 1 second")
            raise ValueError(msg)
    else:
        sampling_time_msec = 1000

    # Get source type from options
    logger.debug("NetShark Source: %s"
                 % self.job.criteria.netshark_source_name)
    source = path_to_class(
        shark, self.job.criteria.netshark_source_name)

    live = source.is_live()
    persistent = criteria.get('netshark_persistent', False)

    if live and not persistent:
        raise ValueError("Live views must be run with persistent set")

    view = None

    if persistent:
        # First, see a view by this title already exists
        # Title is the table name plus a criteria hash including
        # all criteria *except* the timeframe
        h = hashlib.md5()
        h.update('.'.join([c.name
                           for c in self.table.get_columns()]))
        for k, v in criteria.iteritems():
            if criteria.is_timeframe_key(k):
                continue
            h.update('%s:%s' % (k, v))

        title = '/'.join(['steelscript-appfwk',
                          str(self.table.id),
                          self.table.namespace,
                          self.table.name,
                          h.hexdigest()])
        view = NetSharkViews.find_by_name(shark, title)
        logger.debug("Persistent view title: %s" % title)
    else:
        # Only assign a title for persistent views
        title = None

    if not view:
        # Not persistent, or not yet created...
        if not live:
            # Cannot attach time filter to a live view,
            # it will be added later at get_data() time
            tf = TimeFilter(start=criteria.starttime,
                            end=criteria.endtime)
            filters.append(tf)

            logger.info("Setting netshark table %d timeframe to %s"
                        % (self.table.id, str(tf)))

        # Create it.  Lock serializes access to the shared connection.
        with lock:
            logger.debug("%s: Creating view for table %s"
                         % (str(self), str(self.table)))
            view = shark.create_view(
                source, columns, filters=filters, sync=False,
                name=title, sampling_time_msec=sampling_time_msec)

        if not live:
            # Poll until the view finishes processing, relaying progress
            done = False
            logger.debug("Waiting for netshark table %d to complete"
                         % self.table.id)
            while not done:
                time.sleep(0.5)
                with lock:
                    s = view.get_progress()
                    self.job.mark_progress(s)
                    self.job.save()
                    done = view.is_ready()

    logger.debug("Retrieving data for timeframe: %s - %s"
                 % (datetime_to_nanoseconds(criteria.starttime),
                    datetime_to_nanoseconds(criteria.endtime)))

    # Retrieve the data
    with lock:
        getdata_kwargs = {}
        if sortidx:
            getdata_kwargs['sortby'] = sortidx

        if self.table.options.aggregated:
            getdata_kwargs['aggregated'] = self.table.options.aggregated
        else:
            getdata_kwargs['delta'] = self.delta

        if live:
            # For live views, attach the time frame to the get_data()
            getdata_kwargs['start'] = (
                datetime_to_nanoseconds(criteria.starttime))
            getdata_kwargs['end'] = (
                datetime_to_nanoseconds(criteria.endtime))

        self.data = view.get_data(**getdata_kwargs)

        # Persistent views are kept open for reuse on the next run
        if not persistent:
            view.close()

    # Honor the table's row limit, if any
    if self.table.rows > 0:
        self.data = self.data[:self.table.rows]

    self.parse_data()

    logger.info("NetShark Report %s returned %s rows"
                % (self.job, len(self.data)))

    return QueryComplete(self.data)