Example #1
    def run(self):
        criteria = self.job.criteria

        netshark = DeviceManager.get_device(criteria.netshark_device)

        self.export_name = str(path_to_class(netshark,
                                             criteria.netshark_source_name))

        source = netshark.get_capture_job_by_name(self.export_name)

        timefilter = TimeFilter(criteria.starttime, criteria.endtime)

        handle = Job._compute_handle(self.table, criteria)

        # check if pcaps directory exists, if not make the directory
        if not os.path.exists(PCAP_DIR):
            os.mkdir(PCAP_DIR)

        while self.all_pcap_size > settings.PCAP_SIZE_LIMIT:
            self.delete_oldest_pcap()

        self.filename = add_pcap_dir('%s.pcap' % handle)

        filters = ([BpfFilter(filt) for filt in self.table.options.filters]
                   or None)
        with netshark.create_export(
                source, timefilter, filters=filters,
                wait_for_data=self.table.options.wait_for_data,
                wait_duration=self.table.options.wait_duration) as e:
            self.download(e)

        return QueryComplete(pandas.DataFrame([dict(filename=self.filename)]))
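
The helpers referenced above (add_pcap_dir, all_pcap_size and delete_oldest_pcap) are not part of this listing. A minimal sketch of what they might look like, assuming they simply manage files under a PCAP_DIR directory; the names, the mixin class and the directory value here are assumptions, not the actual implementation:

import glob
import os

PCAP_DIR = '/tmp/pcaps'    # hypothetical location; the real code imports its own


def add_pcap_dir(filename):
    # Hypothetical helper: build an absolute path under PCAP_DIR.
    return os.path.join(PCAP_DIR, filename)


class PcapSpaceMixin(object):
    # Hypothetical mixin holding the size-management helpers used by run().

    @property
    def all_pcap_size(self):
        # Total size in bytes of every pcap currently on disk.
        return sum(os.path.getsize(f)
                   for f in glob.glob(os.path.join(PCAP_DIR, '*.pcap')))

    def delete_oldest_pcap(self):
        # Remove the least recently modified pcap so run() can reclaim space.
        pcaps = glob.glob(os.path.join(PCAP_DIR, '*.pcap'))
        if pcaps:
            os.remove(min(pcaps, key=os.path.getmtime))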
Example #2
    def analyze(self, jobs):
        criteria = self.job.criteria

        tzname = criteria.business_hours_tzname
        tz = pytz.timezone(tzname)

        times = jobs['times'].data()

        if times is None or len(times) == 0:
            return QueryComplete(None)

        basetable = Table.from_ref(
            self.table.options.related_tables['basetable']
        )

        # Create all the jobs
        depjobs = {}

        for i, row in times.iterrows():
            (t0, t1) = (row['starttime'], row['endtime'])
            sub_criteria = copy.copy(criteria)
            sub_criteria.starttime = t0.astimezone(tz)
            sub_criteria.endtime = t1.astimezone(tz)

            job = Job.create(table=basetable, criteria=sub_criteria,
                             update_progress=False, parent=self.job)

            logger.debug("Created %s: %s - %s" % (job, t0, t1))
            depjobs[job.id] = job

        return QueryContinue(self.collect, depjobs)
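
The astimezone calls above assume t0 and t1 are already timezone-aware. For reference, a tiny standalone illustration of the conversion; the zone name and timestamp are made up:

import pytz
from datetime import datetime

tz = pytz.timezone('US/Eastern')                   # e.g. business_hours_tzname
t0 = datetime(2024, 1, 2, 14, 0, tzinfo=pytz.utc)  # 14:00 UTC
print(t0.astimezone(tz))                           # 2024-01-02 09:00:00-05:00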
Example #3
    def analyze(self, jobs):
        criteria = self.job.criteria

        sharks_query_table = Table.from_ref(
            self.table.options.related_tables['basetable'])

        depjobs = {}

        # For every (shark, job), we spin off a new job to grab the data, then
        # merge everything into one dataframe at the end.
        for s in Device.objects.filter(module='netshark', enabled=True):
            shark = DeviceManager.get_device(s.id)

            for capjob in shark.get_capture_jobs():
                # Start with criteria from the primary table -- this gives us
                # endtime, duration and netshark_filterexpr.
                bytes_criteria = copy.copy(criteria)
                bytes_criteria.netshark_device = s.id
                bytes_criteria.netshark_source_name = 'jobs/' + capjob.name
                bytes_criteria.resolution = datetime.timedelta(0, 1)
                bytes_criteria.aggregated = True

                job = Job.create(table=sharks_query_table,
                                 criteria=bytes_criteria)

                depjobs[job.id] = job

        return QueryContinue(self.collect, depjobs)
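
Both this example and the previous one hand the dependent jobs to self.collect via QueryContinue, but the collect callback itself is not shown in this listing. A minimal sketch, assuming each dependent Job exposes its result as a DataFrame via .data() (as the other examples do) and that pandas is imported as in Example #1:

    def collect(self, jobs=None):
        # Hypothetical continuation callback: `jobs` maps job id -> Job.
        # Merge every dependent job's DataFrame into one result, per the
        # comment above ("merge everything into one dataframe at the end").
        frames = []
        for job in jobs.values():
            df = job.data()
            if df is not None:
                frames.append(df)

        if not frames:
            return QueryComplete(None)

        return QueryComplete(pandas.concat(frames, ignore_index=True))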
Example #4
    def run(self):
        # Collect all dependent tables
        tables = self.table.options.tables
        if not tables:
            return QueryContinue(self._analyze, {})

        logger.debug("%s: dependent tables: %s" % (self, tables))
        jobs = {}

        for (name, ref) in tables.items():
            table = Table.from_ref(ref)
            job = Job.create(table, self.job.criteria,
                             update_progress=self.job.update_progress,
                             parent=self.job)

            logger.debug("%s: dependent job %s" % (self, job))
            jobs[name] = job

        return QueryContinue(self._analyze, jobs)
Example #5
    def analyze(self, jobs):
        logger.debug('%s analyze - received jobs: %s' % (self, jobs))

        basetable = Table.from_ref(
            self.table.options['related_tables']['template']
        )
        data = jobs['source'].data()
        if data is None:
            return QueryError('No data available to analyze')

        # find column whose min/max is largest deviation from mean
        # then take row from that column where min/max occurs
        if self.table.options['max']:
            idx = (data.max() / data.mean()).idxmax()
            frow = data.loc[data[idx].idxmax()]
        else:
            idx = (data.min() / data.mean()).idxmin()
            frow = data.loc[data[idx].idxmin()]

        # get time value from extracted row to calculate new start/end times
        ftime = frow['time']
        duration = parse_timedelta(self.table.options['zoom_duration'])
        resolution = parse_timedelta(self.table.options['zoom_resolution'])
        stime = ftime - (duration / 2)
        etime = ftime + (duration / 2)

        criteria = self.job.criteria
        criteria['resolution'] = resolution
        criteria['duration'] = duration
        criteria['_orig_duration'] = duration
        criteria['starttime'] = stime
        criteria['_orig_starttime'] = stime
        criteria['endtime'] = etime
        criteria['_orig_endtime'] = etime

        logger.debug('Creating FocusedAnalysis job with updated criteria %s'
                     % criteria)

        job = Job.create(basetable, criteria, self.job.update_progress)
        return QueryContinue(self.finish, {'job': job})
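
The column-then-row selection above is easy to misread, so here is a small self-contained illustration of the 'max' branch with made-up numbers: the column with the largest max-to-mean ratio wins, and the row is then taken where that column peaks.

import pandas

data = pandas.DataFrame({
    'time': [1, 2, 3],        # max/mean = 1.5
    'a':    [10, 12, 11],     # max/mean ~ 1.09
    'b':    [5, 30, 10],      # max/mean = 2.0, the largest deviation
})

idx = (data.max() / data.mean()).idxmax()
print(idx)                                 # 'b'
frow = data.loc[data[idx].idxmax()]
print(frow['time'])                        # 2, the time where 'b' peaks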