Example #1
    def create(cls,
               shark,
               source,
               timefilter,
               filters=None,
               wait_for_data=False,
               wait_duration=10):
        """Create a new pcap export from the given source."""
        config = {
            'output_format': 'PCAP_US',
            'start_time': datetime_to_seconds(timefilter.start),
            'end_time': datetime_to_seconds(timefilter.end)
        }

        if filters:
            config['filters'] = [filt.bind(shark) for filt in filters]

        r = None
        cnt = 0
        while r is None and cnt < 3:
            try:
                r = source._api.create_export(source.id, config=config)
            except RvbdHTTPException as e:
                if 'job is empty' in str(e) and wait_for_data:
                    logger.warning("Data is not available to export,"
                                   " waiting for %s seconds" % wait_duration)
                    time.sleep(wait_duration)
                    cnt += 1
                    continue
                else:
                    raise NetSharkExportException(str(e))
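The export config in Example #1 turns the TimeFilter bounds into epoch seconds with datetime_to_seconds. Below is a minimal stand-in for that conversion, assuming naive datetimes are meant as UTC; the real steelscript helper may handle timezones differently.

import calendar

import pytz


def epoch_seconds(dt):
    """Rough stand-in for datetime_to_seconds (assumption: naive datetimes are UTC)."""
    if dt.tzinfo is not None:
        dt = dt.astimezone(pytz.utc).replace(tzinfo=None)
    # calendar.timegm interprets the time tuple as UTC and returns integer epoch seconds
    return calendar.timegm(dt.timetuple())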
Example #3
    def __init__(self, start=None, end=None, duration=None, time_range=None):
        """Initialize a TimeFilter object.

         :param start: integer, start time in epoch seconds
         :param end: integer, end time in epoch seconds
         :param duration: string, time duration, i.e. '1 hour'
         :param time_range: string, time range, i.e. 'last 1 hour'
            or '4/21/13 4:00 to 4/21/13 5:00'

        """
        invalid = False

        if not start and not end and not duration and not time_range:
            # when querying file or clip, usually no time filters are provided
            self.start = None
            self.end = None

        elif start and end:
            if duration or time_range:
                invalid = True
            else:
                self.start = str(start)
                self.end = str(end)

        elif time_range:
            if start or end or duration:
                invalid = True
            else:
                start, end = timeutils.parse_range(time_range)
                self.start = timeutils.datetime_to_seconds(start)
                self.end = timeutils.datetime_to_seconds(end)

        elif duration:
            if not start and not end:
                invalid = True
            else:
                td = timeutils.parse_timedelta(duration).total_seconds()
                if start:
                    self.start = str(start)
                    self.end = str(int(start + td))
                else:
                    self.start = str(int(end - td))
                    self.end = str(end)

        elif start or end:
            invalid = True

        if invalid:
            msg = ('Start/end timestamps can not be derived from start "{}" '
                   'end "{}" duration "{}" time_range "{}".'.format(
                       start, end, duration, time_range))
            raise AppResponseException(msg)
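A hedged usage sketch of the TimeFilter constructor above; the duration and range strings follow the docstring and the epoch values are illustrative.

# Exactly one way of bounding the window is allowed:
tf_bounds = TimeFilter(start=1366516800, end=1366520400)    # explicit epoch seconds
tf_anchor = TimeFilter(end=1366520400, duration='1 hour')   # start derived from end
tf_range = TimeFilter(time_range='last 1 hour')             # parsed via timeutils.parse_range

# Mixing modes raises AppResponseException, e.g.:
# TimeFilter(start=1366516800, end=1366520400, duration='1 hour')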
Example #4
    def post_run(self):
        t0 = datetime_to_seconds(self.job.criteria.starttime)
        t1 = datetime_to_seconds(self.job.criteria.endtime)

        data = []
        for t in range(t0, t1, self.table.options['source_resolution']):
            data.append([t, 1])

        df = pandas.DataFrame(data, columns=['time', 'value'])
        df['time'] = df['time'].astype('datetime64[s]')

        self.data = df
        return True
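The post_run above synthesizes one row per source_resolution step and then reinterprets the epoch column as datetimes; a minimal sketch of that round trip with illustrative values.

import pandas

data = [[1366516800 + i * 60, 1] for i in range(3)]   # one point per minute
df = pandas.DataFrame(data, columns=['time', 'value'])
df['time'] = df['time'].astype('datetime64[s]')       # epoch seconds -> datetime64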
Example #6
    def analyze(self, jobs):

        df = jobs['base'].data()

        criteria = self.job.criteria

        devid = criteria.appresponse_device
        duration = criteria.duration.seconds
        endtime = datetime_to_seconds(criteria.endtime)
        granularity = criteria.granularity.seconds

        def make_report_link(mod, v):
            s = ('<a href="/report/appresponse/{}/?'
                 'duration={}&appresponse_device={}&endtime={}&'
                 'pivot_column_names={}&granularity={}&auto_run=true" '
                 'target="_blank">{}</a>'.format(mod, duration, devid, endtime,
                                                 v, granularity, v))
            return s

        make_report_link_with_mod = functools.partial(
            make_report_link, self.table.options.ts_report_mod_name)

        pivot_col = self.table.options.pivot_column_name
        df[pivot_col] = df[pivot_col].map(make_report_link_with_mod)

        return QueryComplete(df)
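The analyze method rewrites the pivot column into drill-down links by pre-binding the report module name with functools.partial; a small sketch of that binding pattern, using a hypothetical, simplified link builder and module name.

import functools

def make_link(mod, value):
    # hypothetical, simplified version of make_report_link above
    return '<a href="/report/appresponse/%s/?auto_run=true">%s</a>' % (mod, value)

link_for = functools.partial(make_link, 'appresponse_hostname_ts')  # module name is illustrative
html = link_for('10.1.2.3')  # only the per-row pivot value is still required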
Example #7
    def setup_snmp_trap(self, alert):
        oid = self.eoid  # cascade enterprise Object ID
        trapid = self.trapid  # base string for trap indicators
        self.trapname = '.'.join([oid, trapid])

        severity = self.severity
        description = alert.message or self.default_description
        alert_level = AlertLevels.get_integer(self.level)
        now = timeutils.datetime_to_seconds(alert.timestamp)

        self.binds = (
            ('1.3.6.1.2.1.1.3.0', rfc1902.Integer(0)),  # Uptime
            ('1.3.6.1.4.1.7054.71.2.1.0',
             rfc1902.Integer(severity)),  # Severity
            ('1.3.6.1.4.1.7054.71.2.3.0', rfc1902.OctetString(description)),
            ('1.3.6.1.4.1.7054.71.2.4.0', rfc1902.Integer(0)),  # Event ID
            ('1.3.6.1.4.1.7054.71.2.5.0', rfc1902.OctetString(self.trap_url)),
            ('1.3.6.1.4.1.7054.71.2.7.0',
             rfc1902.Integer(alert_level)),  # Alert Level
            ('1.3.6.1.4.1.7054.71.2.8.0', rfc1902.Integer(now)),  # Start Time
            ('1.3.6.1.4.1.7054.71.2.16.0', rfc1902.Integer(0)),  # Source Count
            ('1.3.6.1.4.1.7054.71.2.18.0',
             rfc1902.Integer(0)),  # Destination Count
            ('1.3.6.1.4.1.7054.71.2.20.0',
             rfc1902.Integer(0)),  # Protocol Count
            ('1.3.6.1.4.1.7054.71.2.22.0', rfc1902.Integer(0)),  # Port Count
        )
Example #8
    def process(cls, widget, job, data):
        newdata = []
        for row in data:
            newrow = []
            for col in row:
                if isinstance(col, datetime.datetime):
                    if col.tzinfo is None:
                        col = col.replace(tzinfo=pytz.utc)
                    col = datetime_to_seconds(col)
                newrow.append(col)
            newdata.append(newrow)

        return newdata
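The process method localizes naive datetimes to UTC before handing them to datetime_to_seconds, so the epoch conversion has an unambiguous reference; a short sketch (the epoch value shown assumes UTC).

import datetime

import pytz

col = datetime.datetime(2013, 4, 21, 4, 0)   # naive timestamp from a widget row
if col.tzinfo is None:
    col = col.replace(tzinfo=pytz.utc)        # treat naive values as UTC
# datetime_to_seconds(col) would then yield 1366516800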
Example #10
def create_debug_zipfile(no_summary=False):
    """ Collects logfiles and system info into a zipfile for download/email

        `no_summary` indicates whether to include system information from
                     the helper script `steel about` as part of the
                     zipped package.  Default is to include the file.
    """
    # setup correct timezone based on admin settings
    admin = AppfwkUser.objects.filter(is_superuser=True)[0]
    tz = pytz.timezone(admin.timezone)
    current_tz = os.environ['TZ']

    try:
        # save TZ to environment for zip to use
        os.environ['TZ'] = str(tz)

        # if zlib is available, then let's compress the files
        # otherwise we will just append them like a tarball
        try:
            import zlib
            compression = zipfile.ZIP_DEFLATED
        except ImportError:
            compression = zipfile.ZIP_STORED

        # setup the name, correct timezone, and open the zipfile
        now = datetime_to_seconds(datetime.now(tz))
        archive_name = os.path.join(settings.PROJECT_ROOT,
                                    'debug-%d.zip' % now)

        myzip = zipfile.ZipFile(archive_name, 'w', compression=compression)

        try:
            # find all of the usual logfiles
            logging.debug('zipping log files ...')
            for fname in find_logs():
                debug_fileinfo(fname)
                myzip.write(fname)

            if not no_summary:
                logging.debug('running about script')
                response = shell('steel about -v', save_output=True)
                logging.debug('zipping about script')
                myzip.writestr('system_summary.txt', response)
        finally:
            myzip.close()

    finally:
        # return env to its prior state
        os.environ['TZ'] = current_tz

    return archive_name
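create_debug_zipfile probes for zlib and silently degrades to an uncompressed archive when it is unavailable; a compact sketch of the same fallback pattern with an illustrative file name.

import zipfile

try:
    import zlib  # probe only: ZIP_DEFLATED requires zlib
    compression = zipfile.ZIP_DEFLATED
except ImportError:
    compression = zipfile.ZIP_STORED

archive = zipfile.ZipFile('debug-example.zip', 'w', compression=compression)
try:
    archive.writestr('system_summary.txt', 'illustrative contents')
finally:
    archive.close()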
Example #11
def _get_url_fields(flds):
    for k, v in flds.iteritems():
        if k in ['starttime', 'endtime']:
            yield (k, str(datetime_to_seconds(v)))
        elif k in ['duration', 'resolution']:
            try:
                yield (k, str(int(timedelta_total_seconds(v))))
            except AttributeError:
                # v is of special value, not a string of some duration
                yield (k, v.replace(' ', '+'))
        else:
            # use + as encoded white space
            yield (k, str(v).replace(' ', '+'))
    yield ('auto_run', 'true')
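A hedged sketch of joining the pairs from _get_url_fields into a query string; the field values are illustrative and the epoch number assumes the start time is interpreted as UTC.

import datetime

fields = {
    'starttime': datetime.datetime(2013, 4, 21, 4, 0),
    'duration': '15 min',                 # plain string: falls back to '+' encoding
    'pivot_column_names': 'host group',
}
query = '&'.join('%s=%s' % (k, v) for k, v in _get_url_fields(fields))
# e.g. 'starttime=1366516800&duration=15+min&pivot_column_names=host+group&auto_run=true'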
Example #12
def create_debug_zipfile(no_summary=False):
    """ Collects logfiles and system info into a zipfile for download/email

        `no_summary` indicates whether to include system information from
                     the helper script `steel about` as part of the
                     zipped package.  Default is to include the file.
    """
    # setup correct timezone based on admin settings
    admin = AppfwkUser.objects.filter(is_superuser=True)[0]
    tz = pytz.timezone(admin.timezone)
    current_tz = os.environ['TZ']

    try:
        # save TZ to environment for zip to use
        os.environ['TZ'] = str(tz)

        # if zlib is available, then let's compress the files
        # otherwise we will just append them like a tarball
        try:
            import zlib
            compression = zipfile.ZIP_DEFLATED
        except ImportError:
            compression = zipfile.ZIP_STORED

        # setup the name, correct timezone, and open the zipfile
        now = datetime_to_seconds(datetime.now(tz))
        archive_name = os.path.join(settings.PROJECT_ROOT, 'debug-%d.zip' % now)

        myzip = zipfile.ZipFile(archive_name, 'w', compression=compression)

        try:
            # find all of the usual logfiles
            logging.debug('zipping log files ...')
            for fname in find_logs():
                debug_fileinfo(fname)
                myzip.write(fname)

            if not no_summary:
                logging.debug('running about script')
                response = shell('steel about -v', save_output=True)
                logging.debug('zipping about script')
                myzip.writestr('system_summary.txt', response)
        finally:
            myzip.close()

    finally:
        # return env to its prior state
        os.environ['TZ'] = current_tz

    return archive_name
Example #13
def _get_url_fields(flds):
    for k, v in flds.iteritems():
        if k in ['starttime', 'endtime']:
            yield (k, str(datetime_to_seconds(v)))
        elif k in ['duration', 'resolution']:
            try:
                yield (k, str(int(timedelta_total_seconds(v))))
            except AttributeError:
                # v is of special value, not a string of some duration
                yield (k, v.replace(' ', '+'))
        else:
            # use + as encoded white space
            yield (k, str(v).replace(' ', '+'))
    yield ('auto_run', 'true')
Example #14
    def send_trap(self):
        """ Send a SNMP trap with id `trapid` to the IP address `manager`
        """
        oid = self.options.eoid  # cascade enterprise Object ID
        trapid = self.options.trapid  # base string for trap indicators

        community = self.options.community
        manager_ip = self.options.manager_ip

        severity = self.options.severity
        description = self.trap_description
        url = self.options.trap_url
        alert_level = self.options.alert_level
        now = timeutils.datetime_to_seconds(datetime.datetime.now())

        trapname = '.'.join([oid, trapid])

        ntf = ntforg.NotificationOriginator()

        err = ntf.sendNotification(
            ntforg.CommunityData(community),
            ntforg.UdpTransportTarget((manager_ip, 162)),
            'trap',
            trapname,
            ('1.3.6.1.2.1.1.3.0', rfc1902.Integer(0)),  # Uptime
            ('1.3.6.1.4.1.7054.71.2.1.0',
             rfc1902.Integer(severity)),  # Severity
            ('1.3.6.1.4.1.7054.71.2.3.0', rfc1902.OctetString(description)),
            ('1.3.6.1.4.1.7054.71.2.4.0', rfc1902.Integer(0)),  # Event ID
            ('1.3.6.1.4.1.7054.71.2.5.0', rfc1902.OctetString(url)),
            ('1.3.6.1.4.1.7054.71.2.7.0',
             rfc1902.Integer(alert_level)),  # Alert Level
            ('1.3.6.1.4.1.7054.71.2.8.0', rfc1902.Integer(now)),  # Start Time
            ('1.3.6.1.4.1.7054.71.2.16.0', rfc1902.Integer(0)),  # Source Count
            ('1.3.6.1.4.1.7054.71.2.18.0',
             rfc1902.Integer(0)),  # Destination Count
            ('1.3.6.1.4.1.7054.71.2.20.0',
             rfc1902.Integer(0)),  # Protocol Count
            ('1.3.6.1.4.1.7054.71.2.22.0', rfc1902.Integer(0)),  # Port Count
        )
    def write(self, index, doctype, data_frame, timecol):

        df = data_frame.fillna('')

        actions = [{
            "_index": index,
            "_type": doctype,
            "_id": datetime_to_seconds(df.iloc[i][timecol]),
            "_source": df.iloc[i].to_dict()
        } for i in xrange(len(df))]

        logger.debug(
            "Writing %s records from %s to %s into db. Index: %s, "
            "doc_type: %s." %
            (len(df), df[timecol].min(), df[timecol].max(), index, doctype))

        written, errors = helpers.bulk(self.client,
                                       actions=actions,
                                       stats_only=True)
        logger.debug("Successfully wrote %s records, %s errors." %
                     (written, errors))
        return
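In the write method above, each DataFrame row becomes one bulk action whose document id is the epoch timestamp of its time column; a sketch of a single action dict with illustrative index, doc type and field values.

action = {
    '_index': 'flow-records',            # illustrative index name
    '_type': 'record',
    '_id': 1366516800,                   # epoch seconds taken from the row's time column
    '_source': {'time': '2013-04-21 04:00:00', 'bytes': 1024},
}
# helpers.bulk(client, actions=[action], stats_only=True) -> (written, errors)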
    def setup_snmp_trap(self, alert):
        oid = self.eoid             # cascade enterprise Object ID
        trapid = self.trapid        # base string for trap indicators
        self.trapname = '.'.join([oid, trapid])

        severity = self.severity
        description = alert.message or self.default_description
        alert_level = AlertLevels.get_integer(self.level)
        now = timeutils.datetime_to_seconds(alert.timestamp)

        self.binds = (
            ('1.3.6.1.2.1.1.3.0', rfc1902.Integer(0)),                       # Uptime
            ('1.3.6.1.4.1.7054.71.2.1.0', rfc1902.Integer(severity)),        # Severity
            ('1.3.6.1.4.1.7054.71.2.3.0', rfc1902.OctetString(description)),
            ('1.3.6.1.4.1.7054.71.2.4.0', rfc1902.Integer(0)),               # Event ID
            ('1.3.6.1.4.1.7054.71.2.5.0', rfc1902.OctetString(self.trap_url)),
            ('1.3.6.1.4.1.7054.71.2.7.0', rfc1902.Integer(alert_level)),     # Alert Level
            ('1.3.6.1.4.1.7054.71.2.8.0', rfc1902.Integer(now)),             # Start Time
            ('1.3.6.1.4.1.7054.71.2.16.0', rfc1902.Integer(0)),              # Source Count
            ('1.3.6.1.4.1.7054.71.2.18.0', rfc1902.Integer(0)),              # Destination Count
            ('1.3.6.1.4.1.7054.71.2.20.0', rfc1902.Integer(0)),              # Protocol Count
            ('1.3.6.1.4.1.7054.71.2.22.0', rfc1902.Integer(0)),              # Port Count
        )
Example #17
    def _fill_criteria(self, **kwargs):

        if ('timefilter' in kwargs and
                not all(['start_time' in kwargs, 'end_time' in kwargs])):

            # when timefilter is passed in, need to convert to start/end time
            timefilter = TimeFilter.parse_range(kwargs['timefilter'])

            kwargs['start_time'] = timefilter.start
            kwargs['end_time'] = timefilter.end
            del kwargs['timefilter']

        for name in ['start_time', 'end_time']:
            if name in kwargs:
                kwargs[name] = datetime_to_seconds(kwargs[name])

        if 'devices' in kwargs and kwargs['devices']:
            kwargs['devices'] = kwargs['devices'].split(',')

        if 'port' in kwargs and kwargs['port']:
            kwargs['port'] = int(kwargs['port'])

        super(BaseStatsReport, self)._fill_criteria(**kwargs)
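A hedged usage sketch of the criteria expansion in Example #17; report stands for an instance of the class and the keyword values are illustrative.

report._fill_criteria(timefilter='last 15 min',
                      devices='10.1.2.3,10.1.2.4',
                      port='443')
# start_time/end_time become epoch seconds, devices becomes a list of strings,
# and port becomes the integer 443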
Example #18
    def _fill_criteria(self, **kwargs):

        if ('timefilter' in kwargs
                and not all(['start_time' in kwargs, 'end_time' in kwargs])):

            # when timefilter is passed in, need to convert to start/end time
            timefilter = TimeFilter.parse_range(kwargs['timefilter'])

            kwargs['start_time'] = timefilter.start
            kwargs['end_time'] = timefilter.end
            del kwargs['timefilter']

        for name in ['start_time', 'end_time']:
            if name in kwargs:
                kwargs[name] = datetime_to_seconds(kwargs[name])

        if 'devices' in kwargs and kwargs['devices']:
            kwargs['devices'] = kwargs['devices'].split(',')

        if 'port' in kwargs and kwargs['port']:
            kwargs['port'] = int(kwargs['port'])

        super(BaseStatsReport, self)._fill_criteria(**kwargs)
    def send_trap(self):
        """ Send a SNMP trap with id `trapid` to the IP address `manager`
        """
        oid = self.options.eoid             # cascade enterprise Object ID
        trapid = self.options.trapid        # base string for trap indicators

        community = self.options.community
        manager_ip = self.options.manager_ip

        severity = self.options.severity
        description = self.trap_description
        url = self.options.trap_url
        alert_level = self.options.alert_level
        now = timeutils.datetime_to_seconds(datetime.datetime.now())

        trapname = '.'.join([oid, trapid])

        ntf = ntforg.NotificationOriginator()

        err = ntf.sendNotification(
            ntforg.CommunityData(community),
            ntforg.UdpTransportTarget((manager_ip, 162)),
            'trap',
            trapname,
            ('1.3.6.1.2.1.1.3.0', rfc1902.Integer(0)),                       # Uptime
            ('1.3.6.1.4.1.7054.71.2.1.0', rfc1902.Integer(severity)),        # Severity
            ('1.3.6.1.4.1.7054.71.2.3.0', rfc1902.OctetString(description)),
            ('1.3.6.1.4.1.7054.71.2.4.0', rfc1902.Integer(0)),               # Event ID
            ('1.3.6.1.4.1.7054.71.2.5.0', rfc1902.OctetString(url)),
            ('1.3.6.1.4.1.7054.71.2.7.0', rfc1902.Integer(alert_level)),     # Alert Level
            ('1.3.6.1.4.1.7054.71.2.8.0', rfc1902.Integer(now)),             # Start Time
            ('1.3.6.1.4.1.7054.71.2.16.0', rfc1902.Integer(0)),              # Source Count
            ('1.3.6.1.4.1.7054.71.2.18.0', rfc1902.Integer(0)),              # Destination Count
            ('1.3.6.1.4.1.7054.71.2.20.0', rfc1902.Integer(0)),              # Protocol Count
            ('1.3.6.1.4.1.7054.71.2.22.0', rfc1902.Integer(0)),              # Port Count
        )
Example #20
    def run(self):
        criteria = self.job.criteria

        ar = DeviceManager.get_device(criteria.appresponse_device)

        if self.table.options.source == 'packets':

            source_name = criteria.appresponse_source

            if source_name.startswith(SourceProxy.JOB_PREFIX):
                job_id = source_name.lstrip(SourceProxy.JOB_PREFIX)
                source = SourceProxy(ar.capture.get_job_by_id(job_id))
            else:
                file_id = source_name.lstrip(SourceProxy.FILE_PREFIX)
                source = SourceProxy(ar.fs.get_file_by_id(file_id))

        else:
            source = SourceProxy(name=self.table.options.source)

        col_extractors, col_names = [], {}

        for col in self.table.get_columns(synthetic=False):
            col_names[col.options.extractor] = col.name

            if col.iskey:
                col_extractors.append(Key(col.options.extractor))
            else:
                col_extractors.append(Value(col.options.extractor))

        # If the data source is of file type and entire PCAP
        # is set True, then set start end times to None

        if isinstance(source, File) and criteria.entire_pcap:
            start = None
            end = None
        else:
            start = datetime_to_seconds(criteria.starttime)
            end = datetime_to_seconds(criteria.endtime)

        granularity = criteria.granularity.total_seconds()

        data_def = DataDef(source=source,
                           columns=col_extractors,
                           granularity=str(granularity),
                           start=start,
                           end=end)

        report = Report(ar)
        report.add(data_def)
        report.run()

        df = report.get_dataframe()
        df.columns = map(lambda x: col_names[x], df.columns)

        def to_int(x):
            return x if str(x).isdigit() else None

        def to_float(x):
            return x if str(x).replace('.', '', 1).isdigit() else None

        # Numerical columns can be returned as '#N/D' when not available
        # Thus convert them to None to help sorting
        for col in self.table.get_columns(synthetic=False):
            if col.datatype == Column.DATATYPE_FLOAT:
                df[col.name] = df[col.name].apply(lambda x: to_float(x))
            elif col.datatype == Column.DATATYPE_INTEGER:
                df[col.name] = df[col.name].apply(lambda x: to_int(x))
            elif col.datatype == Column.DATATYPE_TIME:
                if granularity < 1:
                    # The fractional epoch time values are in string
                    # Thus needs to be converted to float
                    df[col.name] = df[col.name].apply(float)

        if self.table.options.sort_col_name:
            df.sort(columns=self.table.options.sort_col_name,
                    ascending=self.table.options.ascending,
                    inplace=True)
        return QueryComplete(df)
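The '#N/D' sanitization in Example #20 keeps numeric-looking strings and maps everything else to None so sorting does not choke; a tiny sketch of its behaviour (note that negative values would also map to None).

def to_float(x):
    return x if str(x).replace('.', '', 1).isdigit() else None

print(to_float('12.5'))   # '12.5' (kept as a string, not converted)
print(to_float('#N/D'))   # None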
Example #21
    def run(self):
        criteria = self.job.criteria

        ar = DeviceManager.get_device(criteria.appresponse_device)

        if self.table.options.source == 'packets':

            source_name = criteria.appresponse_source

            if source_name.startswith(SourceProxy.JOB_PREFIX):
                job_id = source_name.lstrip(SourceProxy.JOB_PREFIX)
                source = SourceProxy(ar.capture.get_job_by_id(job_id))
            else:
                file_id = source_name.lstrip(SourceProxy.FILE_PREFIX)
                source = SourceProxy(ar.fs.get_file_by_id(file_id))

        else:
            source = SourceProxy(name=self.table.options.source)

        col_extractors = []
        col_names = {}
        aliases = {}

        for col in self.table.get_columns(synthetic=False):
            col_names[col.options.extractor] = col.name

            if col.iskey:
                col_extractors.append(Key(col.options.extractor))
            else:
                col_extractors.append(Value(col.options.extractor))

            if col.options.alias:
                aliases[col.options.extractor] = col.options.alias
                col_extractors.append(Value(col.options.alias))

        # If the data source is of file type and entire PCAP
        # is set True, then set start end times to None

        if (self.table.options.source == 'packets' and
                source.path.startswith(SourceProxy.FILE_PREFIX) and
                criteria.entire_pcap):
            start = None
            end = None
        else:
            start = datetime_to_seconds(criteria.starttime)
            end = datetime_to_seconds(criteria.endtime)

        granularity = criteria.granularity.total_seconds()

        resolution = None

        # temp fix for https://bugzilla.nbttech.com/show_bug.cgi?id=305478
        # if we aren't asking for a timeseries, make sure the data gets
        # aggregated by making resolution greater than the report duration
        if (self.table.options.source == 'packets' and
                'start_time' not in col_names.keys() and
                'end_time' not in col_names.keys()):
            resolution = end - start + granularity

        data_def = DataDef(
            source=source,
            columns=col_extractors,
            granularity=granularity,
            resolution=resolution,
            start=start,
            end=end)

        if hasattr(criteria, 'appresponse_steelfilter'):
            logger.debug('calculating steelfilter expression ...')
            filterexpr = self.job.combine_filterexprs(
                exprs=criteria.appresponse_steelfilter
            )
            if filterexpr:
                logger.debug('applying steelfilter expression: %s'
                             % filterexpr)
                data_def.add_filter(TrafficFilter(type_='steelfilter',
                                                  value=filterexpr))

        report = Report(ar)
        report.add(data_def)
        report.run()

        df = report.get_dataframe()

        report.delete()

        if aliases:
            # overwrite columns with their alias values, then drop 'em
            for k, v in aliases.iteritems():
                df[k] = df[v]
                df.drop(v, 1, inplace=True)

        df.columns = map(lambda x: col_names[x], df.columns)

        def to_int(x):
            return x if str(x).isdigit() else None

        def to_float(x):
            return x if str(x).replace('.', '', 1).isdigit() else None

        # Numerical columns can be returned as '#N/D' when not available
        # Thus convert them to None to help sorting
        for col in self.table.get_columns(synthetic=False):
            if col.datatype == Column.DATATYPE_FLOAT:
                df[col.name] = df[col.name].apply(lambda x: to_float(x))
            elif col.datatype == Column.DATATYPE_INTEGER:
                df[col.name] = df[col.name].apply(lambda x: to_int(x))
            elif col.datatype == Column.DATATYPE_TIME:
                if granularity < 1:
                    # The fractional epoch time values are in string
                    # Thus needs to be converted to float
                    df[col.name] = df[col.name].apply(float)

        if self.table.options.sort_col_name:
            df.sort(columns=self.table.options.sort_col_name,
                    ascending=self.table.options.ascending,
                    inplace=True)
        return QueryComplete(df)
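The bug workaround in Example #21 forces aggregation by requesting a resolution longer than the report window whenever no time column is present; a numeric sketch with illustrative values.

start, end = 1366516800, 1366517700      # a 900-second window
granularity = 60
resolution = end - start + granularity   # 960 s > 900 s, so the data comes back as one aggregated row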
Example #22
    def run(self, template_id, timefilter=None, resolution="auto",
            query=None, trafficexpr=None, data_filter=None, sync=True,
            custom_criteria=None):
        """Create the report and begin running the report on NetProfiler.

        If the `sync` option is True, periodically poll until the report is
        complete, otherwise return immediately.

        :param int template_id: numeric id of the template to use for the report

        :param timefilter: range of time to query,
            instance of :class:`TimeFilter`

        :param str resolution: data resolution, such as (1min, 15min, etc.),
             defaults to 'auto'

        :param str query: query object containing criteria

        :param trafficexpr: instance of :class:`TrafficFilter`

        :param str data_filter: deprecated filter to run against report data

        :param bool sync: if True, poll for status until the report is complete

        """

        self.template_id = template_id

        if timefilter is None:
            self.timefilter = TimeFilter.parse_range("last 5 min")
        else:
            self.timefilter = timefilter
        self.query = query
        self.trafficexpr = trafficexpr

        self.data_filter = data_filter

        self.id = None
        self.queries = list()
        self.last_status = None

        if resolution not in ["auto", "1min", "15min", "hour",
                              "6hour", "day", "week", "month"]:
            rd = parse_timedelta(resolution)
            resolution = self.RESOLUTION_MAP[int(timedelta_total_seconds(rd))]

        self.resolution = resolution

        start = datetime_to_seconds(self.timefilter.start)
        end = datetime_to_seconds(self.timefilter.end)

        criteria = RecursiveUpdateDict(**{"time_frame": {"start": int(start),
                                                         "end": int(end)}
                                          })

        if self.query is not None:
            criteria["query"] = self.query

        if self.resolution != "auto":
            criteria["time_frame"]["resolution"] = self.resolution

        if self.data_filter:
            criteria['deprecated'] = {self.data_filter[0]: self.data_filter[1]}

        if self.trafficexpr is not None:
            criteria["traffic_expression"] = self.trafficexpr.filter

        if custom_criteria:
            for k, v in custom_criteria.iteritems():
                criteria[k] = v

        to_post = {"template_id": self.template_id,
                   "criteria": criteria}

        logger.debug("Posting JSON: %s" % to_post)

        response = self.profiler.api.report.reports(data=to_post)

        try:
            self.id = int(response['id'])
        except KeyError:
            raise ValueError(
                "failed to retrieve report id from report creation response: %s"
                % response)

        logger.info("Created report %d" % self.id)

        if sync:
            self.wait_for_complete()
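Non-canonical resolution strings in Example #22 are normalized through a seconds-to-label map; a hedged sketch, where the map contents are an assumption standing in for the class's RESOLUTION_MAP.

RESOLUTION_MAP = {60: '1min', 900: '15min', 3600: 'hour'}        # assumed contents

rd = parse_timedelta('15 min')                                   # -> timedelta(minutes=15)
resolution = RESOLUTION_MAP[int(timedelta_total_seconds(rd))]    # -> '15min'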
Example #23
    def run(self,
            template_id,
            timefilter=None,
            resolution="auto",
            query=None,
            trafficexpr=None,
            data_filter=None,
            sync=True,
            custom_criteria=None):
        """Create the report and begin running the report on NetProfiler.

        If the `sync` option is True, periodically poll until the report is
        complete, otherwise return immediately.

        :param int template_id: numeric id of the template to use for the report

        :param timefilter: range of time to query,
            instance of :class:`TimeFilter`

        :param str resolution: data resolution, such as (1min, 15min, etc.),
             defaults to 'auto'

        :param str query: query object containing criteria

        :param trafficexpr: instance of :class:`TrafficFilter`

        :param str data_filter: deprecated filter to run against report data

        :param bool sync: if True, poll for status until the report is complete

        """

        self.template_id = template_id

        if timefilter is None:
            self.timefilter = TimeFilter.parse_range("last 5 min")
        else:
            self.timefilter = timefilter
        self.query = query
        self.trafficexpr = trafficexpr

        self.data_filter = data_filter

        self.id = None
        self.queries = list()
        self.last_status = None

        if resolution not in [
                "auto", "1min", "15min", "hour", "6hour", "day", "week",
                "month"
        ]:
            rd = parse_timedelta(resolution)
            resolution = self.RESOLUTION_MAP[int(timedelta_total_seconds(rd))]

        self.resolution = resolution

        start = datetime_to_seconds(self.timefilter.start)
        end = datetime_to_seconds(self.timefilter.end)

        criteria = RecursiveUpdateDict(
            **{"time_frame": {
                "start": int(start),
                "end": int(end)
            }})

        if self.query is not None:
            criteria["query"] = self.query

        if self.resolution != "auto":
            criteria["time_frame"]["resolution"] = self.resolution

        if self.data_filter:
            criteria['deprecated'] = {self.data_filter[0]: self.data_filter[1]}

        if self.trafficexpr is not None:
            criteria["traffic_expression"] = self.trafficexpr.filter

        if custom_criteria:
            for k, v in custom_criteria.iteritems():
                criteria[k] = v

        to_post = {"template_id": self.template_id, "criteria": criteria}

        logger.debug("Posting JSON: %s" % to_post)

        response = self.profiler.api.report.reports(data=to_post)

        try:
            self.id = int(response['id'])
        except KeyError:
            raise ValueError(
                "failed to retrieve report id from report creation response: %s"
                % response)

        logger.info("Created report %d" % self.id)

        if sync:
            self.wait_for_complete()
    def main(self):
        if self.options.jobname:
            export_name = self.options.jobname
            source = self.netshark.get_capture_job_by_name(export_name)
            create_export = self.netshark.api.jobs.create_export
            status_export = self.netshark.api.jobs.get_export_details
            download_export = self.netshark.api.jobs.get_packets_from_export
            delete_export = self.netshark.api.jobs.delete_export
        elif self.options.clipname:
            export_name = self.options.clipname
            source = self.netshark.get_trace_clip_by_description(export_name)
            create_export = self.netshark.api.clips.create_export
            status_export = self.netshark.api.clips.get_export_details
            download_export = self.netshark.api.clips.get_packets_from_export
            delete_export = self.netshark.api.clips.delete_export

        filename = self.options.filename
        if not filename:
            filename = '%s_export.pcap' % export_name

        if self.options.timerange:
            timefilter = TimeFilter.parse_range(self.options.timerange)
        elif self.options.start_time and self.options.end_time:
            start = string_to_datetime(float(self.options.start_time))
            end = string_to_datetime(float(self.options.end_time))
            timefilter = TimeFilter(start, end)
        else:
            self.optparse.error('Select either --timerange or --start and --end times')

        config = {
            #'output_filename': filename,
            'output_format': 'PCAP_US',
            'start_time': datetime_to_seconds(timefilter.start),
            'end_time': datetime_to_seconds(timefilter.end),
        }

        if self.options.filters:
            filters = [f.split('=') for f in self.options.filters]
            filters = [r'%s="%s"' % (k, v) for k, v in filters]
            config['filters'] = [NetSharkFilter(filt).bind(self.netshark) for filt in filters]

        try:
            export_id = create_export(source.id, config=config)

            print 'Export created with ID: %s' % export_id

            status = status_export(source.id, export_id['id'])

            print 'Current status of export is:\n%s' % status

            if status['status']['state'] == 'RUNNING':
                print 'beginning download to file %s' % filename
                download_export(source.id, export_id['id'], filename)
        finally:
            try:
                print 'Trying to delete export ... ',
                delete_export(source.id, export_id['id'])
                print 'deleted.'
            except:
                print 'Error when trying to delete export.  Ignoring.'
                pass
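The export script above turns key=value filter strings into quoted NetShark filter expressions before binding them; a small sketch of that transformation with illustrative values.

raw = ['ip.src=10.1.2.3', 'tcp.port=443']
pairs = [f.split('=') for f in raw]
exprs = [r'%s="%s"' % (k, v) for k, v in pairs]
# -> ['ip.src="10.1.2.3"', 'tcp.port="443"']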