    def main(self):

        if self.options.showsources:
            svcdef = self.appresponse.find_service('npm.reports')
            dr = svcdef.bind('source_names')
            source_names = dr.execute('get').data
            print('\n'.join(source_names))
            return

        source = SourceProxy(name=self.options.sourcename)

        columns = []
        headers = []
        if self.options.keycolumns:
            for col in self.options.keycolumns.split(','):
                columns.append(Key(col))
                headers.append(col)

        for col in self.options.valuecolumns.split(','):
            columns.append(Value(col))
            headers.append(col)

        topbycolumns = []

        if self.options.topbycolumns:
            for col in self.options.topbycolumns.split(','):
                topbycolumns.append(Key(col))

        data_def = DataDef(source=source,
                           columns=columns,
                           granularity=self.options.granularity,
                           resolution=self.options.resolution,
                           time_range=self.options.timerange,
                           limit=self.options.limit,
                           topbycolumns=topbycolumns)

        if self.options.filterexpr:
            data_def.add_filter(
                TrafficFilter(type_='steelfilter',
                              value=self.options.filterexpr))

        report = Report(self.appresponse)
        report.add(data_def)
        report.run()

        data = report.get_data()
        headers = report.get_legend()

        report.delete()

        if self.options.csvfile:
            with open(self.options.csvfile, 'w') as f:
                for line in Formatter.get_csv(data, headers):
                    f.write(line)
                    f.write('\n')
        else:
            Formatter.print_csv(data, headers)
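The examples on this page omit their import lines. A likely preamble, assuming the usual SteelScript AppResponse module layout (the exact module paths are assumptions, not shown in the source):

from steelscript.appresponse.core.reports import DataDef, Report, SourceProxy
from steelscript.appresponse.core.types import Key, Value, TrafficFilter
from steelscript.common.datautils import Formatter
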
Example 2
    def main(self):
        if self.options.sourcetype == 'file':
            source = self.appresponse.fs.get_file_by_id(self.options.sourceid)
        elif self.options.sourcetype == 'job':
            source = self.appresponse.capture.\
                get_job_by_name(self.options.sourceid)
        else:
            source = self.appresponse.clips.\
                get_clip_by_id(self.options.sourceid)

        data_source = SourceProxy(source)
        columns = []
        headers = []

        if self.options.keycolumns:
            for col in self.options.keycolumns.split(','):
                columns.append(Key(col))
                headers.append(col)

        for col in self.options.valuecolumns.split(','):
            columns.append(Value(col))
            headers.append(col)

        data_def = DataDef(source=data_source,
                           columns=columns,
                           granularity=self.options.granularity,
                           resolution=self.options.resolution,
                           time_range=self.options.timerange)

        if self.options.filterexpr:
            data_def.add_filter(
                TrafficFilter(type_=self.options.filtertype,
                              value=self.options.filterexpr))

        report = Report(self.appresponse)
        report.add(data_def)
        report.run()

        data = report.get_data()
        headers = report.get_legend()

        report.delete()

        if self.options.csvfile:
            with open(self.options.csvfile, 'w') as f:
                for line in Formatter.get_csv(data, headers):
                    f.write(line)
                    f.write('\n')
        else:
            Formatter.print_csv(data, headers)
Example 3
    def get_column_objects(self, source_name, columns):
        """Return Key/Value objects for given set of string names."""
        coldefs = self.sources[source_name]['columns']

        def iskey(coldef):
            return coldef.get('grouped_by') is True

        cols = []
        for c in columns:
            obj = Key(c) if iskey(coldefs[c]) else Value(c)
            cols.append(obj)
        return cols
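A minimal usage sketch for the helper above; the layout of the sources cache and the column names are illustrative assumptions, not taken from the source:

    # Hypothetical per-source column-definition cache; 'grouped_by': True
    # marks key columns, which is what iskey() checks for.
    self.sources = {
        'aggregates': {
            'columns': {
                'start_time': {'grouped_by': True},
                'sum_traffic.total_bytes': {'grouped_by': False},
            }
        }
    }

    cols = self.get_column_objects(
        'aggregates', ['start_time', 'sum_traffic.total_bytes'])
    # -> [Key('start_time'), Value('sum_traffic.total_bytes')]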
Example 4
    def run(self):
        criteria = self.job.criteria

        ar = DeviceManager.get_device(criteria.appresponse_device)

        if self.table.options.source == 'packets':

            source_name = criteria.appresponse_source

            if source_name.startswith(SourceProxy.JOB_PREFIX):
                # Slice off the prefix; str.lstrip() would strip any leading
                # characters found in the prefix, not the prefix string itself
                job_id = source_name[len(SourceProxy.JOB_PREFIX):]
                source = SourceProxy(ar.capture.get_job_by_id(job_id))
            else:
                file_id = source_name[len(SourceProxy.FILE_PREFIX):]
                source = SourceProxy(ar.fs.get_file_by_id(file_id))

        else:
            source = SourceProxy(name=self.table.options.source)

        col_extractors, col_names = [], {}

        for col in self.table.get_columns(synthetic=False):
            col_names[col.options.extractor] = col.name

            if col.iskey:
                col_extractors.append(Key(col.options.extractor))
            else:
                col_extractors.append(Value(col.options.extractor))

        # If the data source is a capture file and 'entire PCAP' is set,
        # leave start/end unset so the whole file is queried

        if (self.table.options.source == 'packets' and
                source.path.startswith(SourceProxy.FILE_PREFIX) and
                criteria.entire_pcap):
            start = None
            end = None
        else:
            start = datetime_to_seconds(criteria.starttime)
            end = datetime_to_seconds(criteria.endtime)

        granularity = criteria.granularity.total_seconds()

        data_def = DataDef(source=source,
                           columns=col_extractors,
                           granularity=str(granularity),
                           start=start,
                           end=end)

        report = Report(ar)
        report.add(data_def)
        report.run()

        df = report.get_dataframe()
        df.columns = [col_names[x] for x in df.columns]

        def to_int(x):
            return x if str(x).isdigit() else None

        def to_float(x):
            return x if str(x).replace('.', '', 1).isdigit() else None

        # Numerical columns can be returned as '#N/D' when not available
        # Thus convert them to None to help sorting
        for col in self.table.get_columns(synthetic=False):
            if col.datatype == Column.DATATYPE_FLOAT:
                df[col.name] = df[col.name].apply(lambda x: to_float(x))
            elif col.datatype == Column.DATATYPE_INTEGER:
                df[col.name] = df[col.name].apply(lambda x: to_int(x))
            elif col.datatype == Column.DATATYPE_TIME:
                if granularity < 1:
                    # Fractional epoch time values are returned as strings
                    # and need to be converted to float
                    df[col.name] = df[col.name].apply(float)

        if self.table.options.sort_col_name:
            df.sort_values(by=self.table.options.sort_col_name,
                           ascending=self.table.options.ascending,
                           inplace=True)
        return QueryComplete(df)
Example 5
def packet_columns():
    key_cols = [Key(key) for key in PACKETS_KEY_COLS]
    value_cols = [Value(key) for key in PACKETS_VALUE_COLS]
    return key_cols, value_cols
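The PACKETS_KEY_COLS and PACKETS_VALUE_COLS constants referenced above are defined elsewhere in the module; a minimal sketch with illustrative extractor names (the specific names are assumptions, not from the source):

PACKETS_KEY_COLS = ['start_time', 'end_time']
PACKETS_VALUE_COLS = ['sum_traffic.total_bytes', 'avg_traffic.total_bytes_ps']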
Example 6
    def main(self):

        if self.options.sourcename == 'packets':
            if self.options.sourceid is None:
                source = self.appresponse.capture.get_vifgs()[0]
            else:
                source = SourceProxy(name='packets',
                                     path=self.options.sourceid)
        else:
            source = SourceProxy(name='aggregates')

        columns = []
        headers = []

        for col in self.options.keycolumns.split(','):
            columns.append(Key(col))
            headers.append(col)

        for col in self.options.valuecolumns.split(','):
            columns.append(Value(col))
            headers.append(col)

        data_def = DataDef(source=source,
                           columns=columns,
                           granularity=self.options.granularity,
                           resolution=self.options.resolution,
                           live=True)

        if self.options.filterexpr:
            data_def.add_filter(TrafficFilter(type_='steelfilter',
                                              value=self.options.filterexpr))

        print('Running report, press Ctrl-C to exit.')
        print('')

        report = self.appresponse.create_report(data_def)
        time.sleep(1)

        try:
            while True:
                banner = '{} {}'.format(datetime.datetime.now(), '--' * 20)
                print(banner)

                try:
                    data = report.get_data()['data']
                    headers = report.get_legend()

                    if self.options.sortby:
                        index = headers.index(self.options.sortby)
                        data.sort(key=lambda x: x[index], reverse=True)

                    if self.limit:
                        total_rows = len(data)
                        limit_string = ('Showing {} out of {} rows.'
                                        .format(self.limit, total_rows))
                        data = data[:self.limit]
                    else:
                        limit_string = None

                except KeyError:
                    # something went wrong, print error and exit
                    print('Error accessing data:')
                    print(report.get_data())
                    raise KeyboardInterrupt

                if self.options.csvfile:
                    with open(self.options.csvfile, 'a') as f:
                        f.write(banner)
                        f.write('\n')
                        for line in Formatter.get_csv(data, headers):
                            f.write(line)
                            f.write('\n')
                        if limit_string:
                            f.write(limit_string)
                            f.write('\n')
                else:
                    Formatter.print_table(data, headers)
                    if limit_string:
                        print(limit_string)

                time.sleep(self.delay)

        except KeyboardInterrupt:
            print('Exiting ...')
            report.delete()
Example 7
    def run(self):
        criteria = self.job.criteria

        ar = DeviceManager.get_device(criteria.appresponse_device)

        if self.table.options.source == 'packets':

            source_name = criteria.appresponse_source

            if source_name.startswith(SourceProxy.JOB_PREFIX):
                # Slice off the prefix; str.lstrip() would strip any leading
                # characters found in the prefix, not the prefix string itself
                job_id = source_name[len(SourceProxy.JOB_PREFIX):]
                source = SourceProxy(ar.capture.get_job_by_id(job_id))
            else:
                file_id = source_name[len(SourceProxy.FILE_PREFIX):]
                source = SourceProxy(ar.fs.get_file_by_id(file_id))

        else:
            source = SourceProxy(name=self.table.options.source)

        col_extractors = []
        col_names = {}
        aliases = {}

        for col in self.table.get_columns(synthetic=False):
            col_names[col.options.extractor] = col.name

            if col.iskey:
                col_extractors.append(Key(col.options.extractor))
            else:
                col_extractors.append(Value(col.options.extractor))

            if col.options.alias:
                aliases[col.options.extractor] = col.options.alias
                col_extractors.append(Value(col.options.alias))

        # If the data source is a capture file and 'entire PCAP' is set,
        # leave start/end unset so the whole file is queried

        if (self.table.options.source == 'packets' and
                source.path.startswith(SourceProxy.FILE_PREFIX) and
                criteria.entire_pcap):
            start = None
            end = None
        else:
            start = datetime_to_seconds(criteria.starttime)
            end = datetime_to_seconds(criteria.endtime)

        granularity = criteria.granularity.total_seconds()

        resolution = None

        # temp fix for https://bugzilla.nbttech.com/show_bug.cgi?id=305478
        # if we aren't asking for a timeseries, make sure the data gets
        # aggregated by making resolution greater than the report duration
        if (self.table.options.source == 'packets' and
                'start_time' not in col_names.keys() and
                'end_time' not in col_names.keys()):
            resolution = end - start + granularity

        data_def = DataDef(
            source=source,
            columns=col_extractors,
            granularity=granularity,
            resolution=resolution,
            start=start,
            end=end)

        if hasattr(criteria, 'appresponse_steelfilter'):
            logger.debug('calculating steelfilter expression ...')
            filterexpr = self.job.combine_filterexprs(
                exprs=criteria.appresponse_steelfilter
            )
            if filterexpr:
                logger.debug('applying steelfilter expression: %s'
                             % filterexpr)
                data_def.add_filter(TrafficFilter(type_='steelfilter',
                                                  value=filterexpr))

        report = Report(ar)
        report.add(data_def)
        report.run()

        df = report.get_dataframe()

        report.delete()

        if aliases:
            # overwrite columns with their alias values, then drop the aliases
            for k, v in aliases.items():
                df[k] = df[v]
                df.drop(columns=v, inplace=True)

        df.columns = [col_names[x] for x in df.columns]

        def to_int(x):
            return x if str(x).isdigit() else None

        def to_float(x):
            return x if str(x).replace('.', '', 1).isdigit() else None

        # Numerical columns can be returned as '#N/D' when not available
        # Thus convert them to None to help sorting
        for col in self.table.get_columns(synthetic=False):
            if col.datatype == Column.DATATYPE_FLOAT:
                df[col.name] = df[col.name].apply(lambda x: to_float(x))
            elif col.datatype == Column.DATATYPE_INTEGER:
                df[col.name] = df[col.name].apply(lambda x: to_int(x))
            elif col.datatype == Column.DATATYPE_TIME:
                if granularity < 1:
                    # Fractional epoch time values are returned as strings
                    # and need to be converted to float
                    df[col.name] = df[col.name].apply(float)

        if self.table.options.sort_col_name:
            df.sort_values(by=self.table.options.sort_col_name,
                           ascending=self.table.options.ascending,
                           inplace=True)
        return QueryComplete(df)