Code example #1
File: timeseries_test.py Project: vismid86/grr
    def testNormalizeFillsGapsWithNone(self):
        s = timeseries.Timeseries()
        for i in range(21, 51):
            s.Append(i, (i + 5) * 10000)
        for i in range(81, 101):
            s.Append(i, (i + 5) * 10000)
        s.Normalize(10 * 10000, 10 * 10000, 120 * 10000)
        self.assertEqual(11, len(s.data))
        self.assertEqual([None, 100000], s.data[0])
        self.assertEqual([22.5, 200000], s.data[1])
        self.assertEqual([None, 600000], s.data[5])
        self.assertEqual([None, 1100000], s.data[-1])
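
Judging from these assertions, Normalize(period, start, stop) splits the half-open interval [start, stop) into fixed-width buckets, averages the samples that fall into each bucket, and emits [None, bucket_start] for buckets that contain no samples. A minimal sketch of that behavior over plain (value, timestamp) pairs; this illustrates the contract the test pins down, not GRR's actual implementation:

    def normalize_gauge(points, period, start, stop):
        # Average the samples in each fixed-width window; empty windows
        # yield None so gaps stay visible in the output.
        out = []
        for bucket_start in range(start, stop, period):
            values = [v for v, t in points
                      if bucket_start <= t < bucket_start + period]
            out.append([sum(values) / len(values) if values else None,
                        bucket_start])
        return out

With the series above, the bucket [200000, 300000) holds the values 21 through 24, whose mean is the expected 22.5.
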
Code example #2
    def testNormalize(self):
        s = self.makeSeries()
        s.Normalize(10 * 10000, 100000, 600000)
        self.assertLen(s.data, 5)
        self.assertEqual([9.5, 100000], s.data[0])
        self.assertEqual([49.5, 500000], s.data[-1])

        s = timeseries.Timeseries()
        for i in range(0, 1000):
            s.Append(0.5, i * 10)
        s.Normalize(200, 5000, 10000)
        self.assertLen(s.data, 25)
        self.assertListEqual(s.data[0], [0.5, 5000])
        self.assertListEqual(s.data[24], [0.5, 9800])

        s = timeseries.Timeseries()
        for i in range(0, 1000):
            s.Append(i, i * 10)
        s.Normalize(200, 5000, 10000, mode=timeseries.NORMALIZE_MODE_COUNTER)
        self.assertLen(s.data, 25)
        self.assertListEqual(s.data[0], [519, 5000])
        self.assertListEqual(s.data[24], [999, 9800])
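
For NORMALIZE_MODE_COUNTER the assertions only fit keeping the largest sample per bucket: the first bucket [5000, 5200) covers the timestamps 5000 through 5190, i.e. the values 500 through 519, and the test expects 519. A sketch of that reading (again an illustration, not GRR's code):

    def normalize_counter(points, period, start, stop):
        # Counter mode: keep the largest sample seen in each window.
        out = []
        for bucket_start in range(start, stop, period):
            values = [v for v, t in points
                      if bucket_start <= t < bucket_start + period]
            out.append([max(values) if values else None, bucket_start])
        return out
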
Code example #3
  def testToDeltas(self):
    s = self.makeSeries()
    self.assertEqual(100, len(s.data))
    s.ToDeltas()
    self.assertEqual(99, len(s.data))
    self.assertEqual([1, 60000], s.data[0])
    self.assertEqual([1, 1040000], s.data[-1])

    s = timeseries.Timeseries()
    for i in range(0, 1000):
      s.Append(i, i * 1e6)
    s.Normalize(
        20 * 1e6, 500 * 1e6, 1000 * 1e6, mode=timeseries.NORMALIZE_MODE_COUNTER)
    self.assertEqual(25, len(s.data))
    self.assertListEqual(s.data[0], [519, int(500 * 1e6)])
    s.ToDeltas()
    self.assertEqual(24, len(s.data))
    self.assertListEqual(s.data[0], [20, int(500 * 1e6)])
    self.assertListEqual(s.data[23], [20, int(960 * 1e6)])
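
The assertions imply that ToDeltas replaces each sample with the difference to the next one while keeping the timestamp of the earlier sample, so the series shrinks by exactly one point. A minimal sketch of that behavior, again over plain [value, timestamp] pairs rather than GRR's internals:

    def to_deltas(points):
        # Difference of consecutive values, stamped with the earlier time;
        # a series of n points yields n - 1 deltas.
        return [[points[i + 1][0] - points[i][0], points[i][1]]
                for i in range(len(points) - 1)]

This matches the normalized counter series above: consecutive buckets hold 519, 539, 559, and so on, so every delta is 20, and the first delta keeps the first bucket's timestamp of 500 * 1e6.
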
Code example #4
File: stats_store.py Project: vismid86/grr
    def _TimeSeriesFromData(self, data, attr=None):
        """Build time series from StatsStore data."""

        series = timeseries.Timeseries()

        for value, timestamp in data:
            if attr:
                try:
                    series.Append(getattr(value, attr), timestamp)
                except AttributeError:
                    raise ValueError("Can't find attribute %s in value %s." %
                                     (attr, value))
            else:
                if hasattr(value, "sum") or hasattr(value, "count"):
                    raise ValueError(
                        "Can't treat complext type as simple value: %s" %
                        value)
                series.Append(value, timestamp)

        return series
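
For illustration, here is how this helper would be fed. Both the Sample type and the literal data below are made up for this example and are not part of GRR, and handler stands for an instance of the class the method lives on:

    import collections

    # Hypothetical value type; any object carrying the named attribute works.
    Sample = collections.namedtuple("Sample", ["mean"])

    data = [(Sample(mean=1.5), 1000), (Sample(mean=2.5), 2000)]
    series = handler._TimeSeriesFromData(data, attr="mean")  # appends 1.5, 2.5

    plain = [(10, 1000), (20, 2000)]
    series = handler._TimeSeriesFromData(plain)  # plain numeric values
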
Code example #5
File: client.py Project: vismid86/grr
    def Handle(self, args, token=None):
        start_time = args.start
        end_time = args.end

        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()

        if not start_time:
            start_time = end_time - rdfvalue.Duration("30m")

        fd = aff4.FACTORY.Create(args.client_id.ToClientURN().Add("stats"),
                                 aff4_type=aff4_stats.ClientStats,
                                 mode="r",
                                 token=token,
                                 age=(start_time, end_time))

        stat_values = list(fd.GetValuesForAttribute(fd.Schema.STATS))
        points = []
        for stat_value in reversed(stat_values):
            if args.metric == args.Metric.CPU_PERCENT:
                points.extend((s.cpu_percent, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_SYSTEM:
                points.extend((s.system_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_USER:
                points.extend((s.user_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.IO_READ_BYTES:
                points.extend(
                    (s.read_bytes, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_BYTES:
                points.extend((s.write_bytes, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_READ_OPS:
                points.extend(
                    (s.read_count, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_OPS:
                points.extend((s.write_count, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.NETWORK_BYTES_RECEIVED:
                points.append((stat_value.bytes_received, stat_value.age))
            elif args.metric == args.Metric.NETWORK_BYTES_SENT:
                points.append((stat_value.bytes_sent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_PERCENT:
                points.append((stat_value.memory_percent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_RSS_SIZE:
                points.append((stat_value.RSS_size, stat_value.age))
            elif args.metric == args.Metric.MEMORY_VMS_SIZE:
                points.append((stat_value.VMS_size, stat_value.age))
            else:
                raise ValueError("Unknown metric.")

        # Points collected from "cpu_samples" and "io_samples" may not be correctly
        # sorted in some cases (as overlaps between different stat_values are
        # possible).
        points.sort(key=lambda x: x[1])

        ts = timeseries.Timeseries()
        ts.MultiAppend(points)

        if args.metric not in self.GAUGE_METRICS:
            ts.MakeIncreasing()

        if len(stat_values) > self.MAX_SAMPLES:
            sampling_interval = rdfvalue.Duration.FromSeconds(
                ((end_time - start_time).seconds // self.MAX_SAMPLES) or 1)
            if args.metric in self.GAUGE_METRICS:
                mode = timeseries.NORMALIZE_MODE_GAUGE
            else:
                mode = timeseries.NORMALIZE_MODE_COUNTER

            ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

        result = ApiGetClientLoadStatsResult()
        for value, timestamp in ts.data:
            dp = api_stats.ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                        value=value)
            result.data_points.append(dp)

        return result
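
Before normalizing, counter metrics are run through MakeIncreasing. Presumably this compensates for counter resets (a client restart zeroes counters such as read_bytes) by carrying the pre-reset total forward whenever a value drops, so that NORMALIZE_MODE_COUNTER sees a monotonic series. A sketch of that idea, as an assumption about the semantics rather than GRR's code:

    def make_increasing(points):
        # Whenever a value drops below its predecessor, treat it as a
        # counter reset and add the accumulated pre-reset total back in.
        offset, last = 0, None
        out = []
        for value, timestamp in points:
            if last is not None and value < last:
                offset += last
            last = value
            out.append([value + offset, timestamp])
        return out
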
Code example #6
    def Handle(self, args, context=None):
        start_time = args.start
        end_time = args.end

        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()

        if not start_time:
            start_time = end_time - rdfvalue.Duration.From(
                30, rdfvalue.MINUTES)

        stat_values = data_store.REL_DB.ReadClientStats(
            client_id=str(args.client_id),
            min_timestamp=start_time,
            max_timestamp=end_time)
        points = []
        for stat_value in reversed(stat_values):
            if args.metric == args.Metric.CPU_PERCENT:
                points.extend((s.cpu_percent, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_SYSTEM:
                points.extend((s.system_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_USER:
                points.extend((s.user_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.IO_READ_BYTES:
                points.extend(
                    (s.read_bytes, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_BYTES:
                points.extend((s.write_bytes, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_READ_OPS:
                points.extend(
                    (s.read_count, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_OPS:
                points.extend((s.write_count, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.NETWORK_BYTES_RECEIVED:
                points.append(
                    (stat_value.bytes_received, stat_value.timestamp))
            elif args.metric == args.Metric.NETWORK_BYTES_SENT:
                points.append((stat_value.bytes_sent, stat_value.timestamp))
            elif args.metric == args.Metric.MEMORY_PERCENT:
                points.append(
                    (stat_value.memory_percent, stat_value.timestamp))
            elif args.metric == args.Metric.MEMORY_RSS_SIZE:
                points.append((stat_value.RSS_size, stat_value.timestamp))
            elif args.metric == args.Metric.MEMORY_VMS_SIZE:
                points.append((stat_value.VMS_size, stat_value.timestamp))
            else:
                raise ValueError("Unknown metric.")

        # Points collected from "cpu_samples" and "io_samples" may not be correctly
        # sorted in some cases (as overlaps between different stat_values are
        # possible).
        points.sort(key=lambda x: x[1])

        ts = timeseries.Timeseries()
        ts.MultiAppend(points)

        if args.metric not in self.GAUGE_METRICS:
            ts.MakeIncreasing()

        if len(stat_values) > self.MAX_SAMPLES:
            sampling_interval = rdfvalue.Duration.From(
                ((end_time - start_time).ToInt(rdfvalue.SECONDS) //
                 self.MAX_SAMPLES) or 1, rdfvalue.SECONDS)
            if args.metric in self.GAUGE_METRICS:
                mode = timeseries.NORMALIZE_MODE_GAUGE
            else:
                mode = timeseries.NORMALIZE_MODE_COUNTER

            ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

        result = ApiGetClientLoadStatsResult()
        for value, timestamp in ts.data:
            dp = api_stats.ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                        value=float(value))
            result.data_points.append(dp)

        return result
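
Examples #5 and #6 are two generations of the same handler. The older one reads client stats from AFF4 (aff4.FACTORY.Create with an age range), takes a token, and stamps the network and memory points with stat_value.age; the newer one reads the same samples through the relational datastore (data_store.REL_DB.ReadClientStats), takes a context, uses the rdfvalue.Duration.From API, stamps points with stat_value.timestamp, and casts values to float when building the result. The metric dispatch and the normalization logic are otherwise identical.
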
Code example #7
    def makeSeries(self):
        s = timeseries.Timeseries()
        for i in range(1, 101):
            s.Append(i, (i + 5) * 10000)
        return s
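
This is the fixture behind examples #2 and #3: 100 points with values 1 through 100 at timestamps 60000 through 1050000. In example #2, the first bucket [100000, 200000) therefore holds the values 5 through 14, whose mean is the expected 9.5; in example #3, consecutive values differ by 1, which is why every delta equals 1.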