Example #1
    def testAddRescale(self):
        s1 = timeseries.Timeseries()
        for i in range(0, 5):
            s1.Append(i, i * 1000)
        s2 = timeseries.Timeseries()
        for i in range(0, 5):
            s2.Append(2 * i, i * 1000)
        s1.Add(s2)

        for i in range(0, 5):
            self.assertEqual(3 * i, s1.data[i][0])

        s1.Rescale(1 / 3.0)
        for i in range(0, 5):
            self.assertEqual(i, s1.data[i][0])
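Add and Rescale compose naturally, for example to average two series that share the same timestamps. A minimal sketch, assuming the timeseries module shown in these examples is importable (the import path is a placeholder):

from grr.lib import timeseries  # placeholder import path

a = timeseries.Timeseries()
b = timeseries.Timeseries()
for i in range(0, 5):
    a.Append(i, i * 1000)       # values 0, 1, 2, 3, 4
    b.Append(10 - i, i * 1000)  # values 10, 9, 8, 7, 6 at the same timestamps

a.Add(b)        # element-wise sum of values: 10 at every timestamp
a.Rescale(0.5)  # halve every value, leaving the per-timestamp average of 5.0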
Example #2
    def testMean(self):
        s = timeseries.Timeseries()
        self.assertEqual(None, s.Mean())

        s = self.makeSeries()
        self.assertEqual(100, len(s.data))
        self.assertEqual(50, s.Mean())
Example #3
  def ts(self):
    """Return single timeseries.Timeseries built by this query."""

    if self.time_series is None:
      raise RuntimeError("Time series weren't built yet.")

    if not self.time_series:
      return timeseries.Timeseries()

    return self.time_series[0]
Example #4
    def testMakeIncreasing(self):
        s = timeseries.Timeseries()
        for i in range(0, 5):
            s.Append(i, i * 1000)
        # The second batch restarts at 0, simulating a counter reset.
        for i in range(0, 5):
            s.Append(i, (i + 6) * 1000)
        self.assertEqual(10, len(s.data))
        self.assertEqual([4, 10000], s.data[-1])
        # MakeIncreasing offsets post-reset samples so the series never decreases.
        s.MakeIncreasing()
        self.assertEqual(10, len(s.data))
        self.assertEqual([8, 10000], s.data[-1])
Example #5
    def testNormalizeFillsGapsWithNone(self):
        s = timeseries.Timeseries()
        for i in range(21, 51):
            s.Append(i, (i + 5) * 10000)
        # The two loops leave a gap in timestamps between 550000 and 860000.
        for i in range(81, 101):
            s.Append(i, (i + 5) * 10000)
        s.Normalize(10 * 10000, 10 * 10000, 120 * 10000)
        self.assertEqual(11, len(s.data))
        # Buckets that contain no samples are filled with None values.
        self.assertEqual([None, 100000], s.data[0])
        self.assertEqual([22.5, 200000], s.data[1])
        self.assertEqual([None, 600000], s.data[5])
        self.assertEqual([None, 1100000], s.data[-1])
Example #6
    def testNormalize(self):
        s = self.makeSeries()
        s.Normalize(10 * 10000, 100000, 600000)
        self.assertEqual(5, len(s.data))
        self.assertEqual([9.5, 100000], s.data[0])
        self.assertEqual([49.5, 500000], s.data[-1])

        s = timeseries.Timeseries()
        for i in range(0, 1000):
            s.Append(0.5, i * 10)
        s.Normalize(200, 5000, 10000)
        self.assertEqual(25, len(s.data))
        self.assertListEqual(s.data[0], [0.5, 5000])
        self.assertListEqual(s.data[24], [0.5, 9800])

        s = timeseries.Timeseries()
        for i in range(0, 1000):
            s.Append(i, i * 10)
        s.Normalize(200, 5000, 10000, mode=timeseries.NORMALIZE_MODE_COUNTER)
        self.assertEqual(25, len(s.data))
        self.assertListEqual(s.data[0], [519, 5000])
        self.assertListEqual(s.data[24], [999, 9800])
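For contrast, the two named normalization modes can be applied to the same counter-style series. The sketch below assumes that the default behaviour seen above (per-bucket averaging) is what NORMALIZE_MODE_GAUGE selects, and uses a placeholder import path:

from grr.lib import timeseries  # placeholder import path


def build_counter_series():
    s = timeseries.Timeseries()
    for i in range(0, 1000):
        s.Append(i, i * 10)  # values 0..999, one sample every 10 time units
    return s


gauge = build_counter_series()
gauge.Normalize(200, 5000, 10000, mode=timeseries.NORMALIZE_MODE_GAUGE)
# Averaging per bucket: the first bucket (timestamps 5000..5190) covers the
# samples 500..519, so its value should be roughly 509.5.

counter = build_counter_series()
counter.Normalize(200, 5000, 10000, mode=timeseries.NORMALIZE_MODE_COUNTER)
# COUNTER mode keeps the largest sample per bucket instead: [519, 5000],
# matching the assertion in example #6.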
Example #7
  def _TimeSeriesFromData(self, data, attr=None):
    """Build time series from StatsStore data."""

    series = timeseries.Timeseries()

    for value, timestamp in data:
      if attr:
        try:
          series.Append(getattr(value, attr), timestamp)
        except AttributeError:
          raise ValueError("Can't find attribute %s in value %s." % (attr,
                                                                     value))
      else:
        if hasattr(value, "sum") or hasattr(value, "count"):
          raise ValueError("Can't treat complext type as simple value: %s" %
                           value)
        series.Append(value, timestamp)

    return series
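To see how such a helper is fed, here is a standalone adaptation written as a free function (the error handling is dropped, the sample row type is made up, and the import path is a placeholder):

import collections

from grr.lib import timeseries  # placeholder import path

# Hypothetical stand-in for a structured StatsStore value.
CpuSample = collections.namedtuple("CpuSample",
                                    ["user_cpu_time", "system_cpu_time"])


def time_series_from_data(data, attr=None):
  """Free-function adaptation of _TimeSeriesFromData above."""
  series = timeseries.Timeseries()
  for value, timestamp in data:
    series.Append(getattr(value, attr) if attr else value, timestamp)
  return series


# Plain numeric rows: (value, timestamp) pairs go straight into the series.
simple = time_series_from_data([(1, 1000), (2, 2000), (3, 3000)])

# Structured rows: pull a single attribute out of each value.
structured = time_series_from_data(
    [(CpuSample(user_cpu_time=0.5, system_cpu_time=0.1), 1000),
     (CpuSample(user_cpu_time=0.7, system_cpu_time=0.2), 2000)],
    attr="user_cpu_time")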
Example #8
    def testToDeltas(self):
        s = self.makeSeries()
        self.assertEqual(100, len(s.data))
        s.ToDeltas()
        self.assertEqual(99, len(s.data))
        self.assertEqual([1, 60000], s.data[0])
        self.assertEqual([1, 1040000], s.data[-1])

        s = timeseries.Timeseries()
        for i in range(0, 1000):
            s.Append(i, i * 1e6)
        s.Normalize(20 * 1e6,
                    500 * 1e6,
                    1000 * 1e6,
                    mode=timeseries.NORMALIZE_MODE_COUNTER)
        self.assertEqual(25, len(s.data))
        self.assertListEqual(s.data[0], [519, int(500 * 1e6)])
        s.ToDeltas()
        self.assertEqual(24, len(s.data))
        self.assertListEqual(s.data[0], [20, int(500 * 1e6)])
        self.assertListEqual(s.data[23], [20, int(960 * 1e6)])
Example #9
    def Handle(self, args, token=None):
        start_time = args.start
        end_time = args.end

        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()

        if not start_time:
            start_time = end_time - rdfvalue.Duration("30m")

        fd = aff4.FACTORY.Create(args.client_id.ToClientURN().Add("stats"),
                                 aff4_type=aff4_stats.ClientStats,
                                 mode="r",
                                 token=token,
                                 age=(start_time, end_time))

        stat_values = list(fd.GetValuesForAttribute(fd.Schema.STATS))
        points = []
        for stat_value in reversed(stat_values):
            if args.metric == args.Metric.CPU_PERCENT:
                points.extend((s.cpu_percent, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_SYSTEM:
                points.extend((s.system_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_USER:
                points.extend((s.user_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.IO_READ_BYTES:
                points.extend(
                    (s.read_bytes, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_BYTES:
                points.extend((s.write_bytes, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_READ_OPS:
                points.extend(
                    (s.read_count, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_OPS:
                points.extend((s.write_count, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.NETWORK_BYTES_RECEIVED:
                points.append((stat_value.bytes_received, stat_value.age))
            elif args.metric == args.Metric.NETWORK_BYTES_SENT:
                points.append((stat_value.bytes_sent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_PERCENT:
                points.append((stat_value.memory_percent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_RSS_SIZE:
                points.append((stat_value.RSS_size, stat_value.age))
            elif args.metric == args.Metric.MEMORY_VMS_SIZE:
                points.append((stat_value.VMS_size, stat_value.age))
            else:
                raise ValueError("Unknown metric.")

        # Points collected from "cpu_samples" and "io_samples" may not be correctly
        # sorted in some cases (as overlaps between different stat_values are
        # possible).
        points.sort(key=lambda x: x[1])

        ts = timeseries.Timeseries()
        ts.MultiAppend(points)

        if args.metric not in self.GAUGE_METRICS:
            ts.MakeIncreasing()

        if len(stat_values) > self.MAX_SAMPLES:
            sampling_interval = rdfvalue.Duration.FromSeconds(
                ((end_time - start_time).seconds / self.MAX_SAMPLES) or 1)
            if args.metric in self.GAUGE_METRICS:
                mode = timeseries.NORMALIZE_MODE_GAUGE
            else:
                mode = timeseries.NORMALIZE_MODE_COUNTER

            ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

        result = ApiGetClientLoadStatsResult()
        for value, timestamp in ts.data:
            dp = api_stats.ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                        value=value)
            result.data_points.append(dp)

        return result
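Stripped of the AFF4 and argument plumbing, the handler above reduces to a short Timeseries pipeline. A condensed sketch of that flow (the function name and parameters are illustrative, the import path is a placeholder, and in the real handler normalization is only applied when there are more than MAX_SAMPLES samples):

from grr.lib import timeseries  # placeholder import path


def build_load_series(points, is_gauge, sampling_interval, start_time, end_time):
    """points is an iterable of (value, timestamp) pairs for one metric."""
    # Samples from overlapping stat entries can interleave, so sort by timestamp.
    points = sorted(points, key=lambda p: p[1])

    ts = timeseries.Timeseries()
    ts.MultiAppend(points)

    if not is_gauge:
        ts.MakeIncreasing()  # counters can reset; make the series monotonic

    mode = (timeseries.NORMALIZE_MODE_GAUGE
            if is_gauge else timeseries.NORMALIZE_MODE_COUNTER)
    ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

    return ts.data  # list of [value, timestamp] pairs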
Example #10
    def makeSeries(self):
        s = timeseries.Timeseries()
        for i in range(1, 101):
            s.Append(i, (i + 5) * 10000)
        return s