def _TimeSeriesFromData(self, data, attr=None):
  """Builds a timeseries.Timeseries from (value, timestamp) pairs.

  Args:
    data: An iterable of (value, timestamp) pairs, as produced by the
      StatsStore query API.
    attr: Optional attribute name. When set, the named attribute is read
      from each value and appended instead of the value itself.

  Returns:
    A timeseries.Timeseries containing one point per input pair.

  Raises:
    ValueError: If attr is given but a value lacks that attribute, or if
      attr is not given and a value looks like a distribution (exposes
      "sum"/"count") rather than a plain scalar.
  """
  series = timeseries.Timeseries()

  for value, timestamp in data:
    if attr:
      try:
        series.Append(getattr(value, attr), timestamp)
      except AttributeError:
        raise ValueError(
            "Can't find attribute %s in value %s." % (attr, value))
    else:
      # Distribution-like values must be unpacked via an explicit attr;
      # appending the composite object directly would record garbage.
      if hasattr(value, "sum") or hasattr(value, "count"):
        # NOTE: fixed typo in the original message ("complext" -> "complex").
        raise ValueError(
            "Can't treat complex type as simple value: %s" % value)
      series.Append(value, timestamp)

  return series
def Handle(self, args, token=None):
  """Fetches load-stats points for one client metric over a time range.

  Reads ClientStats records from the client's "stats" AFF4 stream for the
  requested window (defaulting to the last 30 minutes), extracts the points
  for args.metric, and returns them — made monotone for counter metrics and
  downsampled when there are more than MAX_SAMPLES records.
  """
  end_time = args.end or rdfvalue.RDFDatetime.Now()
  start_time = args.start or end_time - rdfvalue.Duration("30m")

  fd = aff4.FACTORY.Create(
      args.client_id.ToClientURN().Add("stats"),
      aff4_type=aff4_stats.ClientStats,
      mode="r",
      token=token,
      age=(start_time, end_time))
  stat_values = list(fd.GetValuesForAttribute(fd.Schema.STATS))

  # Metrics taken from per-record sample lists:
  #   metric -> (sample-list attribute, per-sample field).
  sampled_metrics = {
      args.Metric.CPU_PERCENT: ("cpu_samples", "cpu_percent"),
      args.Metric.CPU_SYSTEM: ("cpu_samples", "system_cpu_time"),
      args.Metric.CPU_USER: ("cpu_samples", "user_cpu_time"),
      args.Metric.IO_READ_BYTES: ("io_samples", "read_bytes"),
      args.Metric.IO_WRITE_BYTES: ("io_samples", "write_bytes"),
      args.Metric.IO_READ_OPS: ("io_samples", "read_count"),
      args.Metric.IO_WRITE_OPS: ("io_samples", "write_count"),
  }
  # Metrics taken once per ClientStats record: metric -> field.
  record_metrics = {
      args.Metric.NETWORK_BYTES_RECEIVED: "bytes_received",
      args.Metric.NETWORK_BYTES_SENT: "bytes_sent",
      args.Metric.MEMORY_PERCENT: "memory_percent",
      args.Metric.MEMORY_RSS_SIZE: "RSS_size",
      args.Metric.MEMORY_VMS_SIZE: "VMS_size",
  }

  points = []
  for stat_value in reversed(stat_values):
    if args.metric in sampled_metrics:
      samples_attr, field = sampled_metrics[args.metric]
      points.extend(
          (getattr(sample, field), sample.timestamp)
          for sample in getattr(stat_value, samples_attr))
    elif args.metric in record_metrics:
      points.append(
          (getattr(stat_value, record_metrics[args.metric]),
           stat_value.age))
    else:
      raise ValueError("Unknown metric.")

  # Points collected from "cpu_samples" and "io_samples" may not be
  # correctly sorted in some cases (as overlaps between different
  # stat_values are possible).
  points.sort(key=lambda point: point[1])

  ts = timeseries.Timeseries()
  ts.MultiAppend(points)

  # Counter metrics are cumulative; force monotonicity before normalizing.
  if args.metric not in self.GAUGE_METRICS:
    ts.MakeIncreasing()

  if len(stat_values) > self.MAX_SAMPLES:
    sampling_interval = rdfvalue.Duration.FromSeconds(
        ((end_time - start_time).seconds / self.MAX_SAMPLES) or 1)
    if args.metric in self.GAUGE_METRICS:
      mode = timeseries.NORMALIZE_MODE_GAUGE
    else:
      mode = timeseries.NORMALIZE_MODE_COUNTER
    ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

  result = ApiGetClientLoadStatsResult()
  for value, timestamp in ts.data:
    result.data_points.append(
        api_stats.ApiStatsStoreMetricDataPoint(
            timestamp=timestamp, value=value))
  return result
def makeSeries(self):
  """Builds a fixed 100-point test series.

  Point i (for i in 1..100) has value i and timestamp (i + 5) * 10000.
  """
  result = timeseries.Timeseries()
  for step in range(100):
    value = step + 1
    result.Append(value, (value + 5) * 10000)
  return result