def serialize(self, update):
    """Yield (series id, datetime, value) samples from a timestamped update dict.

    *update* maps unix timestamps to dicts of field name -> {"type": ..., "value": ...}.
    """
    for timestamp, fields in update.items():
        sample_dt = datetime.fromtimestamp(timestamp, utc)
        for field_name, payload in fields.items():
            matched = Series.get(self.measured_object, field_name, payload["type"])
            yield matched.id, sample_dt, payload["value"]
 def fetch_last(self, fetch_metrics):
     """Return the latest datetime seen and a dict of field names to values.

     Falls back to the unix epoch when no series matches *fetch_metrics*.
     """
     values = {}
     newest = datetime.fromtimestamp(0, utc)
     for matched in Series.filter(self.measured_object, name__in=fetch_metrics):
         latest_point = Stats.latest(matched.id)
         values[matched.name] = latest_point.mean
         if latest_point.dt > newest:
             newest = latest_point.dt
     return newest, values
 def fetch_jobs(self,
                metric,
                begin,
                end,
                job,
                max_points=float("inf"),
                num_points=0):
     """Return datetimes with dicts of field names and values.

     Aggregates per-job rate series whose names start with ``job_<metric>``
     over [begin, end], grouping points by the job attribute selected with
     *job*.

     :param metric: metric name; series are matched by the ``job_<metric>``
         name prefix
     :param begin: start of the interval (rounded to a full sample)
     :param end: end of the interval (rounded to a full sample)
     :param job: job attribute to group by; ``"id"`` skips the scheduler
         metadata lookup
     :param max_points: maximum number of points per series
     :param num_points: fixed number of points to request (0 disables)
     """
     result = collections.defaultdict(dict)
     types = set()
     begin = Stats[0].round(begin)  # exclude points from a partial sample
     end = Stats[0].round(end)  # exclude points from a partial sample
     series_ids = Series.filter(self.measured_object,
                                name__startswith="job_" +
                                metric).values("id")
     # narrow to series that actually have data at or after `begin`
     series_ids = Stats[0].objects.filter(
         id__in=series_ids, dt__gte=begin).values("id").distinct("id")
     for series in Series.filter(self.measured_object, id__in=series_ids):
         types.add(series.type)
         for point in Stats.select(series.id,
                                   begin,
                                   end,
                                   rate=True,
                                   maxlen=max_points,
                                   fixed=num_points):
             # the last split component is presumably the job id — the name
             # prefix ("job_<metric>_") is stripped off
             result[point.dt][series.name.split("_", 3)[-1]] = point
     assert types.issubset(Series.JOB_TYPES)
     # translate job ids into metadata; each id defaults to itself
     metadata = {job_id: job_id for points in result.values()
                 for job_id in points}
     if job != "id":
         for job_type in types:  # there should generally be only one
             metadata.update(scheduler.metadata(job_type, job, metadata))
     for dt in result:
         # merge points whose job ids map onto the same metadata value
         data = collections.defaultdict(lambda: Point.zero)
         for job_id, point in result[dt].items():
             data[metadata[job_id]] += point
         # negative aggregated means are clamped to zero (presumably from
         # counter resets — see the Counter handling in fetch)
         result[dt] = {key: max(0.0, data[key].mean) for key in data}
     return dict(result)
 def fetch(self, fetch_metrics, begin, end, max_points=float('inf'), num_points=0):
     """Return datetimes with dicts of field names and values.

     Counter and Derive series are fetched as rates; Counter rates are
     clamped at zero (negative means presumably come from counter resets).

     :param fetch_metrics: iterable of series names to fetch
     :param begin: start of the requested interval
     :param end: end of the interval; floored to exclude a partial sample
     :param max_points: maximum number of points per series
     :param num_points: fixed number of points to request (0 disables)
     """
     result = collections.defaultdict(dict)
     types = set()
     end = Stats[0].floor(end)  # exclude points from a partial sample
     for series in Series.filter(self.measured_object, name__in=fetch_metrics):
         types.add(series.type)
         minimum = 0.0 if series.type == 'Counter' else float('-inf')
         for point in Stats.select(series.id, begin, end, rate=series.type in ('Counter', 'Derive'), maxlen=max_points, fixed=num_points):
             result[point.dt][series.name] = max(minimum, point.mean)
     # if absolute and derived values are mixed, the earliest value will be incomplete
     if result and types > {'Gauge'}:
         earliest = min(result)  # hoisted: was computed twice
         if len(result[earliest]) < len(fetch_metrics):
             del result[earliest]
     return dict(result)
 def names(self):
     "Return the set of names of all available data series."
     data_series = Series.filter(self.measured_object, type__in=Series.DATA_TYPES)
     return {name for name in data_series.values_list('name', flat=True)}
 def clear(self):
     "Delete every series tied to the measured object, along with its stats."
     for owned in Series.filter(self.measured_object):
         # delete the series record first, then its stats (original order kept)
         owned.delete()
         Stats.delete(owned.id)