def _get_aggregates(self, ms_id):
  """Returns aggregate metrics for a metric set, filtered by any requested keys."""
  keyset = set()
  for key in request.args.getlist('keys'):
    if key not in MetricsKey.to_dict():
      raise exceptions.BadRequest(
          'unknown metrics key {}'.format(key))
    keyset.add(MetricsKey.lookup_by_name(key))
  ms = MetricSetDao().get(ms_id)
  if not ms:
    raise exceptions.NotFound(
        'metric set "{}" not found'.format(ms_id))
  if not keyset.issubset(METRIC_SET_KEYS[ms.metricSetType]):
    raise exceptions.BadRequest(
        'unexpected metric keys for metric set of type {}: {}'.format(
            ms.metricSetType.name,
            [k.name for k in keyset - METRIC_SET_KEYS[ms.metricSetType]]))
  aggs = AggregateMetricsDao().get_all_for_metric_set(ms_id)
  if keyset:
    aggs = [agg for agg in aggs if agg.metricsKey in keyset]
  return {'metrics': AggregateMetricsDao.to_client_json(aggs)}
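
# Usage sketch (hedged): the handler above reads the repeated 'keys' query
# parameter via request.args.getlist (a Flask/Werkzeug API), so a request such as
#   GET .../MetricSets/<ms_id>/Metrics?keys=GENDER&keys=STATE
# would return only aggregations whose metricsKey is GENDER or STATE. The URL
# path and the key names shown here are illustrative assumptions, not taken
# from this module.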
def recalculate_public_metrics():
  """Regenerates the public metrics export and returns it as serialized JSON."""
  logging.info('generating public metrics')
  aggs = PublicMetricsExport.export(LIVE_METRIC_SET_ID)
  client_aggs = AggregateMetricsDao.to_client_json(aggs)

  # Summing all counts for one metric yields a total qualified participant count.
  participant_count = 0
  if len(client_aggs) > 0:
    participant_count = sum([a['count'] for a in client_aggs[0]['values']])
  logging.info('persisted public metrics: {} aggregations over '
               '{} participants'.format(len(client_aggs), participant_count))

  # Same format as returned by the metric sets API.
  return json.dumps({'metrics': client_aggs})
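
# Minimal consumer sketch (assumes only the payload shape implied by the code
# above: a top-level 'metrics' list whose entries carry 'values' with 'count'
# fields; anything beyond that is an assumption):
#
#   payload = json.loads(recalculate_public_metrics())
#   for metric in payload['metrics']:
#     total = sum(v['count'] for v in metric['values'])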