def _get_aggregates(self, ms_id):
    """Return aggregate metrics for a metric set, optionally filtered by key.

    Reads the repeated 'keys' query parameter from the Flask request; raises
    BadRequest for unknown or type-inappropriate keys and NotFound when the
    metric set does not exist.
    """
    # Translate the 'keys' query parameters into MetricsKey enum values,
    # rejecting any name that is not a known metrics key.
    requested_keys = set()
    for name in request.args.getlist('keys'):
        if name not in MetricsKey.to_dict():
            raise exceptions.BadRequest(
                'unknown metrics key {}'.format(name))
        requested_keys.add(MetricsKey.lookup_by_name(name))

    metric_set = MetricSetDao().get(ms_id)
    if not metric_set:
        raise exceptions.NotFound(
            'metric set "{}" not found'.format(ms_id))

    # Keys must belong to the key set allowed for this metric set's type.
    allowed_keys = METRIC_SET_KEYS[metric_set.metricSetType]
    if not requested_keys.issubset(allowed_keys):
        raise exceptions.BadRequest(
            'unexpected metric keys for metric set of type {}: {}'.format(
                metric_set.metricSetType.name,
                [k.name for k in requested_keys - allowed_keys]))

    aggs = AggregateMetricsDao().get_all_for_metric_set(ms_id)
    # An empty key set means "no filter": return every aggregate.
    if requested_keys:
        aggs = [agg for agg in aggs if agg.metricsKey in requested_keys]
    return {'metrics': AggregateMetricsDao.to_client_json(aggs)}
def test_metrics_update(self):
    """Re-exporting the same data bumps lastModified but not the aggregates."""
    self._create_data()

    # First export at TIME.
    with FakeClock(TIME):
        PublicMetricsExport.export('123')
    first_aggs = [agg.asdict() for agg in AggregateMetricsDao().get_all()]

    # Second export of identical data at a later clock time.
    with FakeClock(TIME2):
        PublicMetricsExport.export('123')
    second_aggs = [agg.asdict() for agg in AggregateMetricsDao().get_all()]

    # The metric set's timestamp advances while the aggregate rows stay equal.
    self.assertEquals(TIME2, MetricSetDao().get('123').lastModified)
    self.assertEquals(first_aggs, second_aggs)
def recalculate_public_metrics():
    """Regenerate the live public metrics and return them as a JSON string."""
    logging.info('generating public metrics')
    aggs = PublicMetricsExport.export(LIVE_METRIC_SET_ID)
    client_aggs = AggregateMetricsDao.to_client_json(aggs)

    # summing all counts for one metric yields a total qualified participant count
    participant_count = 0
    if client_aggs:
        participant_count = sum(v['count'] for v in client_aggs[0]['values'])
    logging.info('persisted public metrics: {} aggregations over '
                 '{} participants'.format(len(client_aggs), participant_count))

    # Same format returned by the metric sets API.
    return json.dumps({'metrics': client_aggs})
def assert_total_count_per_key(self, want_total_count):
    """Assert every persisted metric's counts sum to want_total_count."""
    # Group all persisted aggregates by their metrics key.
    agg_by_key = {}
    for agg in AggregateMetricsDao().get_all():
        agg_by_key.setdefault(agg.metricsKey, []).append(agg)
    self.assertNotEquals(0, len(agg_by_key), 'no metrics were persisted')

    # Each key's aggregates must cover exactly the qualified participants.
    for (key, key_aggs) in agg_by_key.iteritems():
        total = sum(a.count for a in key_aggs)
        self.assertEquals(
            want_total_count, total,
            ('metric {} must contain aggregates over exactly '
             'the set of {} qualified participants, got {}').format(
                 key, want_total_count, total))
def save(session):
    """Upsert the metric set and replace all of its aggregate rows.

    Runs inside the given session. Closes over `ms`, `metric_set_id` and
    `metrics` from the enclosing scope; returns the inserted AggregateMetrics.
    """
    MetricSetDao().upsert_with_session(session, ms)
    agg_dao = AggregateMetricsDao()
    # Replace-all semantics: drop existing rows before inserting fresh ones.
    agg_dao.delete_all_for_metric_set_with_session(session, metric_set_id)

    inserted = []
    for (key, values) in metrics.iteritems():
        for val in values:
            row = AggregateMetrics(
                metricSetId=metric_set_id,
                metricsKey=key,
                value=val['value'],
                count=val['count'])
            agg_dao.insert_with_session(session, row)
            inserted.append(row)
    return inserted
def setUp(self):
    """Set up the base fixture and construct the DAOs used by each test."""
    super(MetricSetDaoTest, self).setUp()
    # Fresh DAO instances per test; the base class resets the database.
    self.metric_set_dao = MetricSetDao()
    self.aggregate_metrics_dao = AggregateMetricsDao()
class MetricSetDaoTest(SqlTestBase):
    """Persistence tests for MetricSetDao and AggregateMetricsDao."""

    def setUp(self):
        super(MetricSetDaoTest, self).setUp()
        self.metric_set_dao = MetricSetDao()
        self.aggregate_metrics_dao = AggregateMetricsDao()

    def create_metric_set(self, ms_id):
        """Insert and return a public-aggregations metric set with a fixed timestamp."""
        metric_set = MetricSet(
            metricSetId=ms_id,
            metricSetType=MetricSetType.PUBLIC_PARTICIPANT_AGGREGATIONS,
            lastModified=datetime.datetime(2017, 1, 1))
        self.metric_set_dao.insert(metric_set)
        return metric_set

    def test_get(self):
        # A missing metric set yields None; an inserted one round-trips.
        self.assertIsNone(self.metric_set_dao.get('123'))
        ms = self.create_metric_set('123')
        self.assertEquals(ms.asdict(),
                          self.metric_set_dao.get(ms.metricSetId).asdict())

        # Aggregates are keyed by (metric set id, metrics key, value).
        agg = AggregateMetrics(metricSetId=ms.metricSetId,
                               metricsKey=MetricsKey.GENDER,
                               value="female",
                               count=123)
        agg_key = (agg.metricSetId, agg.metricsKey, agg.value)
        self.assertIsNone(self.aggregate_metrics_dao.get(agg_key))
        self.aggregate_metrics_dao.insert(agg)
        self.assertEquals(agg.asdict(),
                          self.aggregate_metrics_dao.get(agg_key).asdict())

    def test_get_all_for_metric_set(self):
        first_set = self.create_metric_set('123')
        second_set = self.create_metric_set('456')
        agg1 = AggregateMetrics(metricSetId=first_set.metricSetId,
                                metricsKey=MetricsKey.GENDER,
                                value="female",
                                count=123)
        agg2 = AggregateMetrics(metricSetId=second_set.metricSetId,
                                metricsKey=MetricsKey.GENDER,
                                value="male",
                                count=987)
        self.aggregate_metrics_dao.insert(agg1)
        self.aggregate_metrics_dao.insert(agg2)

        # Each lookup returns only the aggregates of the requested set.
        for (metric_set, expected) in ((first_set, agg1), (second_set, agg2)):
            got = self.aggregate_metrics_dao.get_all_for_metric_set(
                metric_set.metricSetId)
            self.assertEquals([expected.asdict()], [a.asdict() for a in got])

    def test_delete_all_for_metric_set(self):
        ms1 = self.create_metric_set('123')
        ms2 = self.create_metric_set('456')
        self.aggregate_metrics_dao.insert(
            AggregateMetrics(metricSetId=ms1.metricSetId,
                             metricsKey=MetricsKey.GENDER,
                             value="female",
                             count=123))
        self.aggregate_metrics_dao.insert(
            AggregateMetrics(metricSetId=ms2.metricSetId,
                             metricsKey=MetricsKey.GENDER,
                             value="male",
                             count=987))
        self.assertEquals(2, len(self.aggregate_metrics_dao.get_all()))

        # Deleting one set's aggregates leaves the other set untouched.
        self.aggregate_metrics_dao.delete_all_for_metric_set(ms1.metricSetId)
        remaining = self.aggregate_metrics_dao.get_all()
        self.assertEquals(1, len(remaining))
        self.assertEquals(ms2.metricSetId, remaining[0].metricSetId)

        # A repeated delete of the same set is a no-op.
        self.aggregate_metrics_dao.delete_all_for_metric_set(ms1.metricSetId)
        remaining = self.aggregate_metrics_dao.get_all()
        self.assertEquals(1, len(remaining))
class MetricSetsApiTest(FlaskTestBase):
    """API tests for the MetricSets endpoints (old metrics v1; mostly skipped)."""

    def setUp(self):
        super(MetricSetsApiTest, self).setUp()
        self.metric_set_dao = MetricSetDao()
        self.aggregate_metrics_dao = AggregateMetricsDao()

    def create_metric_set(self, ms_id):
        # Helper: insert a public-aggregations metric set with a fixed
        # lastModified timestamp and return it.
        ms = MetricSet(
            metricSetId=ms_id,
            metricSetType=MetricSetType.PUBLIC_PARTICIPANT_AGGREGATIONS,
            lastModified=datetime.datetime(2017, 1, 1))
        self.metric_set_dao.insert(ms)
        return ms

    @unittest.skip("uses old metrics v1")
    def test_get_metric_sets_no_data(self):
        # With no data, the listing endpoint returns an empty collection.
        response = self.send_get('MetricSets')
        self.assertEquals({'metricSets': []}, response)

    @unittest.skip("uses old metrics v1")
    def test_get_metric_sets(self):
        # The listing endpoint returns the ids of all stored metric sets.
        self.create_metric_set('live1')
        self.create_metric_set('live2')
        response = self.send_get('MetricSets')
        self.assertItemsEqual(['live1', 'live2'],
                              [ms['id'] for ms in response['metricSets']])

    # Each param supplies: a scenario name, an optional metric set id
    # (default 'live'), an optional 'keys' filter, and the expected metrics
    # keyed by metric name.
    @parameterized.expand([
        param('empty', ms_id='empty', want={}),
        param('all',
              want={
                  'GENDER': [{
                      'value': 'female',
                      'count': 123
                  }, {
                      'value': 'male',
                      'count': 789
                  }],
                  'STATE': [{
                      'value': 'NJ',
                      'count': 789
                  }, {
                      'value': 'CA',
                      'count': 123
                  }]
              }),
        param('proper key subset',
              keys=['STATE'],
              want={
                  'STATE': [{
                      'value': 'NJ',
                      'count': 789
                  }, {
                      'value': 'CA',
                      'count': 123
                  }]
              }),
        param('overlapping key subset',
              keys=['STATE', 'AGE_RANGE'],
              want={
                  'STATE': [{
                      'value': 'NJ',
                      'count': 789
                  }, {
                      'value': 'CA',
                      'count': 123
                  }]
              }),
        param('non-matching subset', keys=['AGE_RANGE'], want={}),
    ])
    @unittest.skip("uses old metrics v1")
    def test_get_metrics(self, _, ms_id='live', keys=None, want=None):
        # Seed one empty set and one populated set, then fetch metrics for
        # ms_id (optionally filtered by the 'keys' query parameter).
        self.create_metric_set('empty')
        self.create_metric_set('live')
        self.aggregate_metrics_dao.insert(
            AggregateMetrics(metricSetId='live',
                             metricsKey=MetricsKey.GENDER,
                             value="female",
                             count=123))
        self.aggregate_metrics_dao.insert(
            AggregateMetrics(metricSetId='live',
                             metricsKey=MetricsKey.GENDER,
                             value="male",
                             count=789))
        self.aggregate_metrics_dao.insert(
            AggregateMetrics(metricSetId='live',
                             metricsKey=MetricsKey.STATE,
                             value="NJ",
                             count=789))
        self.aggregate_metrics_dao.insert(
            AggregateMetrics(metricSetId='live',
                             metricsKey=MetricsKey.STATE,
                             value="CA",
                             count=123))
        q = {'keys': keys} if keys else None
        got = self.send_get('MetricSets/{}/Metrics'.format(ms_id),
                            query_string=q)['metrics']
        self.assertEquals(
            len(want), len(got), 'got unexpected number of metrics:'
            '\nwant: {}\ngot: {}'.format(want, got))
        # Values within a metric may come back in any order.
        for m in got:
            self.assertIn(m['key'], want)
            self.assertItemsEqual(m['values'], want[m['key']])

    @unittest.skip("uses old metrics v1")
    def test_get_metrics_bad_keys(self):
        # An unrecognized metrics key is rejected with a 400.
        self.create_metric_set('live')
        self.send_get('MetricSets/live/Metrics',
                      query_string={'keys': 'mugman'},
                      expected_status=400)

    def test_get_metrics_nonexistent(self):
        # An unknown metric set id yields a 404.
        self.send_get('MetricSets/unknown/Metrics', expected_status=404)