def get(self, ms_id=None):
  """Handle GET: one metric set's aggregates, or a listing of all sets.

  With an id, delegates to _get_aggregates; without one, returns every
  known metric set rendered as client JSON under the 'metricSets' key.
  """
  if ms_id:
    # A specific metric set was requested; return its aggregate metrics.
    return self._get_aggregates(ms_id)
  # No id given: list every metric set in client JSON form.
  all_sets = MetricSetDao().get_all()
  client_sets = [MetricSetDao.to_client_json(m) for m in all_sets]
  return {'metricSets': client_sets}
def _get_aggregates(self, ms_id):
  """Return the aggregate metrics for one metric set.

  Honors an optional 'keys' query parameter (repeatable) which filters the
  returned aggregates to the named metric keys.

  Raises:
    BadRequest: for an unrecognized key name, or a key that does not belong
      to this metric set's type.
    NotFound: if no metric set exists with this id.
  """
  # Translate the 'keys' query params into MetricsKey values, rejecting unknowns.
  wanted = set()
  for name in request.args.getlist('keys'):
    if name not in MetricsKey.to_dict():
      raise exceptions.BadRequest(
          'unknown metrics key {}'.format(name))
    wanted.add(MetricsKey.lookup_by_name(name))

  ms = MetricSetDao().get(ms_id)
  if not ms:
    raise exceptions.NotFound(
        'metric set "{}" not found'.format(ms_id))

  # Every requested key must be valid for this metric set's type.
  allowed = METRIC_SET_KEYS[ms.metricSetType]
  if not wanted.issubset(allowed):
    extras = [k.name for k in wanted - allowed]
    raise exceptions.BadRequest(
        'unexpected metric keys for metric set of type {}: {}'.format(
            ms.metricSetType.name, extras))

  aggs = AggregateMetricsDao().get_all_for_metric_set(ms_id)
  if wanted:
    # An empty filter means "all keys"; otherwise keep only requested ones.
    aggs = [a for a in aggs if a.metricsKey in wanted]
  return {'metrics': AggregateMetricsDao.to_client_json(aggs)}
def test_metrics_update(self):
  """Re-exporting refreshes lastModified without changing the aggregates."""
  self._create_data()

  def snapshot():
    # Dict form of every stored aggregate, for a stable comparison.
    return [agg.asdict() for agg in AggregateMetricsDao().get_all()]

  with FakeClock(TIME):
    PublicMetricsExport.export('123')
  before = snapshot()

  with FakeClock(TIME2):
    PublicMetricsExport.export('123')
  after = snapshot()

  # The second export should bump the set's timestamp...
  self.assertEquals(TIME2, MetricSetDao().get('123').lastModified)
  # ...while producing identical aggregate rows.
  self.assertEquals(before, after)
def save(session):
  """Upsert the metric set and replace all of its aggregates in one session.

  NOTE(review): reads `ms`, `metric_set_id` and `metrics` from the enclosing
  scope (this is a closure) — confirm against the surrounding function.
  Returns the list of AggregateMetrics rows inserted.
  """
  MetricSetDao().upsert_with_session(session, ms)
  dao = AggregateMetricsDao()
  # Wipe stale rows so the inserts below fully replace prior aggregates.
  dao.delete_all_for_metric_set_with_session(session, metric_set_id)
  inserted = []
  for key, entries in metrics.iteritems():
    for entry in entries:
      row = AggregateMetrics(
          metricSetId=metric_set_id,
          metricsKey=key,
          value=entry['value'],
          count=entry['count'])
      dao.insert_with_session(session, row)
      inserted.append(row)
  return inserted
def setUp(self):
  """Build the two DAOs shared by every test in this case."""
  super(MetricSetDaoTest, self).setUp()
  # One DAO per table under test.
  self.aggregate_metrics_dao = AggregateMetricsDao()
  self.metric_set_dao = MetricSetDao()
class MetricSetDaoTest(SqlTestBase):
  """DAO-level tests for MetricSetDao and AggregateMetricsDao."""

  def setUp(self):
    super(MetricSetDaoTest, self).setUp()
    self.metric_set_dao = MetricSetDao()
    self.aggregate_metrics_dao = AggregateMetricsDao()

  def create_metric_set(self, ms_id):
    """Insert and return a metric set with the given id and a fixed timestamp."""
    ms = MetricSet(
        metricSetId=ms_id,
        metricSetType=MetricSetType.PUBLIC_PARTICIPANT_AGGREGATIONS,
        lastModified=datetime.datetime(2017, 1, 1))
    self.metric_set_dao.insert(ms)
    return ms

  def test_get(self):
    """Round-trip a metric set and an aggregate through their DAOs."""
    # Nothing stored yet: lookup must miss.
    self.assertIsNone(self.metric_set_dao.get('123'))
    ms = self.create_metric_set('123')
    self.assertEquals(ms.asdict(),
                      self.metric_set_dao.get(ms.metricSetId).asdict())
    agg = AggregateMetrics(metricSetId=ms.metricSetId,
                           metricsKey=MetricsKey.GENDER,
                           value="female",
                           count=123)
    # Aggregates are keyed by the (set id, metrics key, value) triple.
    agg_key = (agg.metricSetId, agg.metricsKey, agg.value)
    self.assertIsNone(self.aggregate_metrics_dao.get(agg_key))
    self.aggregate_metrics_dao.insert(agg)
    self.assertEquals(agg.asdict(),
                      self.aggregate_metrics_dao.get(agg_key).asdict())

  def test_get_all_for_metric_set(self):
    """get_all_for_metric_set returns only the rows of the requested set."""
    ms1 = self.create_metric_set('123')
    ms2 = self.create_metric_set('456')
    agg1 = AggregateMetrics(metricSetId=ms1.metricSetId,
                            metricsKey=MetricsKey.GENDER,
                            value="female",
                            count=123)
    agg2 = AggregateMetrics(metricSetId=ms2.metricSetId,
                            metricsKey=MetricsKey.GENDER,
                            value="male",
                            count=987)
    self.aggregate_metrics_dao.insert(agg1)
    self.aggregate_metrics_dao.insert(agg2)
    aggs = self.aggregate_metrics_dao.get_all_for_metric_set(
        ms1.metricSetId)
    self.assertEquals([agg1.asdict()], [a.asdict() for a in aggs])
    aggs = self.aggregate_metrics_dao.get_all_for_metric_set(
        ms2.metricSetId)
    self.assertEquals([agg2.asdict()], [a.asdict() for a in aggs])

  def test_delete_all_for_metric_set(self):
    """delete_all_for_metric_set removes one set's rows and is idempotent."""
    ms1 = self.create_metric_set('123')
    ms2 = self.create_metric_set('456')
    self.aggregate_metrics_dao.insert(
        AggregateMetrics(metricSetId=ms1.metricSetId,
                         metricsKey=MetricsKey.GENDER,
                         value="female",
                         count=123))
    self.aggregate_metrics_dao.insert(
        AggregateMetrics(metricSetId=ms2.metricSetId,
                         metricsKey=MetricsKey.GENDER,
                         value="male",
                         count=987))
    self.assertEquals(2, len(self.aggregate_metrics_dao.get_all()))
    self.aggregate_metrics_dao.delete_all_for_metric_set(ms1.metricSetId)
    aggs = self.aggregate_metrics_dao.get_all()
    # Only the other set's row survives.
    self.assertEquals(1, len(aggs))
    self.assertEquals(ms2.metricSetId, aggs[0].metricSetId)
    # Deleting the same (now empty) set again is a no-op.
    self.aggregate_metrics_dao.delete_all_for_metric_set(ms1.metricSetId)
    aggs = self.aggregate_metrics_dao.get_all()
    self.assertEquals(1, len(aggs))
class MetricSetsApiTest(FlaskTestBase):
  """HTTP-level tests for the MetricSets / Metrics endpoints."""

  def setUp(self):
    super(MetricSetsApiTest, self).setUp()
    self.metric_set_dao = MetricSetDao()
    self.aggregate_metrics_dao = AggregateMetricsDao()

  def create_metric_set(self, ms_id):
    """Insert and return a metric set with the given id and a fixed timestamp."""
    ms = MetricSet(
        metricSetId=ms_id,
        metricSetType=MetricSetType.PUBLIC_PARTICIPANT_AGGREGATIONS,
        lastModified=datetime.datetime(2017, 1, 1))
    self.metric_set_dao.insert(ms)
    return ms

  @unittest.skip("uses old metrics v1")
  def test_get_metric_sets_no_data(self):
    """An empty database yields an empty metricSets listing."""
    response = self.send_get('MetricSets')
    self.assertEquals({'metricSets': []}, response)

  @unittest.skip("uses old metrics v1")
  def test_get_metric_sets(self):
    """The listing endpoint returns every stored metric set id."""
    self.create_metric_set('live1')
    self.create_metric_set('live2')
    response = self.send_get('MetricSets')
    self.assertItemsEqual(['live1', 'live2'],
                          [ms['id'] for ms in response['metricSets']])

  # Each param: (test label, metric set id to fetch, 'keys' filter, expected
  # key -> list-of-{value, count} mapping).
  @parameterized.expand([
      param('empty', ms_id='empty', want={}),
      param('all', want={
          'GENDER': [{
              'value': 'female',
              'count': 123
          }, {
              'value': 'male',
              'count': 789
          }],
          'STATE': [{
              'value': 'NJ',
              'count': 789
          }, {
              'value': 'CA',
              'count': 123
          }]
      }),
      param('proper key subset', keys=['STATE'], want={
          'STATE': [{
              'value': 'NJ',
              'count': 789
          }, {
              'value': 'CA',
              'count': 123
          }]
      }),
      param('overlapping key subset', keys=['STATE', 'AGE_RANGE'], want={
          'STATE': [{
              'value': 'NJ',
              'count': 789
          }, {
              'value': 'CA',
              'count': 123
          }]
      }),
      param('non-matching subset', keys=['AGE_RANGE'], want={}),
  ])
  @unittest.skip("uses old metrics v1")
  def test_get_metrics(self, _, ms_id='live', keys=None, want=None):
    """The Metrics endpoint returns (optionally key-filtered) aggregates."""
    self.create_metric_set('empty')
    self.create_metric_set('live')
    self.aggregate_metrics_dao.insert(
        AggregateMetrics(metricSetId='live',
                         metricsKey=MetricsKey.GENDER,
                         value="female",
                         count=123))
    self.aggregate_metrics_dao.insert(
        AggregateMetrics(metricSetId='live',
                         metricsKey=MetricsKey.GENDER,
                         value="male",
                         count=789))
    self.aggregate_metrics_dao.insert(
        AggregateMetrics(metricSetId='live',
                         metricsKey=MetricsKey.STATE,
                         value="NJ",
                         count=789))
    self.aggregate_metrics_dao.insert(
        AggregateMetrics(metricSetId='live',
                         metricsKey=MetricsKey.STATE,
                         value="CA",
                         count=123))
    # Omit the query string entirely when no key filter is requested.
    q = {'keys': keys} if keys else None
    got = self.send_get('MetricSets/{}/Metrics'.format(ms_id),
                        query_string=q)['metrics']
    self.assertEquals(
        len(want), len(got), 'got unexpected number of metrics:'
        '\nwant: {}\ngot: {}'.format(want, got))
    for m in got:
      self.assertIn(m['key'], want)
      # Value ordering is not guaranteed; compare as multisets.
      self.assertItemsEqual(m['values'], want[m['key']])

  @unittest.skip("uses old metrics v1")
  def test_get_metrics_bad_keys(self):
    """An unknown 'keys' filter value is rejected with a 400."""
    self.create_metric_set('live')
    self.send_get('MetricSets/live/Metrics',
                  query_string={'keys': 'mugman'},
                  expected_status=400)

  def test_get_metrics_nonexistent(self):
    """Fetching metrics for an unknown metric set yields a 404."""
    self.send_get('MetricSets/unknown/Metrics', expected_status=404)