def testAggregationNoValues(self):
  """Aggregating a metric slice that holds no records yields an empty result."""
  metricSlices = (InstanceMetricData("id", ()),)

  aggregated = aggregation.aggregate(metricSlices)

  self.assertEqual(len(aggregated), 0)
def testAggregationSingleValue(self):
  """A single metric with one record aggregates to that one (ts, value) pair."""
  now = datetime.datetime.utcnow()
  metricSlices = (InstanceMetricData("id", (MetricRecord(now, 100.0),)),)

  aggregated = aggregation.aggregate(metricSlices)

  self.assertEqual(len(aggregated), 1)
  self.assertIsInstance(aggregated[0], tuple)
  self.assertSequenceEqual(aggregated[0], (now, 100.0))
def testAggregationSingleValue(self):
  """A single metric with one record aggregates to that one (ts, value) pair."""
  timestamp = datetime.datetime.utcnow()
  records = (MetricRecord(timestamp, 100.0),)
  slices = (InstanceMetricData("id", records),)

  result = aggregation.aggregate(slices)

  self.assertEqual(len(result), 1)
  self.assertIsInstance(result[0], tuple)
  self.assertSequenceEqual(result[0], (timestamp, 100.0))
def testAggregationMultipleValues(self):
  """A single metric with records at two timestamps yields both pairs in order."""
  later = datetime.datetime.utcnow()
  earlier = later - datetime.timedelta(minutes=5)
  metricSlices = (
      InstanceMetricData("id", (MetricRecord(earlier, 100.0),
                                MetricRecord(later, 50.0))),
  )

  aggregated = aggregation.aggregate(metricSlices)

  self.assertEqual(len(aggregated), 2)
  for row in aggregated:
    self.assertIsInstance(row, tuple)
  self.assertSequenceEqual(aggregated[0], (earlier, 100.0))
  self.assertSequenceEqual(aggregated[1], (later, 50.0))
def testAggregationMultipleValues(self):
  """A single metric with records at two timestamps yields both pairs in order."""
  timestamp2 = datetime.datetime.utcnow()
  timestamp1 = timestamp2 - datetime.timedelta(minutes=5)
  slices = (
      InstanceMetricData(
          "id",
          (MetricRecord(timestamp1, 100.0), MetricRecord(timestamp2, 50.0))),
  )

  result = aggregation.aggregate(slices)

  self.assertEqual(len(result), 2)
  self.assertIsInstance(result[0], tuple)
  self.assertIsInstance(result[1], tuple)
  self.assertSequenceEqual(result[0], (timestamp1, 100.0))
  self.assertSequenceEqual(result[1], (timestamp2, 50.0))
def testAggregationMultipleMetricsAligned(self):
  """Two metrics with matching timestamps are averaged per timestamp."""
  later = datetime.datetime.utcnow()
  earlier = later - datetime.timedelta(minutes=5)
  metricSlices = (
      InstanceMetricData("id1", (MetricRecord(earlier, 100.0),
                                 MetricRecord(later, 50.0))),
      InstanceMetricData("id2", (MetricRecord(earlier, 80.0),
                                 MetricRecord(later, 30.0))),
  )

  aggregated = aggregation.aggregate(metricSlices)

  self.assertEqual(len(aggregated), 2)
  for row in aggregated:
    self.assertIsInstance(row, tuple)
  # Default aggregation averages the two instances at each timestamp.
  self.assertSequenceEqual(aggregated[0], (earlier, 90.0))
  self.assertSequenceEqual(aggregated[1], (later, 40.0))
def testAggregationMultipleMetricsAlignedSum(self):
  """Two metrics with matching timestamps are summed when aggregationFn=sum."""
  later = datetime.datetime.utcnow()
  earlier = later - datetime.timedelta(minutes=5)
  metricSlices = (
      InstanceMetricData("id1", (MetricRecord(earlier, 100.0),
                                 MetricRecord(later, 50.0))),
      InstanceMetricData("id2", (MetricRecord(earlier, 80.0),
                                 MetricRecord(later, 30.0))),
  )

  aggregated = aggregation.aggregate(metricSlices, aggregationFn=sum)

  self.assertEqual(len(aggregated), 2)
  for row in aggregated:
    self.assertIsInstance(row, tuple)
  # sum combines both instances' values at each shared timestamp.
  self.assertSequenceEqual(aggregated[0], (earlier, 180.0))
  self.assertSequenceEqual(aggregated[1], (later, 80.0))
def testAggregationMultipleMetricsMisaligned(self):
  """Metrics with partially-overlapping timestamps: unmatched timestamps pass
  through unchanged; the shared one is averaged."""
  ts3 = datetime.datetime.utcnow()
  ts2 = ts3 - datetime.timedelta(minutes=5)
  ts1 = ts2 - datetime.timedelta(minutes=5)
  metricSlices = (
      InstanceMetricData("id1", (MetricRecord(ts1, 100.0),
                                 MetricRecord(ts2, 50.0))),
      InstanceMetricData("id2", (MetricRecord(ts2, 80.0),
                                 MetricRecord(ts3, 30.0))),
  )

  aggregated = aggregation.aggregate(metricSlices)

  self.assertEqual(len(aggregated), 3)
  for row in aggregated:
    self.assertIsInstance(row, tuple)
  self.assertSequenceEqual(aggregated[0], (ts1, 100.0))
  # ts2 appears in both slices, so its values (50, 80) average to 65.
  self.assertSequenceEqual(aggregated[1], (ts2, 65.0))
  self.assertSequenceEqual(aggregated[2], (ts3, 30.0))
def testAggregationEmptyTuple(self):
  """Aggregating an empty tuple of slices yields an empty sequence."""
  self.assertSequenceEqual(aggregation.aggregate(()), ())
def testAggregationEmptyList(self):
  """Aggregating an empty list of slices yields an empty sequence."""
  self.assertSequenceEqual(aggregation.aggregate([]), ())
def _processAutostackMetricRequests(self, engine, requests, modelSwapper):
  """Execute autostack metric requests; aggregate and stream collected
  metric data.

  :param engine: SQLAlchemy engine object
  :type engine: sqlalchemy.engine.Engine
  :param requests: sequence of AutostackMetricRequest objects
  :param modelSwapper: Model Swapper
  """
  # Start collecting requested metric data
  collectionIter = self._metricGetter.collectMetricData(requests)

  # Aggregate each collection and dispatch to app MetricStreamer
  for metricCollection in collectionIter:
    request = requests[metricCollection.refID]
    metricObj = request.metric

    data = None
    if metricCollection.slices:
      aggregationFn = getAggregationFn(metricObj)
      if aggregationFn:
        data = aggregate(metricCollection.slices,
                         aggregationFn=aggregationFn)
      else:
        data = aggregate(metricCollection.slices)

    try:
      with engine.connect() as conn:
        repository.retryOnTransientErrors(repository.setMetricLastTimestamp)(
            conn, metricObj.uid, metricCollection.nextMetricTime)
    except ObjectNotFoundError:
      self._log.warning("Processing autostack data collection results for "
                        "unknown model=%s (model deleted?)", metricObj.uid)
      continue

    if data:
      try:
        self.metricStreamer.streamMetricData(data,
                                             metricID=metricObj.uid,
                                             modelSwapper=modelSwapper)
      except ObjectNotFoundError:
        # We expect that the model exists but in the odd case that it has
        # already been deleted we don't want to crash the process.
        # FIX: the message literal was broken across a raw newline
        # (syntax error); reassembled and switched to lazy %-args.
        self._log.info("Metric not found when adding data. metric=%s",
                       metricObj.uid)

      self._log.debug(
          "{TAG:APP.AGG.DATA.PUB} Published numItems=%d for metric=%s;"
          "timeRange=[%sZ-%sZ]; headTS=%sZ; tailTS=%sZ",
          len(data), getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat(),
          data[0][0].isoformat(), data[-1][0].isoformat())
    else:
      self._log.info(
          "{TAG:APP.AGG.DATA.NONE} No data for metric=%s;"
          "timeRange=[%sZ-%sZ]", getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat())
def _processAutostackMetricRequests(self, engine, requests, modelSwapper):
  """Execute autostack metric requests; aggregate and stream collected
  metric data.

  :param engine: SQLAlchemy engine object
  :type engine: sqlalchemy.engine.Engine
  :param requests: sequence of AutostackMetricRequest objects
  :param modelSwapper: Model Swapper
  """
  # Start collecting requested metric data
  collectionIter = self._metricGetter.collectMetricData(requests)

  # Aggregate each collection and dispatch to app MetricStreamer
  for metricCollection in collectionIter:
    request = requests[metricCollection.refID]
    metricObj = request.metric

    data = None
    if metricCollection.slices:
      aggregationFn = getAggregationFn(metricObj)
      if aggregationFn:
        data = aggregate(metricCollection.slices,
                         aggregationFn=aggregationFn)
      else:
        data = aggregate(metricCollection.slices)

    try:
      with engine.connect() as conn:
        repository.retryOnTransientErrors(
            repository.setMetricLastTimestamp)(
                conn, metricObj.uid, metricCollection.nextMetricTime)
    except ObjectNotFoundError:
      self._log.warning(
          "Processing autostack data collection results for "
          "unknown model=%s (model deleted?)", metricObj.uid)
      continue

    if data:
      try:
        self.metricStreamer.streamMetricData(
            data, metricID=metricObj.uid, modelSwapper=modelSwapper)
      except ObjectNotFoundError:
        # We expect that the model exists but in the odd case that it has
        # already been deleted we don't want to crash the process.
        # FIX: the message literal was broken across a raw newline
        # (syntax error); reassembled and switched to lazy %-args.
        self._log.info("Metric not found when adding data. metric=%s",
                       metricObj.uid)

      self._log.debug(
          "{TAG:APP.AGG.DATA.PUB} Published numItems=%d for metric=%s;"
          "timeRange=[%sZ-%sZ]; headTS=%sZ; tailTS=%sZ",
          len(data), getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat(),
          data[0][0].isoformat(), data[-1][0].isoformat())
    else:
      self._log.info(
          "{TAG:APP.AGG.DATA.NONE} No data for metric=%s;"
          "timeRange=[%sZ-%sZ]", getMetricLogPrefix(metricObj),
          metricCollection.timeRange.start.isoformat(),
          metricCollection.timeRange.end.isoformat())