def last_timestamp(self, streams):
    timestamp = datetime.datetime.min
    if timestamp.tzinfo is None:
        timestamp = timestamp.replace(tzinfo=pytz.utc)
    for stream_id, types in streams:
        try:
            t = datastream.get_data(stream_id, datastream.Granularity.Seconds, datetime.datetime.min, datetime.datetime.max, reverse=True)[0]['t']
            if t.tzinfo is None:
                t = t.replace(tzinfo=pytz.utc)
            if t > timestamp:
                timestamp = t
        except IndexError:
            continue
    return timestamp
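# A minimal usage sketch for last_timestamp() (not part of the original
# suite; identifiers below are hypothetical). The helper ignores the second
# element of each (stream_id, types) pair and returns a timezone-aware
# timestamp, so the sentinel for "no stream has any datapoints yet" is the
# UTC-aware datetime.min it starts from:
#
#     latest = self.last_timestamp([(stream.id, None) for stream in self.streams])
#     if latest == datetime.datetime.min.replace(tzinfo=pytz.utc):
#         pass  # Every stream is still empty.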
def test_get_downsampled(self):
    stream = self.streams[0]

    # First make sure everything is downsampled.
    until = (stream.latest_datapoint + datetime.timedelta(minutes=10)).strftime('%Y-%m-%dT%H:%M:%S')
    prev = datastream.backend._time_offset
    datastream.backend._time_offset = datetime.timedelta(minutes=10)
    try:
        management.execute_from_command_line([sys.argv[0], 'downsample', '--until=%s' % until])
    finally:
        datastream.backend._time_offset = prev

    serializer = serializers.DatastreamSerializer()

    middle_time = calendar.timegm((stream.earliest_datapoint + (stream.latest_datapoint - stream.earliest_datapoint) / 2).utctimetuple())
    start_time = calendar.timegm(stream.earliest_datapoint.utctimetuple())

    for granularity in ('10seconds', 'S'):
        # There are 360 datapoints total.
        for offset in (0, 11, 331):
            for limit in (0, 5, 40):
                for reverse in (True, False):
                    # We test end in test_get_stream.
                    for start in (None, start_time, middle_time):
                        for exclusive in (True, False):
                            for time_downsampler_query, time_downsampler in (
                                (None, None),
                                ('m', ['mean']),
                                ('mean', ['mean']),
                                ('a', ['first']),
                                ('first', ['first']),
                                ('a,z', ['first', 'last']),
                                ('first,z', ['first', 'last']),
                                ('first,last', ['first', 'last']),
                                (['a', 'z'], ['first', 'last']),
                                (['first', 'z'], ['first', 'last']),
                                (['first', 'last'], ['first', 'last']),
                            ):
                                for value_downsampler_query, value_downsampler in (
                                    (None, None),
                                    ('m', ['mean']),
                                    ('mean', ['mean']),
                                    ('l', ['min']),
                                    ('min', ['min']),
                                    ('l,u', ['min', 'max']),
                                    ('min,u', ['min', 'max']),
                                    ('min,max', ['min', 'max']),
                                    (['l', 'u'], ['min', 'max']),
                                    (['min', 'u'], ['min', 'max']),
                                    (['min', 'max'], ['min', 'max']),
                                ):
                                    kwargs = {
                                        'offset': offset,
                                        'limit': limit,
                                    }
                                    params = {
                                        'granularity': granularity,
                                    }
                                    if reverse:
                                        params.update({'reverse': True})
                                    if start and exclusive:
                                        params.update({'start_exclusive': start})
                                    elif start:
                                        params.update({'start': start})
                                    if time_downsampler:
                                        params.update({'time_downsamplers': time_downsampler_query})
                                    if value_downsampler:
                                        params.update({'value_downsamplers': value_downsampler_query})
                                    kwargs.update(params)

                                    data = self.get_detail('stream', stream.id, **kwargs)

                                    self.assertEqual(stream.id, data.pop('id'))
                                    # We manually construct URI to make sure it is like we assume it is.
                                    self.assertEqual(u'%s%s/' % (self.resource_list_uri('stream'), stream.id), data.pop('resource_uri'))
                                    self.assertEqual(stream.tags, data.pop('tags'))
                                    self.assertItemsEqual(stream.value_downsamplers, data.pop('value_downsamplers'))
                                    self.assertItemsEqual(stream.time_downsamplers, data.pop('time_downsamplers'))
                                    self.assertEqual(stream.highest_granularity, data.pop('highest_granularity'))
                                    self.assertEqual(serializer.format_datetime(stream.earliest_datapoint), data.pop('earliest_datapoint'))
                                    self.assertEqual(serializer.format_datetime(stream.latest_datapoint), data.pop('latest_datapoint'))
                                    self.assertEqual(stream.value_type, data.pop('value_type'))

                                    if start:
                                        start_string = serializer.format_datetime(datetime.datetime.utcfromtimestamp(start))

                                    self.assertEqual({
                                        u'end': None,
                                        u'reverse': reverse,
                                        u'end_exclusive': None,
                                        u'start': u'0001-01-01T00:00:00Z' if not start else start_string if not exclusive else None,
                                        u'granularity': u'10seconds',
                                        u'time_downsamplers': time_downsampler,
                                        u'start_exclusive': start_string if start and exclusive else None,
                                        u'value_downsamplers': value_downsampler,
                                    }, data.pop('query_params'))

                                    stream_datapoints = datastream.get_data(
                                        stream_id=stream.id,
                                        granularity=datastream.Granularity.Seconds10,
                                        start=datetime.datetime.min if not start else datetime.datetime.utcfromtimestamp(start) if not exclusive else None,
                                        end=None,
                                        start_exclusive=datetime.datetime.utcfromtimestamp(start) if start and exclusive else None,
                                        end_exclusive=None,
                                        reverse=reverse,
                                        value_downsamplers=value_downsampler,
                                        time_downsamplers=time_downsampler,
                                    )

                                    # We store the length before we maybe slice it in assertEqualDatapoints.
                                    stream_datapoints_length = len(stream_datapoints)

                                    self.assertEqualDatapoints(
                                        stream_datapoints, offset, limit, data.pop('datapoints'),
                                        'granularity=%s, offset=%s, limit=%s, reverse=%s, start=%s, exclusive=%s, time_downsampler=%s, value_downsampler=%s' % (granularity, offset, limit, reverse, start, exclusive, time_downsampler, value_downsampler),
                                    )

                                    if 0 < offset < limit:
                                        previous_limit = offset
                                    else:
                                        previous_limit = limit

                                    params = '&'.join(['%s=%s' % (key, v) for key, value in params.iteritems() for v in (value if isinstance(value, list) else [value])])

                                    self.assertMetaEqual({
                                        u'total_count': stream_datapoints_length,
                                        # For datapoints (details), limit should always be the same as we specified.
                                        u'limit': limit,
                                        u'offset': offset,
                                        u'next': u'%s?%sformat=json&limit=%s&offset=%s' % (self.resource_detail_uri('stream', stream.id), '%s&' % params if params else '', limit, offset + limit) if limit and stream_datapoints_length > offset + limit else None,
                                        u'previous': u'%s?%sformat=json&limit=%s&offset=%s' % (self.resource_detail_uri('stream', stream.id), '%s&' % params if params else '', previous_limit, offset - previous_limit) if limit and offset != 0 else None,
                                    }, data.pop('meta'))

                                    # We should check everything.
                                    self.assertEqual({}, data)

                                    # This test takes long. We output ? regularly so that Travis CI does not timeout.
                                    sys.stdout.write('?')
                                    sys.stdout.flush()
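# The test above relies on datastream accepting downsampler names either as
# full names or as one-letter codes, and either as comma-separated strings or
# as lists. What follows is a minimal sketch of that normalization,
# reconstructed only from the query/expectation pairs in the loops above; the
# real mapping lives in datastream itself and may differ, and this helper and
# its name are hypothetical.
_DOWNSAMPLER_CODES = {
    'm': 'mean',
    'a': 'first',
    'z': 'last',
    'l': 'min',
    'u': 'max',
}


def normalize_downsamplers(query):
    # None means "use the stream's default downsamplers".
    if query is None:
        return None
    # Comma-separated strings and lists are treated the same.
    if isinstance(query, basestring):
        query = query.split(',')
    # One-letter codes expand to full names; full names pass through unchanged.
    return [_DOWNSAMPLER_CODES.get(name, name) for name in query]


# For example, all of these normalize to ['first', 'last'], matching the
# expectations asserted above:
#
#     normalize_downsamplers('a,z')
#     normalize_downsamplers(['first', 'z'])
#     normalize_downsamplers(['first', 'last'])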
def test_get_stream(self):
    serializer = serializers.DatastreamSerializer()

    # Numeric, nominal, and graph streams.
    for i in (0, 3, 4):
        stream = self.streams[i]

        middle_time = calendar.timegm((stream.earliest_datapoint + (stream.latest_datapoint - stream.earliest_datapoint) / 2).utctimetuple())
        end_time = calendar.timegm(stream.latest_datapoint.utctimetuple())

        # There are 721 datapoints total.
        for offset in (0, 11, 700):
            for limit in (0, 5, 40):
                for reverse in (True, False):
                    # We test start in test_get_downsampled.
                    for end in (None, middle_time, end_time):
                        for exclusive in (True, False):
                            kwargs = {
                                'offset': offset,
                                'limit': limit,
                            }
                            params = {}
                            if reverse:
                                params.update({'reverse': True})
                            if end and exclusive:
                                params.update({'end_exclusive': end})
                            elif end:
                                params.update({'end': end})
                            kwargs.update(params)

                            data = self.get_detail('stream', stream.id, **kwargs)

                            self.assertEqual(stream.id, data.pop('id'))
                            # We manually construct URI to make sure it is like we assume it is.
                            self.assertEqual(u'%s%s/' % (self.resource_list_uri('stream'), stream.id), data.pop('resource_uri'))
                            self.assertEqual(stream.tags, data.pop('tags'))
                            self.assertItemsEqual(stream.value_downsamplers, data.pop('value_downsamplers'))
                            self.assertItemsEqual(stream.time_downsamplers, data.pop('time_downsamplers'))
                            self.assertEqual(stream.highest_granularity, data.pop('highest_granularity'))
                            self.assertEqual(serializer.format_datetime(stream.earliest_datapoint), data.pop('earliest_datapoint'))
                            self.assertEqual(serializer.format_datetime(stream.latest_datapoint), data.pop('latest_datapoint'))
                            self.assertEqual(stream.value_type, data.pop('value_type'))

                            if end:
                                end_string = serializer.format_datetime(datetime.datetime.utcfromtimestamp(end))

                            self.assertEqual({
                                u'end': None if not end or exclusive else end_string,
                                u'reverse': reverse,
                                u'end_exclusive': end_string if end and exclusive else None,
                                u'start': u'0001-01-01T00:00:00Z',
                                u'granularity': u'seconds',
                                u'time_downsamplers': None,
                                u'start_exclusive': None,
                                u'value_downsamplers': None,
                            }, data.pop('query_params'))

                            stream_datapoints = datastream.get_data(
                                stream_id=stream.id,
                                granularity=datastream.Granularity.Seconds,
                                start=datetime.datetime.min,
                                end=None if not end or exclusive else datetime.datetime.utcfromtimestamp(end),
                                start_exclusive=None,
                                end_exclusive=datetime.datetime.utcfromtimestamp(end) if end and exclusive else None,
                                reverse=reverse,
                                value_downsamplers=None,
                                time_downsamplers=None,
                            )

                            # We store the length before we maybe slice it in assertEqualDatapoints.
                            stream_datapoints_length = len(stream_datapoints)

                            self.assertEqualDatapoints(
                                stream_datapoints, offset, limit, data.pop('datapoints'),
                                'offset=%s, limit=%s, reverse=%s, end=%s, exclusive=%s' % (offset, limit, reverse, end, exclusive),
                            )

                            if 0 < offset < limit:
                                previous_limit = offset
                            else:
                                previous_limit = limit

                            params = '&'.join(['%s=%s' % (k, urllib.quote(str(v))) for k, v in params.iteritems()])

                            self.assertMetaEqual({
                                u'total_count': stream_datapoints_length,
                                # For datapoints (details), limit should always be the same as we specified.
                                u'limit': limit,
                                u'offset': offset,
                                u'next': u'%s?%sformat=json&limit=%s&offset=%s' % (self.resource_detail_uri('stream', stream.id), '%s&' % params if params else '', limit, offset + limit) if limit and stream_datapoints_length > offset + limit else None,
                                u'previous': u'%s?%sformat=json&limit=%s&offset=%s' % (self.resource_detail_uri('stream', stream.id), '%s&' % params if params else '', previous_limit, offset - previous_limit) if limit and offset != 0 else None,
                            }, data.pop('meta'))

                            # We should check everything.
                            self.assertEqual({}, data)

                            # This test takes long. We output ? regularly so that Travis CI does not timeout.
                            sys.stdout.write('?')
                            sys.stdout.flush()
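# Both tests assert the same pagination contract on meta['next'] and
# meta['previous']. Below is a minimal standalone sketch of that contract;
# the helper and its name are hypothetical, and the real logic lives in the
# API's paginator, not in this module.


def pagination_offsets(offset, limit, total_count):
    # With limit == 0 paging is disabled, so there is never a next or a
    # previous page.
    next_offset = offset + limit if limit and total_count > offset + limit else None
    # When the current offset is smaller than the limit, the previous page
    # starts at 0 and is only `offset` items long, hence the smaller limit.
    if 0 < offset < limit:
        previous_limit = offset
    else:
        previous_limit = limit
    previous_offset = offset - previous_limit if limit and offset != 0 else None
    return next_offset, previous_limit, previous_offset


# For example, with offset=11, limit=40, and 360 datapoints, the next page is
# at offset 51, and the previous page is at offset 0 with limit 11 — the same
# values the assertMetaEqual() checks above expect.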