def test_history_performance():
    try:
        tracemalloc.start()
    except:
        pass

    for _ in range( 3 ):
        path = "/tmp/test_performance_%d" % random.randint( 100000, 999999 )
        if os.path.exists( path ):
            continue
        assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

        files = []
        try:
            day = 24*60*60
            dur = 3*day            # a few days worth of data
            regstps = 0.0,5.0      # 0-5secs between updates
            numfiles = dur//day+1  # ~1 file/day, but at least 2
            values = {}            # Initial register values
            regscount = 1000       # Number of different registers
            regschanged = 1,10     # From 1-10 registers per row
            regsbase = 40001

            start = timer()
            now = beg = start - dur
            linecnt = 0
            for e in reversed( range( numfiles )):
                f = path + (( '.%d' % e ) if e else '')  # 0'th file has no extension
                files.append( f )
                with logger( f ) as l:
                    if values:
                        l.write( values, now=now ); linecnt += 1
                    while now < beg + len(files) * dur/numfiles:
                        lst = now
                        now += random.uniform( *regstps )
                        assert now >= lst
                        assert timestamp( now ) >= timestamp( lst ), \
                            "now: %s, timestamp(now): %s" % ( now, timestamp( now ))
                        updates = {}
                        for _ in range( random.randint( *regschanged )):
                            updates[random.randint( regsbase, regsbase + regscount - 1 )] \
                                = random.randint( 0, (1<<16) - 1 )  # 16-bit register values
                        values.update( updates )
                        l.write( updates, now=now ); linecnt += 1
                # Leave a gap between files
                lst = now
                now += random.uniform( *regstps )
                assert now >= lst
                assert timestamp( now ) >= timestamp( lst )
                if e:
                    # Compress .1 onward using a random format; randomly delete the original
                    # uncompressed file, so sometimes both files exist
                    if random.choice( (True, False, False, False) ):
                        continue  # Don't make a compressed version of some files
                    fz = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                    files.append( fz )
                    with opener( fz, mode='wb' ) as fd:
                        with open( f, 'rb' ) as rd:
                            fd.write( rd.read() )
                    if random.choice( (True, False, False) ):
                        continue  # Don't remove some of the uncompressed files
                    os.unlink( f )
                    files.pop( files.index( f ))

            logging.warning( "Generated data in %.3fs; lines: %d", timer() - start, linecnt )

            # Start somewhere within the first 0-1% of dur past beg, forcing the loader to look
            # back to find the first file.  Try to do it all in the next 'playback' seconds (just
            # to push it to the max), in 'chunks' pieces.
            historical = timestamp( random.uniform( beg + dur*0/100, beg + dur*1/100 ))
            basis = timer()
            playback = 2.0 * dur/day  # Can sustain ~2 seconds / day of history on a single CPU
            chunks = 1000
            factor = dur / playback
            lookahead = 60.0
            duration = None
            if random.choice( (True,False) ):
                duration = random.uniform( dur * 98/100, dur * 102/100 )
            begoff = historical.value - beg
            endoff = 0 if duration is None else (( historical.value + duration ) - ( beg + dur ))
            logging.warning( "Playback starts at beginning %s %s, duration %s, ends at ending %s %s",
                             timestamp( beg ), format_offset( begoff, ms=False ),
                             None if duration is None else format_offset( duration, ms=False, symbols='-+' ),
                             timestamp( beg + dur ), format_offset( endoff, ms=False ))

            ld = loader( path, historical=historical, basis=basis, factor=factor,
                         lookahead=lookahead, duration=duration )
            eventcnt = 0
            slept = 0
            cur = None
            while ld:
                once = False
                while ld.state < ld.AWAITING or not once:
                    once = True
                    upcoming = None
                    limit = random.randint( 0, 250 )
                    if random.choice( (True,False) ):
                        upcoming = ld.advance()
                        if random.choice( (True,False) ) and cur:
                            # ~25% of the time, provide an 'upcoming' timestamp that is between
                            # the current advancing historical time and the last load time.
                            upcoming -= random.uniform( 0, upcoming.value - cur.value )
                    cur,events = ld.load( upcoming=upcoming, limit=limit )
                    eventcnt += len( events )
                    advance = ld.advance()
                    offset = advance.value - cur.value
                    logging.detail( "%s loaded up to %s (%s w/ upcoming %14s); %4d future, %4d values: %4d events / %4d limit",
                                    ld, cur, format_offset( offset ),
                                    format_offset( upcoming.value - advance.value ) if upcoming is not None else None,
                                    len( ld.future ), len( ld.values ), len( events ), limit )

                logging.warning( "%s loaded up to %s; %3d future, %4d values: %6d events total",
                                 ld, cur, len( ld.future ), len( ld.values ), eventcnt )
                try:
                    snapshot = tracemalloc.take_snapshot()
                    display_top( snapshot, limit=10 )
                except:
                    pass
                time.sleep( playback/chunks )
                slept += playback/chunks

            elapsed = timer() - basis
            eventtps = eventcnt // ( elapsed - slept )
            logging.error( "Playback in %.3fs (slept %.3fs); events: %d ==> %d historical records/sec",
                           elapsed, slept, eventcnt, eventtps )
            if not logging.getLogger().isEnabledFor( logging.NORMAL ):
                # Ludicrously low threshold, to pass tests on very slow machines
                assert eventtps >= 1000, \
                    "Historical event processing performance low: %d records/sec" % eventtps
            try:
                display_biggest_traceback()
            except:
                pass
        except Exception as exc:
            logging.normal( "Test failed: %s", exc )
            '''
            for f in files:
                logging.normal( "%s:\n %s", f, " ".join( l for l in open( f )))
            '''
            raise
        finally:
            for f in files:
                logging.detail( "unlinking %s", f )
                try:
                    os.unlink( f )
                except:
                    pass
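
# A minimal sketch (not part of the test suite) of the logger/loader round-trip exercised by
# test_history_performance above: write a few timestamped register updates, then replay them at
# an accelerated rate.  Only calls already used above are assumed; the demo path and the 10x
# 'factor' are illustrative choices, not documented defaults.
def demo_history_roundtrip( path="/tmp/history_demo" ):
    now = timer()
    with logger( path ) as l:                            # append records to the history file
        l.write( {40001: 0, 40002: 1}, now=now - 60 )    # initial register values, a minute ago
        l.write( {40001: 2},           now=now - 30 )    # a later change to one register
    # Replay from 60s in the (historical) past, compressed 10x: ~6s of real time
    ld = loader( path, historical=timestamp( now - 60 ), basis=timer(), factor=10 )
    eventcnt = 0
    while ld:                                            # falsey once playback is complete
        cur,events = ld.load( upcoming=None, limit=100 ) # (current timestamp, events loaded)
        eventcnt += len( events )
        time.sleep( .1 )
    return eventcnt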
def test_history_timestamp_abbreviations():
    """Test timezone abbreviation support. """
    abbrev = timestamp.support_abbreviations( 'CA', reset=True )
    assert sorted( abbrev ) == ['ADT', 'AST', 'CDT', 'CST', 'EDT', 'EST', 'MDT', 'MST',
                                'NDT', 'NST', 'PDT', 'PST']

    # Perform all the remaining timezone abbreviation tests relative to a known range of times, to
    # avoid differences in the future due to timezone changes.
    ts = timestamp( "2014-04-24 08:00:00 MDT" )
    assert near( ts.value, 1398348000.0 )

    # Try to add all of the Americas to the CA abbreviations already supported; can't be done (too
    # many inconsistencies)
    try:
        abbrev = timestamp.support_abbreviations( 'America' )
        assert False, "Many zones should have been ambiguously abbreviated"
    except AmbiguousTimeZoneError as exc:
        assert "America/Mazatlan" in str( exc )

    exclude = [
        'America/Mazatlan', 'America/Merida', 'America/Mexico_City', 'America/Monterrey',
        'America/Bahia_Banderas', 'America/Cancun', 'America/Chihuahua', 'America/Havana',
        'America/Santa_Isabel', 'America/Grand_Turk', 'America/Cayman', 'America/Port-au-Prince',
        'America/Metlakatla',
    ]
    #print()
    #print( "America, w/o %r" % ( exclude ))
    abbrev = timestamp.support_abbreviations( 'America', exclude=exclude )
    #print( sorted( abbrev ))
    #print( reprlib.repr( timestamp._tzabbrev ))
    pytz_version = tuple( map( int, pytz.__version__.split( '.' )))
    if pytz_version < (2015,4):
        logging.warning( "pytz < 2015.4; HADT/HAST vs. HDT/HST" )
        assert sorted( abbrev ) == ['ACT', 'AKDT', 'AKST', 'AMST', 'AMT', 'ART', 'BOT', 'BRST',
                                    'BRT', 'CLST', 'CLT', 'COT', 'ECT', 'EGST', 'EGT', 'FNT',
                                    'GFT', 'GMT', 'GYT', 'HADT', 'HAST', 'PET', 'PMDT', 'PMST',
                                    'PYST', 'PYT', 'SRT', 'UYST', 'UYT', 'VET', 'WGST', 'WGT']
    elif pytz_version < (2015,7):
        logging.warning( "pytz < 2015.7; had UYST" )
        assert sorted( abbrev ) == ['ACT', 'AKDT', 'AKST', 'AMST', 'AMT', 'ART', 'BOT', 'BRST',
                                    'BRT', 'CLST', 'CLT', 'COT', 'ECT', 'EGST', 'EGT', 'FNT',
                                    'GFT', 'GMT', 'GYT', 'HDT', 'HST', 'PET', 'PMDT', 'PMST',
                                    'PYST', 'PYT', 'SRT', 'UYST', 'UYT', 'VET', 'WGST', 'WGT']
    elif pytz_version < (2017,2):
        assert sorted( abbrev ) == ['ACT', 'AKDT', 'AKST', 'AMST', 'AMT', 'ART', 'BOT', 'BRST',
                                    'BRT', 'CLST', 'CLT', 'COT', 'ECT', 'EGST', 'EGT', 'FNT',
                                    'GFT', 'GMT', 'GYT', 'HDT', 'HST', 'PET', 'PMDT', 'PMST',
                                    'PYST', 'PYT', 'SRT', 'UYT', 'VET', 'WGST', 'WGT']
    else:
        # As of pytz 2017.2, a lot of these zones now use abbreviations consistent with CA; only
        # a few are added.
        assert sorted( abbrev ) == ['AKDT', 'AKST', 'GMT', 'HDT', 'HST']

    # We *can* add Europe/Berlin
    abbrev = timestamp.support_abbreviations( 'Europe/Berlin' )
    assert sorted( abbrev ) == ['CEST', 'CET']
    assert 'CEST' in timestamp._tzabbrev
    assert 'EEST' not in timestamp._tzabbrev

    # And all of Europe, w/o some troublesome time zones
    exclude = [ 'Europe/Simferopol', 'Europe/Istanbul', 'Europe/Minsk', 'Europe/Chisinau',
                'Europe/Dublin' ]
    #print()
    #print( "Europe, w/o %r" % ( exclude ))
    abbrev = timestamp.support_abbreviations( 'Europe', exclude=exclude )
    #print( sorted( abbrev ))
    if pytz_version < (2015,2):
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'MSK', 'SAMT', 'WEST', 'WET']
    elif pytz_version < (2016,7):
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'IST', 'MSK', 'SAMT', 'WEST', 'WET']
    elif pytz_version < (2018,5):
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'IST', 'MSK', 'WEST', 'WET']
    else:
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'MSK', 'WEST', 'WET']
    assert 'EEST' in timestamp._tzabbrev

    try:
        timestamp.support_abbreviations( 'Asia' )
        assert False, "Asia/Jerusalem IST should have mismatched Europe/Dublin IST"
    except AmbiguousTimeZoneError as exc:
        assert "Asia/Jerusalem" in str( exc )

    assert near( parse_offset( '< 1:00:00.001' ), -3600.001 )
    assert near( parse_offset( '<:1.001' ), -1.001 )
    assert near( parse_offset( '>1:0.001' ), 60.001 )
    assert near( parse_offset( '>1' ), 1 )

    # While Asia is internally very inconsistent (eg. EEST), countries should be internally
    # consistent
    abbrev = timestamp.support_abbreviations( 'JO', reset=True )  # Jordan
    #print( sorted( abbrev ))
    assert sorted( abbrev ) == [ 'EEST', 'EET' ]
    z,dst,off = timestamp._tzabbrev['EEST']
    assert str(z) == 'Asia/Amman' and dst == True \
        and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 3:00:00"
    abbrev = timestamp.support_abbreviations( 'IE', reset=True )  # Ireland
    #print( sorted( abbrev ))
    assert sorted( abbrev ) == [ 'GMT', 'IST' ]
    # Jordan, Ireland and Lebanon only work if we pick a region to exclude, for one EEST definition
    abbrev = timestamp.support_abbreviations( ['JO', 'IE', 'LB'], exclude=[ 'Asia/Amman' ], reset=True )
    #print( sorted( abbrev ))
    assert sorted( abbrev ) == [ 'EEST', 'EET', 'GMT', 'IST' ]
    z,dst,off = timestamp._tzabbrev['EEST']
    assert str(z) == 'Asia/Beirut' and dst == True \
        and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 3:00:00"

    # Australia zones are incompatible with a bunch of other timezone abbreviations, eg. CST; reset
    abbrev = timestamp.support_abbreviations( 'Australia', reset=True )
    #print( sorted( abbrev ))
    #print( repr( timestamp._tzabbrev ))
    if pytz_version < (2017,2):
        assert sorted( abbrev ) == ['ACDT', 'ACST', 'ACWST', 'AEDT', 'AEST', 'AWST', 'LHDT', 'LHST']
        z,dst,off = timestamp._tzabbrev['LHST']
        assert str(z) == 'Australia/Lord_Howe' and dst == False \
            and format_offset( timedelta_total_seconds( off ), ms=None ) == ">10:30:00"
    else:
        assert sorted( abbrev ) == ['ACDT', 'ACST', 'AEDT', 'AEST', 'AWST']

    # Ensure that non-ambiguous (DST-specific) zone abbreviations override ambiguous (no longer
    # relevant, as pytz >= 2014.7 no longer contains dst == None for some of the Australian zones
    # without DST)
    abbrev = timestamp.support_abbreviations( [ 'Australia/Adelaide' ], reset=True )
    assert sorted( abbrev ) == [ 'ACDT', 'ACST' ]
    z,dst,off = timestamp._tzabbrev['ACST']
    assert str(z) == 'Australia/Adelaide' and dst == False \
        and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 9:30:00"
    abbrev = timestamp.support_abbreviations( [ 'Australia/Adelaide', 'Australia/Darwin' ], reset=True )
    #print( sorted( abbrev ))
    #print( repr( timestamp._tzabbrev ))
    z,dst,off = timestamp._tzabbrev['ACST']
    assert str(z) in ( 'Australia/Darwin', 'Australia/Adelaide' ) and dst == False \
        and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 9:30:00"

    # Check that zones with complete, permanent offset changes (not just DST) are handled.  We know
    # that within a year of 2014-04-28, the America/Eirunepe (west Amazonas) zone had such a change
    # (pre pytz 2017.2, anyway...)
    if pytz_version < (2017,2):
        abbrev = timestamp.support_abbreviations( [ 'America/Eirunepe' ],
                                                  at=datetime.datetime( 2014, 4, 28 ), reset=True )
        #print( sorted( abbrev ))
        assert sorted( abbrev ) == [ 'ACT', 'AMT' ]
        z,dst,off = timestamp._tzabbrev['ACT']
        assert str(z) == 'America/Eirunepe' and dst == False \
            and format_offset( timedelta_total_seconds( off ), ms=None ) == "< 5:00:00"
        z,dst,off = timestamp._tzabbrev['AMT']
        assert str(z) == 'America/Eirunepe' and dst == False \
            and format_offset( timedelta_total_seconds( off ), ms=None ) == "< 4:00:00"
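
# A small usage sketch (illustrative only): once a region's abbreviations are installed via
# support_abbreviations, zone-abbreviated strings parse unambiguously; overly broad regions
# must be narrowed with 'exclude'.  The values follow directly from the assertions above.
def demo_abbreviation_usage():
    timestamp.support_abbreviations( 'CA', reset=True )  # Canadian abbreviations: MDT, PST, ...
    ts = timestamp( "2014-04-24 08:00:00 MDT" )          # parses via the installed MDT abbreviation
    assert near( ts.value, 1398348000.0 )                # the corresponding UNIX epoch time
    try:
        timestamp.support_abbreviations( 'America' )     # too broad: conflicting abbreviations
    except AmbiguousTimeZoneError:
        pass                                             # caller must exclude the conflicting zones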
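
# Offset notation round-trip sketch (illustrative): parse_offset reads the signed "<"/">"
# H:M:S.sss notation asserted above, and format_offset renders it; '<' marks a negative offset,
# '>' a positive one.  The exact rendered string assumes the space-padded format seen in the
# assertions above.
def demo_offset_notation():
    assert near( parse_offset( '< 1:00:00.001' ), -3600.001 )
    assert near( parse_offset( '>1' ), 1 )
    assert format_offset( 10800.0, ms=None ) == "> 3:00:00"  # matches the EEST offset asserted above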