Example #1
File: history_test.py  Project: ekw/cpppo
def test_history_opener():
    # Try opening all the compressed files in the 2 acceptable ways: context or iterator
    path		=  'tests/history'
    for f in os.listdir( path ):
        if f.startswith( 'hi' ):
            with opener( os.path.join( path, f )) as fd:
                assert next( fd ).decode() == "hi\n"
            try:
                fd	= opener( os.path.join( path, f ))
                for line in fd:
                    assert line.decode() == "hi\n"
            finally:
                fd.close()
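The test relies on opener() transparently handling both plain and compressed history files (.gz, .bz2, .xz), returning an object usable either as a context manager or as a line iterator. As a rough illustrative sketch only (not cpppo's actual implementation; the name opener_sketch and the extension dispatch are assumptions), such a helper could be built on the standard-library compression modules:

import bz2
import gzip
import lzma

def opener_sketch( path, mode='rb' ):
    # Illustrative stand-in for a compression-transparent opener: choose the
    # module from the file extension.  Each returned object supports both the
    # context-manager protocol and iteration over bytes lines in 'rb' mode.
    if path.endswith( '.gz' ):
        return gzip.open( path, mode )
    if path.endswith( '.bz2' ):
        return bz2.open( path, mode )
    if path.endswith( '.xz' ):
        return lzma.open( path, mode )
    return open( path, mode )

Both access patterns exercised by the test (with ... as fd: and manual iteration followed by fd.close()) work against any of these standard-library file objects.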
Example #2
File: history_test.py  Project: ekw/cpppo
def test_history_performance():
    try:
        tracemalloc.start()
    except:
        pass

    for _ in range( 3 ):
        path		= "/tmp/test_performance_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        day		= 24*60*60
        dur		= 3*day		# a few days worth of data
        regstps		= 0.0,5.0	# 0-5secs between updates
        numfiles	= dur//day+1	# ~1 file/day, but at least 2
        values		= {}		# Initial register values
        regscount	= 1000		# Number of different registers
        regschanged	= 1,10		# From 1-10 registers per row
        regsbase	= 40001

        start		= timer()

        now = beg	= start - dur
        linecnt		= 0
        for e in reversed( range( numfiles )):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                if values:
                    l.write( values, now=now ); linecnt += 1
                while now < beg + len(files) * dur/numfiles:
                    lst	= now
                    now += random.uniform( *regstps )
                    assert now >= lst
                    assert timestamp( now ) >= timestamp( lst ), "now: %s, timestamp(now): %s" % ( now, timestamp( now ))
                    updates = {}
                    for _ in range( random.randint( *regschanged )):
                        updates[random.randint( regsbase, regsbase + regscount - 1 )] = random.randint( 0, (1<<16) - 1 )
                    values.update( updates )
                    l.write( updates, now=now ); linecnt += 1
                lst 	= now
                now    += random.uniform( *regstps )
                assert now >= lst
                assert timestamp( now ) >= timestamp( lst )
            if e:
                # Compress .1 onward using a random format; randomly delete the original uncompressed
                # file so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        logging.warning( "Generated data in %.3fs; lines: %d", timer() - start, linecnt )

        # Start somewhere within 0-1% of the dur past the beg, forcing the loader to look back to
        # find the first file.  Try to do it all in the next 'playback' seconds (just to push it to
        # the max), in 'chunks' pieces.
        historical	= timestamp( random.uniform( beg + dur*0/100, beg + dur*1/100 ))
        basis		= timer()
        playback	= 2.0 * dur/day # Can sustain ~2 seconds / day of history on a single CPU
        chunks		= 1000
        factor		= dur / playback
        lookahead	= 60.0
        duration	= None
        if random.choice( (True,False) ):
            duration	= random.uniform( dur * 98/100, dur * 102/100 )

        begoff		= historical.value - beg
        endoff		= 0 if duration is None else (( historical.value + duration ) - ( beg + dur ))
        logging.warning( "Playback starts at beginning %s %s, duration %s, ends at ending %s %s",
                         timestamp( beg ), format_offset( begoff, ms=False ),
                         None if duration is None else format_offset( duration, ms=False, symbols='-+' ),
                         timestamp( beg + dur ), format_offset( endoff, ms=False ))

        ld		= loader(
            path, historical=historical, basis=basis, factor=factor, lookahead=lookahead, duration=duration )
        eventcnt	= 0
        slept		= 0
        cur		= None
        while ld:
            once	= False
            while ld.state < ld.AWAITING or not once:
                once		= True
                upcoming	= None
                limit		= random.randint( 0, 250 )
                if random.choice( (True,False) ):
                    upcoming	= ld.advance()
                    if random.choice( (True,False) ) and cur:
                        # ~25% of the time, provide an 'upcoming' timestamp that is between the
                        # current advancing historical time and the last load time.
                        upcoming-= random.uniform( 0, upcoming.value - cur.value )
                cur,events	= ld.load( upcoming=upcoming, limit=limit )
                eventcnt       += len( events )
                advance		= ld.advance()
                offset		= advance.value - cur.value
                logging.detail( "%s loaded up to %s (%s w/ upcoming %14s); %4d future, %4d values: %4d events / %4d limit",
                                ld, cur, format_offset( offset ),
                                format_offset( upcoming.value - advance.value ) if upcoming is not None else None,
                                len( ld.future ), len( ld.values ), len( events ), limit )

            logging.warning( "%s loaded up to %s; %3d future, %4d values: %6d events total",
                                ld, cur, len( ld.future ), len( ld.values ), eventcnt )
            try:
                snapshot	= tracemalloc.take_snapshot()
                display_top( snapshot, limit=10 )
            except:
                pass

            time.sleep( playback/chunks )
            slept	       += playback/chunks

        elapsed		= timer() - basis
        eventtps	= eventcnt // ( elapsed - slept )
        logging.error( "Playback in %.3fs (slept %.3fs); events: %d ==> %d historical records/sec",
                       elapsed, slept, eventcnt, eventtps )
        if not logging.getLogger().isEnabledFor( logging.NORMAL ):
            # Ludicrously low threshold, to pass tests on very slow machines
            assert eventtps >= 1000, \
                "Historical event processing performance low: %d records/sec" % eventtps
        try:
            display_biggest_traceback()
        except:
            pass

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        '''
        for f in files:
            logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
        '''
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
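The playback rate in the test above comes from factor = dur / playback: with dur = 3 days and playback = 2.0 * dur/day = 6.0 seconds, factor is 43200, i.e. each real second of playback covers 12 hours of history. A small illustrative sketch (a hypothetical helper; cpppo's loader computes its advancing time internally) of how a wall-clock reading maps onto the advancing historical time:

def advancing_historical( historical, basis, factor, realtime ):
    # Map a wall-clock reading onto the simulated historical clock: every real
    # second elapsed since 'basis' advances history by 'factor' seconds.
    # Hypothetical helper for illustration only.
    return historical + ( realtime - basis ) * factor

# With the test's parameters: dur = 3*24*60*60, playback = 6.0, factor = 43200.0,
# so advancing_historical( beg, basis, 43200.0, basis + 1.0 ) == beg + 43200.0;
# one real second of playback spans 12 hours of historical data.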
Example #3
File: history_test.py  Project: ekw/cpppo
def test_history_sequential():
    for _ in range( 3 ):
        path		= "/tmp/test_sequential_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases.  Note: times are truncated to milliseconds, so timestamps saved out will
        # probably evaluate as < the original value when read back in!  Since each file contains
        # only one record, we must be careful to use 'strict', to ensure we open the next file
        # strictly greater than the last timestamp (or we'll open the same file again!)
        now		= timer()
        count		= 10
        for e in range( count ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                l.write( { 40001: count - e }, now=now - e )
            if e:
                # Compress .1 onward using a random format; randomly delete the original uncompressed
                # file so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        # Attempt to begin loading history around the middle of the recording
        rdr		= reader( path,
                                  historical=now - random.uniform( 3.0, 9.0 ),
                                  basis=now + random.uniform( -.5, +.5 ),
                                  factor=3 )

        # Begin with the first historical file before our computed advancing historical time (we
        # could provide a specific timestamp here, if we wanted).  No lookahead.
        ts_l		= None
        f_l		= None
        after		= False # only first open is "before"; rest are "after"
        strict		= False # only goes false when timestamp increases in the same file
        deadline	= now + count
        while timer() <= deadline:
            # open next file beginning after the last ts
            o		= rdr.open( target=ts_l, after=after, strict=strict ) # Generator; doesn't do much here...
            after	= True
            strict	= True
            for (f,l,cur),(ts,js) in o: # raises HistoryExhausted on open() generator failure
                assert ts_l is None or ts >= ts_l, \
                    "Historical record out of sequence; %s isn't >= %s" % ( ts, ts_l )
                if js is None:
                    logging.info( "@%s: not yet available", ts )
                    assert ts > cur, "Next record should have been returned; not in future"
                    time.sleep( .1 )
                else:
                    logging.normal( "@%s: %r", ts, js )
                    assert ts <= cur, "Next record shouldn't have been returned; yet future"
                    if f == f_l and ts > ts_l:
                        strict = False
                f_l,ts_l= f,ts
        assert False, "Should have raised HistoryExhausted by now"
    except HistoryExhausted as exc:
        logging.normal( "History exhausted: %s", exc )

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
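The comment at the top of this test points out that timestamps are truncated to milliseconds when written, so a value read back may compare as less than the original float; that is why the loop switches to strict=True, otherwise the same file could be opened again. A two-line illustration of the truncation effect (assuming simple millisecond truncation; cpppo's timestamp encoding may round differently):

now	= 1456789012.3456789			# an arbitrary epoch time
saved	= int( now * 1000 ) / 1000.0		# truncated to milliseconds on write
assert saved <= now				# the re-read value can compare < the original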