Code example #1
File: smc.py  Project: pjkundert/cpppo_positioner
    def complete( self, actuator=1, svoff=False, timeout=None ):
        """Ensure that any prior operation on the actuator is complete w/in timeout; return True iff the
        current operation is detected as being complete.

        According to the documentation, the absence of the X4B "INP" flag should indicate
        completion (see LEC Modbus RTU op Manual.pdf, section 4.4).  However, this does not work,
        and the X48 "BUSY" flag seems to serve this purpose; perhaps it is a documentation error.
    
        If 'svoff' is True, we'll also turn off the servo (clear Y19_SVON) if we detect completion.

        """
        begin			= cpppo.timer()
        if timeout is None:
            timeout		= self.TIMEOUT
        unit			= self.unit( uid=actuator )
        # Loop on True/None; terminate only on False; X48_BUSY contains 0/False when complete
        complete		= self.check(
            predicate=lambda: unit.read( data.X48_BUSY.addr ) == False,
            deadline=None if timeout is None else begin + timeout )
        ( logging.warning if not complete else logging.detail )(
            "Complete: actuator %3d %s", actuator, "success" if complete else "failure" )
        if svoff and complete:
            logging.detail( "ServoOff: actuator %3d", actuator )
            unit.write( data.Y19_SVON.addr, 0 )
        return complete
Code example #2
    def complete(self, actuator=1, svoff=False, timeout=None):
        """Ensure that any prior operation on the actuator is complete w/in timeout; return True iff the
        current operation is detected as being complete.

        According to the documentation, the absence of the X4B "INP" flag should indicate
        completion (see LEC Modbus RTU op Manual.pdf, section 4.4).  However, this does not work,
        and the X48 "BUSY" flag seems to serve this purpose; perhaps it is a documentation error.
    
        If 'svoff' is True, we'll also turn off the servo (clear Y19_SVON) if we detect completion.

        """
        begin = cpppo.timer()
        if timeout is None:
            timeout = self.TIMEOUT
        unit = self.unit(uid=actuator)
        # Loop on True/None; terminate only on False; X48_BUSY contains 0/False when complete
        complete = self.check(
            predicate=lambda: unit.read(data.X48_BUSY.addr) == False,
            deadline=None if timeout is None else begin + timeout)
        (logging.warning if not complete else logging.detail)(
            "Complete: actuator %3d %s", actuator,
            "success" if complete else "failure")
        if svoff and complete:
            logging.detail("ServoOff: actuator %3d", actuator)
            unit.write(data.Y19_SVON.addr, 0)
        return complete
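A minimal usage sketch for complete() follows.  The smc_modbus class name, its import path and its default construction are assumptions about the cpppo_positioner API, shown only for illustration:

from cpppo_positioner.smc import smc_modbus   # assumed import path

positioner = smc_modbus()                     # assumed default construction
# Block (up to the default timeout) until actuator 1's prior operation is
# done, turning the servo off once completion is detected.
if not positioner.complete( actuator=1, svoff=True ):
    raise RuntimeError( "Actuator 1 did not complete within timeout" )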
Code example #3
File: smc.py  Project: pjkundert/cpppo_positioner
    def outputs( self, actuator, *flags ):
        """Set one or more 'flag' matching 'NAME' (or clear it, if all lower case 'name' used).  Only
        Y... (Coils) may be written.  The available flags are:
        
            IN[0-5]
            HOLD
            SVON
            DRIVE
            RESET
            SETUP
            JOG_MINUS
            JOG_PLUS
            INPUT_INVALID

        """
        unit			= self.unit( uid=actuator )
        for f in flags:
            NAM			= f.upper()
            nam			= f.lower()
            key			= [ k for k in super( cpppo.dotdict, data ).keys()
                                    if k.startswith( 'Y' ) and k.endswith( NAM ) ]
            assert len( key ) == 1 and f in (NAM,nam), "invalid/ambiguous key name %s" % ( f )
            val			= bool( f == NAM )
            logging.detail( "%s/%-8s <== %s", unit.description, f, val )
            unit.write( data[key[0]].addr, val )
        return self.status( actuator=actuator )
Code example #4
    def outputs(self, actuator, *flags):
        """Set one or more 'flag' matching 'NAME' (or clear it, if all lower case 'name' used).  Only
        Y... (Coils) may be written.  The available flags are:
        
            IN[0-5]
            HOLD
            SVON
            DRIVE
            RESET
            SETUP
            JOG_MINUS
            JOG_PLUS
            INPUT_INVALID

        """
        unit = self.unit(uid=actuator)
        for f in flags:
            NAM = f.upper()
            nam = f.lower()
            key = [
                k for k in super(cpppo.dotdict, data).keys()
                if k.startswith('Y') and k.endswith(NAM)
            ]
            assert len(key) == 1 and f in (
                NAM, nam), "invalid/ambiguous key name %s" % (f)
            val = bool(f == NAM)
            logging.detail("%s/%-8s <== %s", unit.description, f, val)
            unit.write(data[key[0]].addr, val)
        return self.status(actuator=actuator)
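Again using the hypothetical positioner instance from the sketch after example #2, the case convention means an upper-case flag name sets the coil and a lower-case name clears it:

# Set DRIVE and clear HOLD on actuator 1, then inspect the returned status.
status = positioner.outputs( 1, 'DRIVE', 'hold' )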
Code example #5
File: poll.py  Project: wdouglascosta/cpppo
def run(via,
        process,
        failure=None,
        backoff_min=None,
        backoff_multiplier=None,
        backoff_max=None,
        latency=None,
        **kwds):
    """Perform polling loop 'til process.done (or forever), and process each poll result.

    On Exception, invoke the supplied poll failure method (if any), and apply exponential back-off
    between repeated attempts to run the polling loop.  The default backoff starts at the poll cycle
    (or 1.0) seconds, and defaults to increase up to 10 times that, at a default rate of 1.5x the
    current backoff.

    One or more instances of poll.run may be using the same 'via' EtherNet/IP CIP proxy instance;
    it is assumed that Thread blocking behaviour is performed within the I/O processing code to
    ensure that only one Thread is performing I/O.

    """
    if backoff_min is None:
        backoff_min = kwds.get('cycle')
        if backoff_min is None:
            backoff_min = 1.0
    if backoff_max is None:
        backoff_max = backoff_min * 10
    if backoff_multiplier is None:
        backoff_multiplier = 1.5
    if latency is None:
        latency = .5

    backoff = None
    lst, dly = 0, 0
    beg = timer()
    while not hasattr(process, 'done') or not process.done:
        # Await expiry of 'dly', checking flags at least every 'latency' seconds
        ela = timer() - beg
        if ela < dly:
            time.sleep(min(latency, dly - ela))
            continue
        # Perform a poll.loop and/or increase exponential back-off.
        try:
            lst, dly, res = loop(via, last_poll=lst, **kwds)
            for p, v in res:
                process(p, v)
            backoff = None  # Signal a successfully completed poll!
        except Exception as exc:
            if backoff is None:
                backoff = backoff_min
                logging.normal("Polling failure: waiting %7.3fs; %s", backoff,
                               exc)
            else:
                backoff = min(backoff * backoff_multiplier, backoff_max)
                logging.detail("Polling backoff: waiting %7.3fs; %s", backoff,
                               exc)
            dly = backoff
            if failure is not None:
                failure(exc)
        beg = timer()
Code example #6
File: pymodbus_fixes.py  Project: Felipeasg/cpppo
    def shutdown_request( self ):
        '''The default SocketServer.shutdown_request does send a shutdown(socket.SHUT_WR), but does NOT
        wait for the socket to drain before closing it, potentially leaving the kernel socket dirty
        (filled with unclaimed data; at least the client's EOF).  Drain the socket, then close it.
        Ignores ENOTCONN (and other) socket.error if socket is already closed.

        '''
        logging.detail( "Modbus/TCP client socket shutdown/drain %s", self.client_address )
        network.drain( self.request, timeout=self.drain, close=False )
        self.close_request()
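The drain-before-close behaviour can be illustrated with a plain-socket sketch; this is a simplified stand-in for cpppo's network.drain, not its actual implementation:

import socket

def drain_and_close( conn, timeout=1.0 ):
    """Shut down our sending side, then consume the peer's remaining data and
    EOF (or give up after timeout), so the kernel socket isn't closed dirty."""
    try:
        conn.shutdown( socket.SHUT_WR )       # we're done sending
        conn.settimeout( timeout )
        while conn.recv( 4096 ):              # read 'til EOF ( b'' )
            pass
    except ( socket.timeout, OSError ):
        pass                                  # eg. ENOTCONN if already closed
    finally:
        conn.close()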
Code example #7
File: poll.py  Project: Felipeasg/cpppo
def run( via, process, failure=None, backoff_min=None, backoff_multiplier=None, backoff_max=None,
         latency=None, **kwds ):
    """Perform polling loop 'til process.done (or forever), and process each poll result.

    On Exception, invoke the supplied poll failure method (if any), and apply exponential back-off
    between repeated attempts to run the polling loop.  The default backoff starts at the poll cycle
    (or 1.0) seconds, and defaults to increase up to 10 times that, at a default rate of 1.5x the
    current backoff.

    One or more instances of poll.run may be using the same 'via' EtherNet/IP CIP proxy instance;
    it is assumed that Thread blocking behaviour is performed within the I/O processing code to
    ensure that only one Thread is performing I/O.

    """
    if backoff_min is None:
        backoff_min		= kwds.get( 'cycle' )
        if backoff_min is None:
            backoff_min		= 1.0
    if backoff_max is None:
        backoff_max		= backoff_min * 10
    if backoff_multiplier is None:
        backoff_multiplier	= 1.5
    if latency is None:
        latency			= .5

    backoff			= None
    lst,dly			= 0,0
    beg				= timer()
    while not hasattr( process, 'done' ) or not process.done:
        # Await expiry of 'dly', checking flags at least every 'latency' seconds
        ela			= timer() - beg
        if ela < dly:
            time.sleep( min( latency, dly - ela ))
            continue
        # Perform a poll.loop and/or increase exponential back-off.
        try:
            lst,dly,res		= loop( via, last_poll=lst, **kwds )
            for p,v in res:
                process( p, v )
            backoff		= None # Signal a successfully completed poll!
        except Exception as exc:
            if backoff is None:
                backoff		= backoff_min
                logging.normal( "Polling failure: waiting %7.3fs; %s", backoff, exc )
            else:
                backoff		= min( backoff * backoff_multiplier, backoff_max )
                logging.detail(  "Polling backoff: waiting %7.3fs; %s", backoff, exc )
            dly			= backoff
            if failure is not None:
                failure( exc )
        beg			= timer()
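A usage sketch for run() follows.  The 'via' proxy construction is elided (it depends on the cpppo EtherNet/IP setup), and the callbacks shown are illustrative:

import logging

def process( par, val ):
    print( "%s == %s" % ( par, val ))
process.done = False                 # run() checks this attribute; set True to stop

def failure( exc ):
    logging.warning( "Poll failed: %s", exc )

# run( via=proxy, process=process, failure=failure, cycle=1.0 )  # 'proxy' assumed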
Code example #8
File: history_test.py  Project: ekw/cpppo
def test_history_unparsable():
    """Test history files rendered unparsable due to dropouts.  This should be handled with no problem
    except if the initial frame of register data on the first file is missing.

    """
    for _ in range( 3 ):
        path		= "/tmp/test_unparsable_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path 

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases, containing records that are invalid.
        now		= timer()
        v		= 10000
        secs		= 10
        secs_ext	=  1.0  # adjust range of history to target out by this +/-
        basisext	=   .5  # adjust start basis time from now by this +/-
        minfactor	=   .25
        maxfactor	=  2.0
        maxlatency	=   .25
        # 1/N file lines corrupted (kills 2 records; the current and following).  None --> no errors
        maxerror	= random.choice( [ None, 3, 10, 100 ] )
        oldest		= None
        newest		= None
        logging.normal( "Corrupting %s of all history lines", None if not maxerror else "1/%d" % maxerror )
        for e in range( secs ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                ssend	= 100
                for ss in range( 0, ssend ): # subseconds up to but not including ssend...
                    js	= json.dumps( { 40001: v + e * 1000 + (ss * 1000 // ssend) } ) + '\n'
                    if maxerror and not random.randint( 0, maxerror ):
                        # Truncate some of the records (as would occur in a filesystem full or halt)
                        js = js[:random.randint( 0, len( js ) - 1)]
                    ts	= timestamp( now - e + ss/ssend )
                    if oldest is None or ts < oldest:
                        oldest = ts
                    if newest is None or ts > newest:
                        newest = ts
                    l._append( '\t'.join( (str( ts ),json.dumps( None ),js) ) )

        # Load the historical records.  This will be robust against all errors except if the first
        # line of the first history file opened is corrupt, and we therefore cannot get the initial
        # frame of register data.
        historical	= timestamp( now - random.uniform( -secs_ext, secs + secs_ext ))
        basisdelay	= random.uniform( -basisext, +basisext )
        basis		= now + basisdelay
        factor		= random.uniform( minfactor, maxfactor )
        lookahead	= 1.0
        on_bad_iframe	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        on_bad_data	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        logging.normal( "Playback starts %s (%.1f%%) of history %s-%s, in %.3fs, at x %.2f rate w/%.1fs lookahead, on_bad_iframe=%s, on_bad_data=%s",
                        historical, ( historical.value - oldest.value ) * 100 / ( newest.value - oldest.value ),
                        oldest, newest, basisdelay, factor, lookahead,
                        "SUPPRESS" if on_bad_iframe == loader.SUPPRESS else "FAIL" if on_bad_iframe  == loader.FAIL else "RAISE",
                        "SUPPRESS" if on_bad_data   == loader.SUPPRESS else "FAIL" if on_bad_data    == loader.FAIL else "RAISE" )

        ld		= loader( path,
                                historical=historical, basis=basis, factor=factor, lookahead=lookahead )
        dur		= basisext + ( secs_ext + secs + secs_ext ) / factor + basisext + 2*maxlatency # Don't be too strict
        beg		= timer()
        count		= 0

        while ld:
            assert timer() - beg < dur, "The loader should have ended"
            cur,events	= ld.load( on_bad_iframe=on_bad_iframe, on_bad_data=on_bad_data )
            count      += len( events )
            logging.normal( "%s loaded up to %s; %d future, %d values: %d events: %s",
                            ld, cur, len( ld.future ), len( ld.values ), len( events ), 
                            repr( events ) if logging.root.isEnabledFor( logging.DEBUG ) else reprlib.repr( events ))
            time.sleep( random.uniform( 0.0, maxlatency ))

        if on_bad_data == ld.FAIL or on_bad_iframe == ld.FAIL:
            assert ld.state in (ld.COMPLETE, ld.FAILED)
        else:
            assert ld.state == ld.COMPLETE

    except IframeError as exc:
        logging.warning( "Detected error on initial frame of registers in first history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED and count == 0, "Shouldn't have loaded any events -- only iframe failures expected"

    except DataError as exc:
        logging.warning( "Detected error on registers data in a history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        for f in files:
            if os.path.exists( f ):
                logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
            else:
                logging.warning( "%s: Couldn't find file", f )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
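For reference, each history line the test emits is a tab-separated record of a timestamp, a JSON payload (the test writes null), and a JSON register map.  A sketch of composing one such line, with an illustrative timestamp:

import json

ts     = "2014-05-05 21:42:22.000"            # illustrative timestamp text
js     = json.dumps( { 40001: 12345 } )
record = '\t'.join( (ts, json.dumps( None ), js) ) + '\n'
assert record == '2014-05-05 21:42:22.000\tnull\t{"40001": 12345}\n'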
Code example #9
File: history_test.py  Project: ekw/cpppo
def test_history_performance():
    try:
        tracemalloc.start()
    except:
        pass

    for _ in range( 3 ):
        path		= "/tmp/test_performance_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path 

    files		= []
    try:
        day		= 24*60*60
        dur		= 3*day		# a few days worth of data
        regstps		= 0.0,5.0	# 0-5secs between updates
        numfiles	= dur//day+1	# ~1 file/day, but at least 2
        values		= {}		# Initial register values
        regscount	= 1000		# Number of different registers
        regschanged	= 1,10		# From 1-10 registers per row
        regsbase	= 40001

        start		= timer()

        now = beg	= start - dur
        linecnt		= 0
        for e in reversed( range( numfiles )):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                if values:
                    l.write( values, now=now ); linecnt += 1
                while now < beg + len(files) * dur/numfiles:
                    lst	= now
                    now += random.uniform( *regstps )
                    assert now >= lst
                    assert timestamp( now ) >= timestamp( lst ), "now: %s, timestamp(now): %s" % ( now, timestamp( now ))
                    updates = {}
                    for _ in range( random.randint( *regschanged )):
                        updates[random.randint( regsbase, regsbase + regscount - 1 )] = random.randint( 0, (1<<16) - 1 )
                    values.update( updates )
                    l.write( updates, now=now ); linecnt += 1
                lst 	= now
                now    += random.uniform( *regstps )
                assert now >= lst
                assert timestamp( now ) >= timestamp( lst )
            if e:
                # Compress .1 onward using a random format; randomly delete origin uncompressed file
                # so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        logging.warning( "Generated data in %.3fs; lines: %d", timer() - start, linecnt )

        # Start somewhere within the first 0-1% of the duration, forcing the loader to look back
        # to find the first file.  Try to do it all in the next 'playback' seconds (just to push
        # it to the max), in 'chunks' pieces.
        historical	= timestamp( random.uniform( beg + dur*0/100, beg + dur*1/100 ))
        basis		= timer()
        playback	= 2.0 * dur/day # Can sustain ~2 seconds / day of history on a single CPU
        chunks		= 1000
        factor		= dur / playback
        lookahead	= 60.0
        duration	= None
        if random.choice( (True,False) ):
            duration	= random.uniform( dur * 98/100, dur * 102/100 )

        begoff		= historical.value - beg
        endoff		= 0 if duration is None else (( historical.value + duration ) - ( beg + dur ))
        logging.warning( "Playback starts at beginning %s %s, duration %s, ends at ending %s %s",
                         timestamp( beg ), format_offset( begoff, ms=False ),
                         None if duration is None else format_offset( duration, ms=False, symbols='-+' ),
                         timestamp( beg + dur ), format_offset( endoff, ms=False ))

        ld		= loader(
            path, historical=historical, basis=basis, factor=factor, lookahead=lookahead, duration=duration )
        eventcnt	= 0
        slept		= 0
        cur		= None
        while ld:
            once	= False
            while ld.state < ld.AWAITING or not once:
                once		= True
                upcoming	= None
                limit		= random.randint( 0, 250 )
                if random.choice( (True,False) ):
                    upcoming	= ld.advance()
                    if random.choice( (True,False) ) and cur:
                        # ~25% of the time, provide an 'upcoming' timestamp that is between the
                        # current advancing historical time and the last load time.
                        upcoming-= random.uniform( 0, upcoming.value - cur.value )
                cur,events	= ld.load( upcoming=upcoming, limit=limit )
                eventcnt       += len( events )
                advance		= ld.advance()
                offset		= advance.value - cur.value
                logging.detail( "%s loaded up to %s (%s w/ upcoming %14s); %4d future, %4d values: %4d events / %4d limit" ,
                                ld, cur, format_offset( offset ),
                                format_offset( upcoming.value - advance.value ) if upcoming is not None else None,
                                len( ld.future ), len( ld.values ), len( events ), limit )

            logging.warning( "%s loaded up to %s; %3d future, %4d values: %6d events total",
                                ld, cur, len( ld.future ), len( ld.values ), eventcnt )
            try:
                snapshot	= tracemalloc.take_snapshot()
                display_top( snapshot, limit=10 )
            except:
                pass

            time.sleep( playback/chunks )
            slept	       += playback/chunks

        elapsed		= timer() - basis
        eventtps	= eventcnt // ( elapsed - slept )
        logging.error( "Playback in %.3fs (slept %.3fs); events: %d ==> %d historical records/sec",
                       elapsed, slept, eventcnt, eventtps )
        if not logging.getLogger().isEnabledFor( logging.NORMAL ):
            # Ludicrously low threshold, to pass tests on very slow machines
            assert eventtps >= 1000, \
                "Historical event processing performance low: %d records/sec" % eventtps
        try:
            display_biggest_traceback()
        except:
            pass

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        '''
        for f in files:
            logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
        '''
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
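The display_top helper used above is not shown; here is a minimal version in the spirit of the "pretty top" recipe from the tracemalloc documentation, assuming that is roughly what the test relies on:

import tracemalloc

def display_top( snapshot, limit=10 ):
    """Print the top allocation sites by line, plus the total allocated size."""
    stats = snapshot.statistics( 'lineno' )
    for index, stat in enumerate( stats[:limit], 1 ):
        frame = stat.traceback[0]
        print( "#%d: %s:%d: %.1f KiB" % ( index, frame.filename, frame.lineno, stat.size / 1024 ))
    print( "Total allocated size: %.1f KiB" % ( sum( s.size for s in stats ) / 1024 ))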
Code example #10
File: history_test.py  Project: ekw/cpppo
def test_history_timestamp():
    """Test timestamp, ensuring comparison deals in UTC only.  Supports testing in local timezones:
    
        Canada/Edmonton		-- A generic, ambiguous DST/non-DST timezone
        MST			-- A DST-specific non-DST timezone
        UTC			-- UTC

    """
    trtab			= ( string 
                                    if sys.version_info[0] < 3
                                    else str ).maketrans( ":-.", "   " )

    def utc_strp( loctime ):
        if '.' in loctime:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt + ".%f" )
        else:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt )
        return pytz.utc.localize( unaware )

    def utc_trns( loctime ):
        terms			= loctime.translate( trtab ).split()
        if len( terms ) == 7:
            # convert .123 into 123000 microseconds
            terms[6]               += '0' * ( 6 - len( terms[6] ))
        return datetime.datetime( *map( int, terms ), tzinfo=pytz.utc )

    # Basic millisecond hygiene.  Comparisons are by standard UTC format to 3 sub-second decimal
    # places of precision.  Unfortunately, the Python 2/3 strftime microsecond formatters are
    # different, so we don't use them.  If no precision, we do NOT round; we truncate, to avoid the
    # surprising effect of formatting a UNIX value manually using strftime produces a different
    # second than formatting it using render() with no sub-second precision.
    assert timestamp( 1399326141.999836 ) >= timestamp( 1399326141.374836 )
    assert timestamp( 1399326141.999836 ).render( ms=False ) == '2014-05-05 21:42:21'
    assert timestamp( 1399326141.999836 ).render( ms=5 ) == '2014-05-05 21:42:21.99984'
    assert timestamp( 1399326141.999836 ).render() == '2014-05-05 21:42:22.000'

    # Type caste support
    assert abs( float( timestamp( 1399326141.999836 )) - 1399326141.999836 ) < 1e-6
    assert int( timestamp( 1399326141.999836 )) == 1399326141

    # Adjust timestamp default precision and comparison epsilon.
    save			= timestamp._precision,timestamp._epsilon
    try:
        ts			= timestamp( 1399326141.999836 )
        for p in range( 0, 7 ):
            timestamp._precision= p
            timestamp._epsilon	= 10**-p if p else 0

            assert ts.render( ms=True ) == {
                0: '2014-05-05 21:42:21', # Truncates at 0 digits of sub-second precision
                1: '2014-05-05 21:42:22.0',
                2: '2014-05-05 21:42:22.00',
                3: '2014-05-05 21:42:22.000',
                4: '2014-05-05 21:42:21.9998',
                5: '2014-05-05 21:42:21.99984',
                6: '2014-05-05 21:42:21.999836',
            }[timestamp._precision]
            # For p == 0, try exact precision.  1e-6 is the smallest delta that can be reliably
            # added to a typical UNIX timestamp (eg.  1399326141.999836) in a double and still
            # expect it to affect the value (can store 15-17 decimal digits of precision).
            s,l			= (timestamp._epsilon*f for f in (0.9,1.1)) if p else (0,10**-6)
            assert     ts == ts + s
            assert     ts == ts - s
            assert not(ts == ts + l)
            assert not(ts == ts - l)
            assert     ts != ts + l
            assert     ts != ts - l
            assert not(ts <  ts + s)
            assert not(ts <  ts - s)
            assert     ts <  ts + l
            assert not(ts <  ts - l)
            assert     ts <= ts + s
            assert     ts <= ts - s
            assert     ts <= ts + l
            assert not(ts <= ts - l)
            assert not(ts >  ts + s)
            assert not(ts >  ts - s)
            assert not(ts >  ts + l)
            assert     ts >  ts - l
            assert     ts >= ts + s
            assert     ts >= ts - s
            assert not(ts >= ts + l)
            assert     ts >= ts - l
    finally:
        timestamp._precision,timestamp._epsilon = save


    # Maintain DST specificity when rendering in DST-specific timezones?  Nope, only when using
    # specially constructed non-DST versions of timezones, when they are made available by pytz.
    timestamp.support_abbreviations( None, reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'MST' ),None)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 14:42:21 MST'

    # Get MST/MDT etc., and CET/CEST abbreviations
    timestamp.support_abbreviations( ['CA','Europe/Berlin'], reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'America/Edmonton' ),False)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 15:42:21 MDT'


    # $ TZ=UTC date --date=@1388559600
    # Wed Jan  1 07:00:00 UTC 2014
    # 1396531199
    # Thu Apr  3 07:19:59 MDT 2014
    assert '2014-01-02 03:04:55.123'.translate( trtab ) == '2014 01 02 03 04 55 123'

    cnt				= 10000
    beg				= timer()
    for _ in range( cnt ):
        utc1			= utc_strp( '2014-01-02 03:04:55.123' )
    dur1			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc2			= utc_trns( '2014-01-02 03:04:55.123' )
    dur2			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc3			= timestamp.datetime_from_string( '2014-01-02 03:04:55.123' )
    dur3			= timer() - beg
    assert utc1.strftime( timestamp._fmt ) \
        == utc2.strftime( timestamp._fmt ) \
        == utc3.strftime( timestamp._fmt ) == '2014-01-02 03:04:55'
    logging.detail( "strptime: %d/s, translate: %d/s, timestamp: %d/s", cnt/dur1, cnt/dur2, cnt/dur3 )

    now				= timer()
    assert timestamp( now ) < timestamp( now + 1 )

    # From a numeric timestamp
    ts				= timestamp( 1396531199 )
    assert ts.utc	== '2014-04-03 13:19:59.000' == str( ts )

    assert ts.local	in ( '2014-04-03 07:19:59 MDT',
                             '2014-04-03 06:19:59 MST',
                             '2014-04-03 13:19:59 UTC' )

    # From a string UTC time
    dt				= timestamp.datetime_from_string( '2014-01-01 07:00:00.0' )
    assert str( dt )	== '2014-01-01 07:00:00+00:00'
    assert repr( dt )	== 'datetime.datetime(2014, 1, 1, 7, 0, tzinfo=<UTC>)'
    #assert dt.strftime( '%s' ) != '1388559600' # !? (will fail if machine is in UTC timezone )
    #assert pytz.utc.normalize( dt ).strftime( '%s' ) != '1388559600' # !?
    assert 1388559559.999999 < timestamp.number_from_datetime( dt ) < 1388559600.000001 # ok
    ts				= timestamp( '2014-01-01 07:00:00.0' )
    assert  1388559559.999999 < ts.value < 1388559600.000001
    assert ts.utc	== '2014-01-01 07:00:00.000' == str( ts )
    assert ts.local	in ( '2014-01-01 00:00:00 MST',
                             '2014-01-01 07:00:00 UTC' )

    # OK, now try a UTC time where the local timezone is in MDT
    ts.utc			= '2014-04-01 07:00:00.000'
    assert ts.local	in ( '2014-04-01 01:00:00 MDT',
                             '2014-04-01 00:00:00 MST',
                             '2014-04-01 07:00:00 UTC' )

    # Make sure that local times are unambiguous over daylight savings time
    # Mar 9 02:00 -> 03:00    1394355540 == Mar 9 2014 01:59
    # Nov 2 02:00 -> 01:00    1414915140 == Nov 2 2014 01:59
    ts				= timestamp( 1394355540 )
    assert ts.local	in ( '2014-03-09 01:59:00 MST',
                             '2014-03-09 08:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts				= timestamp( 1414915140 )
    assert ts.local	in ( '2014-11-02 01:59:00 MDT',
                             '2014-11-02 00:59:00 MST',
                             '2014-11-02 07:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-03-09 02:00:01 MST',
                             '2014-11-02 08:00:01 UTC' )

    # Now try converting a few strings that have a specific timezone.  We can use either .utc =
    # ... or .local = ...; they just default to the UTC or (local) timezone, respectively.  Using a
    # DST-specific timezone such as MST/MDT, we can unambiguously specify whether a time is inside
    # or outside DST.
    try:
        ts.local		= '2014-03-09 02:00:01 America/Edmonton' # Just inside MDT 2014
        assert False, """Should have failed -- time doesn't exist during "spring ahead" """
    except Exception as exc:
        assert "NonExistentTimeError" in str( exc )
    ts.local			= '2014-03-09 03:00:01 MDT' # Just inside MDT 2014
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc 	==   '2014-03-09 09:00:01.000' # MDT == UCT-6:00
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )
    # However, we CAN use a specifically non-DST timezone to specify times non-existent in DST
    ts.local			= '2014-03-09 02:00:01 MST' # No such time in MDT!!
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc	==   '2014-03-09 09:00:01.000'
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts.local			= '2014-11-02 01:00:01 MST' # 1 second after the end of DST
    assert 1414915200.999999 < ts.value < 1414915201.000001
    assert ts.utc	==   '2014-11-02 08:00:01.000'
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-11-02 00:59:59 MST',
                             '2014-11-02 08:00:01 UTC' )

    ts			       -= 2 # Go back 2 seconds, into DST
    assert ts.utc	==   '2014-11-02 07:59:59.000'
    assert ts.local	in ( '2014-11-02 01:59:59 MDT',
                             '2014-11-02 00:59:59 MST',
                             '2014-11-02 07:59:59 UTC' )

    ts.local			= '2014-11-02 01:59:58 MDT' # 2 seconds before end of DST
    assert 1414915197.999999 < ts.value < 1414915198.000001
    assert ts.utc	==   '2014-11-02 07:59:58.000'
    assert ts.local	in ( '2014-11-02 01:59:58 MDT',
                             '2014-11-02 00:59:58 MST',
                             '2014-11-02 07:59:58 UTC' )

    # Using a canonical timezone such as 'America/Edmonton', an "ambiguous" time (eg. during the
    # overlap in the fall) cannot be specified.  Using a DST-specific timezone, we can.
    try:
        ts.local		= '2014-11-02 01:00:01 America/Edmonton' # Inside DST?
    except Exception as exc:
        assert "AmbiguousTimeError" in str( exc )

    ts.local			= '2014-11-02 00:59:59 America/Edmonton' # 2 seconds before end of DST
    assert 1414911598.999999 < ts.value < 1414911599.000001
    assert ts.utc	==   '2014-11-02 06:59:59.000'
    assert ts.local	in ( '2014-11-02 00:59:59 MDT',
                             '2014-11-01 23:59:59 MST',
                             '2014-11-02 06:59:59 UTC' )

    after			= timestamp( '2014-11-02 01:02:03.123 MST' ) # (Nov 2 2014 -- 1:02 *after* DST ended)
    before			= timestamp( '2014-11-02 01:02:03.456 MDT' ) # (Nov 2 2014 --  :58 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-11-02 07:02:03.456'
    assert before.local	in ( '2014-11-02 01:02:03 MDT',
                             '2014-11-02 00:02:03 MST',
                             '2014-11-02 07:02:03 UTC' )
    assert after.utc	==   '2014-11-02 08:02:03.123'
    assert after.local	in ( '2014-11-02 01:02:03 MST',
                             '2014-11-02 08:02:03 UTC' )

    after			= timestamp( '2014-10-26 02:01:00.123 CET' )  # (Oct 26 2014 -- 2:01 *after* DST ended)
    before			= timestamp( '2014-10-26 02:01:00.456 CEST' ) # (Oct 26 2014 --  :59 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-10-26 00:01:00.456'
    assert before.local	in ( '2014-10-25 18:01:00 MDT',
                             '2014-10-25 17:01:00 MST',
                             '2014-10-26 00:01:00 UTC' )
    assert after.utc	==   '2014-10-26 01:01:00.123'
    assert after.local	in ( '2014-10-25 19:01:00 MDT',
                             '2014-10-25 18:01:00 MST',
                             '2014-10-26 01:01:00 UTC' )
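The comparison semantics exercised above amount to epsilon-tolerant ordering; a stand-alone sketch of the idea (not cpppo's actual timestamp implementation):

class ts_like( object ):
    """Compare equal within _epsilon; strictly ordered only beyond it."""
    _epsilon = 1e-3
    def __init__( self, value ):
        self.value = value
    def __eq__( self, other ):
        return abs( self.value - other.value ) <= self._epsilon
    def __ne__( self, other ):
        return not self == other
    def __lt__( self, other ):
        return self.value < other.value - self._epsilon

assert ts_like( 1.0000 ) == ts_like( 1.0005 )  # within epsilon: equal
assert ts_like( 1.0000 ) <  ts_like( 1.0020 )  # beyond epsilon: ordered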
Code example #11
File: history_test.py  Project: ekw/cpppo
def test_history_sequential():
    for _ in range( 3 ):
        path		= "/tmp/test_sequential_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path 

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases.  Note: times are truncated to milliseconds, so timestamps saved out will
        # probably evaluate as < the original value when read back in!  Since each file contains
        # only one record, we must be careful to use 'strict', to ensure we open the next file
        # strictly greater than the last timestamp (or we'll open the same file again!)
        now		= timer()
        count		= 10
        for e in range( count ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                l.write( { 40001: count - e }, now=now - e )
            if e:
                # Compress .1 onward using a random format; randomly delete origin uncompressed file
                # so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        # Attempt to begin loading history around the middle of the recording
        rdr		= reader( path,
                                          historical=now - random.uniform( 3.0, 9.0 ),
                                          basis=now + random.uniform( -.5, +.5 ),
                                          factor=3 )

        # Begin with the first historical file before our computed advancing historical time (we
        # could provide a specific timestamp here, if we wanted).  No lookahead.
        ts_l		= None
        f_l		= None
        after		= False # only first open is "before"; rest are "after"
        strict		= False # only goes false when timestamp increases in the same file
        deadline	= now + count
        while timer() <= deadline:
            # open next file beginning after the last ts
            o		= rdr.open( target=ts_l, after=after, strict=strict ) # Generator; doesn't do much here...
            after	= True
            strict	= True
            for (f,l,cur),(ts,js) in o: # raises HistoryExhausted on open() generator failure
                assert ts_l is None or ts >= ts_l, \
                    "Historical record out of sequence; %s isn't >= %s" % ( ts, ts_l )
                ts_l	= ts
                if js is None:
                    logging.info( "@%s: not yet available", ts )
                    assert ts > cur, "Next record should have been returned; not in future"
                    time.sleep( .1 )
                else:
                    logging.normal( "@%s: %r", ts, js )
                    assert ts <= cur, "Next record shouldn't have been returned; yet future"
                    if f == f_l and ts > ts_l:
                        strict = False
                f_l,ts_l= f,ts
        assert False, "Should have raised HistoryExhausted by now"
    except HistoryExhausted as exc:
        logging.normal( "History exhausted: %s", exc )

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
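The opener used for the compressed variants dispatches on the file suffix; a sketch of such a helper (cpppo's actual opener may differ, eg. in mode handling):

import bz2, gzip, lzma

def opener( path, mode='rb' ):
    """Open a possibly-compressed file, selecting the codec by extension."""
    if path.endswith( '.gz' ):
        return gzip.open( path, mode )
    if path.endswith( '.bz2' ):
        return bz2.open( path, mode )
    if path.endswith( '.xz' ):
        return lzma.open( path, mode )
    return open( path, mode )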
Code example #12
File: modbus_poll.py  Project: ywong3/cpppo
def main():
    parser			= argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog = """\
    Begin polling the designated register range(s), optionally writing initial values to them.

    Register range(s) and value(s) must be supplied:
    
      <begin>[-<end>]
      <begin>[-<end>]=<val>,...
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """ )
    parser.add_argument( '-v', '--verbose',
                         default=0, action="count", help="Display logging information." )
    parser.add_argument('-l', '--log', 
                        type=str, default=None, help="Direct log output to the specified file" )
    parser.add_argument( '-a', '--address', default="0.0.0.0:502",
                         help="Default [interface][:port] to bind to (default: any, port 502)" )
    parser.add_argument( '-r', '--reach',	default=1,
                         help="Merge polls within <reach> registers of each-other" )
    parser.add_argument( '-R', '--rate',	default=1.0,
                         help="Target poll rate" )
    parser.add_argument( '-t', '--timeout',	default=Defaults.Timeout,
                         help="I/O Timeout (default: %s)" % ( Defaults.Timeout ))
    parser.add_argument( 'registers', nargs="+" )
    args			= parser.parse_args()
    
    # Deduce logging level and target file (if any)
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename'] = args.log
    logging.basicConfig( **cpppo.log_cfg )

    # (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only
    # interface is supplied.  After this block, 'address' is always a tuple
    # like ("interface",502)
    address			= None
    if args.address:
        address			= args.address.split(':')
        assert 1 <= len( address ) <= 2
        address			= (
            str( address[0] ),
            int( address[1] ) if len( address ) > 1 else Defaults.Port )
        logging.info( "--address '%s' produces address=%r" % ( args.address, address ))

    # Set up the Modbus/TCP I/O timeout to use, for all connect and read/write transactions
    Defaults.Timeout		= float( args.timeout )

    # Start the PLC poller (and perform any initial writes indicated)
    poller			= poller_modbus(
        "Modbus/TCP", host=address[0], port=address[1], reach=int( args.reach ), rate=float( args.rate ))

    for txt in args.registers:
        beg,end,val		= register_decode( txt ) # beg-end is inclusive
        for reg in range( beg, end+1 ):
            poller.poll( reg )
        if val:
            # Value(s) were supplied for the register(s) range; write 'em.  This results in a
            # WriteMultipleRegistersRequest if val is an iterable, or a WriteSingle...  if not.
            # We'll need to shatter/merge the register range into appropriate sized chunks for a
            # valid Modbus/TCP request, and then take the appropriate number of values for each.
            for base,length in merge( [ (beg,end-beg+1) ] ):
                poller.write( base, val[0] if length == 1 else val[:length] )
                val		= val[length:]
    
    load			= ''
    fail			= ''
    poll			= ''
    regs			= {}
    while True:
        loadcur			= "%.2f" % ( poller.load[0] if poller.load[0] else 0 )
        if loadcur != load:
            load		= loadcur
            logging.detail( "load: %s", loadcur )
        failcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.failing ] )
        pollcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.polling ] )
        if ( failcur != fail or pollcur != poll ):
            fail, poll		= failcur, pollcur
            logging.normal( "failing: %s, polling: %s", fail, poll )
        # log data changes
        for beg,cnt in poller.polling:
            for reg in range( beg, beg+cnt ):
                val		= poller.read( reg )
                old		= regs.get( reg ) # may be None
                if val != old:
                    logging.warning( "%5d == %5d (was: %s)" %( reg, val, old ))
                    regs[reg]	= val

        time.sleep( 1 )
Code example #13
File: modbus_poll.py  Project: raphaelbertozzi/cpppo
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\
    Begin polling the designated register range(s), optionally writing initial values to them.

    Register range(s) and value(s) must be supplied:
    
      <begin>[-<end>]
      <begin>[-<end>]=<val>,...
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """,
    )
    parser.add_argument("-v", "--verbose", default=0, action="count", help="Display logging information.")
    parser.add_argument("-l", "--log", type=str, default=None, help="Direct log output to the specified file")
    parser.add_argument(
        "-a", "--address", default="0.0.0.0:502", help="Default [interface][:port] to bind to (default: any, port 502)"
    )
    parser.add_argument("-r", "--reach", default=1, help="Merge polls within <reach> registers of each-other")
    parser.add_argument("-R", "--rate", default=1.0, help="Target poll rate")
    parser.add_argument(
        "-t", "--timeout", default=Defaults.Timeout, help="I/O Timeout (default: %s)" % (Defaults.Timeout)
    )
    parser.add_argument("registers", nargs="+")
    args = parser.parse_args()

    # Deduce logging level and target file (if any)
    levelmap = {0: logging.WARNING, 1: logging.NORMAL, 2: logging.DETAIL, 3: logging.INFO, 4: logging.DEBUG}
    cpppo.log_cfg["level"] = levelmap[args.verbose] if args.verbose in levelmap else logging.DEBUG
    if args.log:
        cpppo.log_cfg["filename"] = args.log
    logging.basicConfig(**cpppo.log_cfg)

    # (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only
    # interface is supplied.  After this block, 'address' is always a tuple
    # like ("interface",502)
    address = None
    if args.address:
        address = args.address.split(":")
        assert 1 <= len(address) <= 2
        address = (str(address[0]), int(address[1]) if len(address) > 1 else Defaults.Port)
        logging.info("--address '%s' produces address=%r" % (args.address, address))

    # Set up the Modbus/TCP I/O timeout to use, for all connect and read/write transactions
    Defaults.Timeout = float(args.timeout)

    # Start the PLC poller (and perform any initial writes indicated)
    poller = poller_modbus("Modbus/TCP", host=address[0], port=address[1], reach=int(args.reach), rate=float(args.rate))

    for txt in args.registers:
        beg, end, val = register_decode(txt)  # beg-end is inclusive
        for reg in range(beg, end + 1):
            poller.poll(reg)
        if val:
            # Value(s) were supplied for the register(s) range; write 'em.  This results in a
            # WriteMultipleRegistersRequest if val is an iterable, or a WriteSingle...  if not.
            # We'll need to shatter/merge the register range into appropriate sized chunks for a
            # valid Modbus/TCP request, and then take the appropriate number of values for each.
            for base, length in merge([(beg, end - beg + 1)]):
                poller.write(base, val[0] if length == 1 else val[:length])
                val = val[length:]

    load = ""
    fail = ""
    poll = ""
    regs = {}
    while True:
        loadcur = "%.2f" % (poller.load[0] if poller.load[0] else 0)
        if loadcur != load:
            load = loadcur
            logging.detail("load: %s", loadcur)
        failcur = ", ".join([("%d-%d" % (b, b + c - 1)) for b, c in poller.failing])
        pollcur = ", ".join([("%d-%d" % (b, b + c - 1)) for b, c in poller.polling])
        if failcur != fail or pollcur != poll:
            fail, poll = failcur, pollcur
            logging.normal("failing: %s, polling: %s", fail, poll)
        # log data changes
        for beg, cnt in poller.polling:
            for reg in range(beg, beg + cnt):
                val = poller.read(reg)
                old = regs.get(reg)  # may be None
                if val != old:
                    logging.warning("%5d == %5d (was: %s)" % (reg, val, old))
                    regs[reg] = val

        time.sleep(1)
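The register_decode used above isn't shown; here is a hypothetical implementation matching the "<begin>[-<end>]=<val>,..." syntax described in the epilog (illustrative only, not necessarily the project's real parser):

def register_decode( txt ):
    """Parse '<begin>[-<end>][=<val>,...]' into (beg, end, values-or-None)."""
    rng, _, vals = txt.partition( '=' )
    beg, _, end  = rng.partition( '-' )
    beg          = int( beg )
    end          = int( end ) if end else beg
    val          = [ int( v ) for v in vals.split( ',' ) ] if vals else None
    return beg, end, val

assert register_decode( "40001-40100" ) == ( 40001, 40100, None )
assert register_decode( "40001=1,2,3" ) == ( 40001, 40001, [1, 2, 3] )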
Code example #14
File: modbus_poll.py  Project: ekw/cpppo
def main():
    parser			= argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog = """\

    Register range(s) and value(s) must be supplied:
    
      <begin>[-<end>]
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """ )
    parser.add_argument( '-v', '--verbose',
                         default=0, action="count", help="Display logging information." )
    parser.add_argument('-l', '--log', 
                        type=str, default=None, help="Direct log output to the specified file" )
    parser.add_argument( '-a', '--address', default="0.0.0.0:502",
                         help="Default [interface][:port] to bind to (default: any, port 502)" )
    parser.add_argument( '-r', '--reach',	default=1,
                         help="Merge polls within <reach> registers of each-other" )
    parser.add_argument( '-R', '--rate',	default=1.0,
                         help="Target poll rate" )
    parser.add_argument( 'registers', nargs="+" )
    args			= parser.parse_args()
    
    # Deduce logging level and target file (if any)
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename'] = args.log
    logging.basicConfig( **cpppo.log_cfg )

    # (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only
    # interface is supplied.  After this block, 'address' is always a tuple
    # like ("interface",502)
    address			= None
    if args.address:
        address			= args.address.split(':')
        assert 1 <= len( address ) <= 2
        address			= (
            str( address[0] ),
            int( address[1] ) if len( address ) > 1 else Defaults.Port )
        logging.info( "--address '%s' produces address=%r" % ( args.address, address ))

    # Start the PLC poller

    poller			= poller_modbus(
        "Modbus/TCP", host=address[0], port=address[1], reach=int( args.reach ), rate=float( args.rate ))

    
    for r in args.registers:
        rng			= r.split('-')
        beg,cnt			= int(rng[0]), (int(rng[1])-int(rng[0])+1 if len(rng) > 1 else 1)
        for reg in range( beg, beg+cnt ):
            poller.poll( reg )
    
    load			= ''
    fail			= ''
    poll			= ''
    regs			= {}
    while True:
        loadcur			= "%.2f" % ( poller.load[0] if poller.load[0] else 0 )
        if loadcur != load:
            load		= loadcur
            logging.detail( "load: %s", loadcur )
        failcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.failing ] )
        pollcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.polling ] )
        if ( failcur != fail or pollcur != poll ):
            fail, poll		= failcur, pollcur
            logging.normal( "failing: %s, polling: %s", fail, poll )
        # log data changes
        for beg,cnt in poller.polling:
            for reg in range( beg, beg+cnt ):
                val		= poller.read( reg )
                old		= regs.get( reg ) # may be None
                if val != old:
                    logging.warning( "%5d == %5d (was: %s)" %( reg, val, old ))
                    regs[reg]	= val

        time.sleep( 1 )
Code example #15
    def position(self,
                 actuator=1,
                 timeout=TIMEOUT,
                 home=True,
                 noop=False,
                 svoff=False,
                 **kwds):
        """Begin position operation on 'actuator' w/in 'timeout'.  

        :param home: Return to home position before any other movement
        :param noop: Do not perform final activation

        Running with specified data

        1   - Set internal flag Y30 (input invalid flag)
        2   - Write 1 to internal flag Y19 (SVON)
        2a  -   and confirm internal flag X49 (SVRE) has become "1"
        3   - Write 1 to internal flag Y1C (SETUP)
        3a  -   and confirm internal flag X4A (SETON) has become "1"
        4   - Write data to D9102-D9110
        5   - Write Operation Start instruction "1" to D9100 (returns to 0 after processed)

        If no positioning kwds are provided, then no new position is configured.  If 'noop' is True,
        everything except the final activation is performed.

        """
        begin = cpppo.timer()
        if timeout is None:
            timeout = self.TIMEOUT
        assert self.complete( actuator=actuator, svoff=svoff, timeout=timeout ), \
            "Previous actuator position incomplete within timeout %r" % timeout
        status = self.status(actuator=actuator)
        if not kwds:
            return status

        # Previous positioning complete, and possibly new position keywords provided.
        logging.detail("Position: actuator %3d setdata: %r", actuator, kwds)
        unit = self.unit(uid=actuator)

        # 1: set INPUT_INVALID
        unit.write(data.Y30_INPUT_INVALID.addr, 1)

        # 2: set SVON, check SVRE
        if timeout:
            assert cpppo.timer() <= begin + timeout, \
                "Failed to complete positioning SVON/SVRE within timeout"
        unit.write(data.Y19_SVON.addr, 1)
        svre = self.check(
            predicate=lambda: unit.read(data.Y19_SVON.addr) and unit.read(data.X49_SVRE.addr),
            deadline=None if timeout is None else begin + timeout)
        assert svre, \
            "Failed to set SVON True and read SVRE True"

        # 3: Return to home? set SETUP, check SETON.  Otherwise, clear SETUP.  It is very unclear
        #    whether we need to do this, and/or whether we need to clear it afterwards.
        if home:
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning SETUP/SETON within timeout"
            unit.write(data.Y1C_SETUP.addr, 1)
            seton = self.check(
                predicate=lambda: unit.read(data.Y1C_SETUP.addr) and unit.read(data.X4A_SETON.addr),
                deadline=None if timeout is None else begin + timeout)
            if not seton:
                logging.warning("Failed to set SETUP True and read SETON True")
            # assert seton, \
            #    "Failed to set SETUP True and read SETON True"
        else:
            unit.write(data.Y1C_SETUP.addr, 0)

        # 4: Write any changed position data.  The actuator doesn't accept individual register
        # writes, so we use multiple register writes for each value.
        for k, v in kwds.items():
            assert k in data, \
                "Unrecognized positioning keyword: %s == %v" % ( k, v )
            assert STEP_DATA_BEG <= data[k].addr <= STEP_DATA_END, \
                "Invalid positioning keyword: %s == %v; not within position data address range" % ( k, v )
            fmt = data[k].get('format')
            if fmt:
                # Create a big-endian buffer.  This will be some multiple of register size.  Then,
                # unpack it into some number of 16-bit big-endian registers, collected into a list.
                buf = struct.pack('>' + fmt, v)
                values = [
                    struct.unpack_from('>H', buf[o:])[0]
                    for o in range(0, len(buf), 2)
                ]
            else:
                values = [v]
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning data update within timeout"
            logging.normal("Position: actuator %3d updated: %16s: %8s (== %s)",
                           actuator, k, v, values)
            unit.write(data[k].addr, values)

        # 5: set operation_start to 0x0100 (1 in high-order bytes) unless 'noop'
        if not noop:
            unit.write(data.operation_start.addr, 0x0100)
            started = self.check(
                predicate=lambda: unit.read(data.operation_start.addr) == 0x0100,
                deadline=None if timeout is None else begin + timeout)
            assert started, \
                "Failed to detect positioning start within timeout"

        return self.status(actuator=actuator)
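Step 4's big-endian register packing can be shown standalone: a multi-register value is packed into a buffer, then split into 16-bit words.  The '>i' format here is just an example; in the method above, the format comes from each parameter's data description.

import struct

buf    = struct.pack( '>i', 150000 )          # eg. a signed 32-bit position value
values = [ struct.unpack_from( '>H', buf, o )[0]
           for o in range( 0, len( buf ), 2 ) ]
assert values == [ 0x0002, 0x49F0 ]           # 150000 == 0x000249F0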
Code example #16
File: history_test.py  Project: srdgame/cpppo
def test_history_performance():
    try:
        tracemalloc.start()
    except:
        pass

    for _ in range( 3 ):
        path		= "/tmp/test_performance_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path 

    files		= []
    try:
        day		= 24*60*60
        dur		= 3*day		# a few days worth of data
        regstps		= 0.0,5.0	# 0-5secs between updates
        numfiles	= dur//day+1	# ~1 file/day, but at least 2
        values		= {}		# Initial register values
        regscount	= 1000		# Number of different registers
        regschanged	= 1,10		# From 1-10 registers per row
        regsbase	= 40001

        start		= timer()

        now = beg	= start - dur
        linecnt		= 0
        for e in reversed( range( numfiles )):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                if values:
                    l.write( values, now=now ); linecnt += 1
                while now < beg + len(files) * dur/numfiles:
                    lst	= now
                    now += random.uniform( *regstps )
                    assert now >= lst
                    assert timestamp( now ) >= timestamp( lst ), "now: %s, timestamp(now): %s" % ( now, timestamp( now ))
                    updates = {}
                    for _ in range( random.randint( *regschanged )):
                        updates[random.randint( regsbase, regsbase + regscount - 1 )] = random.randint( 0, (1<<16) - 1 )
                    values.update( updates )
                    l.write( updates, now=now ); linecnt += 1
                lst 	= now
                now    += random.uniform( *regstps )
                assert now >= lst
                assert timestamp( now ) >= timestamp( lst )
            if e:
                # Compress .1 onward using a random format; randomly delete the original
                # uncompressed file so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        logging.warning( "Generated data in %.3fs; lines: %d", timer() - start, linecnt )

        # Start somewhere within the first 0-1% of the duration after beg, forcing the loader to
        # look back to find the first file.  Try to do it all in the next 'playback' seconds (just
        # to push it to the max), in 'chunks' pieces.
        historical	= timestamp( random.uniform( beg + dur*0/100, beg + dur*1/100 ))
        basis		= timer()
        playback	= 2.0 * dur/day # Can sustain ~2 seconds / day of history on a single CPU
        chunks		= 1000
        factor		= dur / playback
        lookahead	= 60.0
        duration	= None
        if random.choice( (True,False) ):
            duration	= random.uniform( dur * 98/100, dur * 102/100 )

        begoff		= historical.value - beg
        endoff		= 0 if duration is None else (( historical.value + duration ) - ( beg + dur ))
        logging.warning( "Playback starts at beginning %s %s, duration %s, ends at ending %s %s",
                         timestamp( beg ), format_offset( begoff, ms=False ),
                         None if duration is None else format_offset( duration, ms=False, symbols='-+' ),
                         timestamp( beg + dur ), format_offset( endoff, ms=False ))

        ld		= loader(
            path, historical=historical, basis=basis, factor=factor, lookahead=lookahead, duration=duration )
        eventcnt	= 0
        slept		= 0
        cur		= None
        while ld:
            once	= False
            while ld.state < ld.AWAITING or not once:
                once		= True
                upcoming	= None
                limit		= random.randint( 0, 250 )
                if random.choice( (True,False) ):
                    upcoming	= ld.advance()
                    if random.choice( (True,False) ) and cur:
                        # ~25% of the time, provide an 'upcoming' timestamp that is between the
                        # current advancing historical time and the last load time.
                        upcoming-= random.uniform( 0, upcoming.value - cur.value )
                cur,events	= ld.load( upcoming=upcoming, limit=limit )
                eventcnt       += len( events )
                advance		= ld.advance()
                offset		= advance.value - cur.value
                logging.detail( "%s loaded up to %s (%s w/ upcoming %14s); %4d future, %4d values: %4d events / %4d limit" ,
                                ld, cur, format_offset( offset ),
                                format_offset( upcoming.value - advance.value ) if upcoming is not None else None,
                                len( ld.future ), len( ld.values ), len( events ), limit )

            logging.warning( "%s loaded up to %s; %3d future, %4d values: %6d events total",
                                ld, cur, len( ld.future ), len( ld.values ), eventcnt )
            try:
                snapshot	= tracemalloc.take_snapshot()
                display_top( snapshot, limit=10 )
            except:
                pass

            time.sleep( playback/chunks )
            slept	       += playback/chunks

        elapsed		= timer() - basis
        eventtps	= eventcnt // ( elapsed - slept )
        logging.error( "Playback in %.3fs (slept %.3fs); events: %d ==> %d historical records/sec",
                       elapsed, slept, eventcnt, eventtps )
        if not logging.getLogger().isEnabledFor( logging.NORMAL ):
            # Ludicrously low threshold, to pass tests on very slow machines
            assert eventtps >= 1000, \
                "Historical event processing performance low: %d records/sec" % eventtps
        try:
            display_biggest_traceback()
        except:
            pass

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        '''
        for f in files:
            logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
        '''
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
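The playback rate in the test above is governed by factor == dur / playback: each real second elapsed after 'basis' advances the historical clock by 'factor' historical seconds.  A sketch of that mapping, assuming cpppo.history exposes timestamp as the test's bare-name usage suggests (the helper name advancing is hypothetical; the loader computes this internally via .advance()):

import cpppo
from cpppo.history import timestamp

def advancing( historical, basis, factor ):
    # Historical time advances 'factor' seconds per real second elapsed since 'basis'
    return timestamp( historical.value + ( cpppo.timer() - basis ) * factor )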
コード例 #18
0
ファイル: smc.py プロジェクト: pjkundert/cpppo_positioner
    def position( self, actuator=1, timeout=TIMEOUT, home=True, noop=False, svoff=False, **kwds ):
        """Begin position operation on 'actuator' w/in 'timeout'.  

        :param home: Return to home position before any other movement
        :param noop: Do not perform final activation

        Running with specified data

        1   - Set internal flag Y30 (input invalid flag)
        2   - Write 1 to internal flag Y19 (SVON)
        2a  -   and confirm internal flag X49 (SVRE) has become "1"
        3   - Write 1 to internal flag Y1C (SETUP)
        3a  -   and confirm internal flag X4A (SETON) has become "1"
        4   - Write data to D9102-D9110
        5   - Write Operation Start instruction "1" to D9100 (returns to 0 after processed)

        If no positioning kwds are provided, then no new position is configured.  If 'noop' is True,
        everything except the final activation is performed.

        """
        begin			= cpppo.timer()
        if timeout is None:
            timeout		= self.TIMEOUT
        assert self.complete( actuator=actuator, svoff=svoff, timeout=timeout ), \
            "Previous actuator position incomplete within timeout %r" % timeout
        status			= self.status( actuator=actuator )
        if not kwds:
            return status

        # Previous positioning complete, and possibly new position keywords provided.
        logging.detail( "Position: actuator %3d setdata: %r", actuator, kwds )
        unit			= self.unit( uid=actuator )

        # 1: set INPUT_INVALID
        unit.write( data.Y30_INPUT_INVALID.addr, 1 )

        # 2: set SVON, check SVRE
        if timeout:
            assert cpppo.timer() <= begin + timeout, \
                "Failed to complete positioning SVON/SVRE within timeout"
        unit.write( data.Y19_SVON.addr, 1 )
        svre			= self.check(
            predicate=lambda: unit.read( data.Y19_SVON.addr ) and unit.read( data.X49_SVRE.addr ),
            deadline=None if timeout is None else begin + timeout )
        assert svre, \
            "Failed to set SVON True and read SVRE True"

        # 3: Return to home? set SETUP, check SETON.  Otherwise, clear SETUP.  It is very unclear
        #    whether we need to do this, and/or whether we need to clear it afterwards.
        if home:
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning SETUP/SETON within timeout"
            unit.write( data.Y1C_SETUP.addr, 1 )
            seton			= self.check(
                predicate=lambda: unit.read( data.Y1C_SETUP.addr ) and unit.read( data.X4A_SETON.addr ),
                deadline=None if timeout is None else begin + timeout )
            if not seton:
                logging.warning( "Failed to set SETUP True and read SETON True" )
            # assert seton, \
            #    "Failed to set SETUP True and read SETON True"
        else:
            unit.write( data.Y1C_SETUP.addr, 0 )
        
        # 4: Write any changed position data.  The actuator doesn't accept individual register
        # writes, so we use multiple register writes for each value.
        for k,v in kwds.items():
            assert k in data, \
                "Unrecognized positioning keyword: %s == %v" % ( k, v )
            assert STEP_DATA_BEG <= data[k].addr <= STEP_DATA_END, \
                "Invalid positioning keyword: %s == %v; not within position data address range" % ( k, v )
            format		= data[k].get( 'format' )
            if format:
                # Create a big-endian buffer.  This will be some multiple of register size.  Then,
                # unpack it into some number of 16-bit big-endian registers (this will be a tuple).
                buf		= struct.pack( '>'+format, v )
                values		= [ struct.unpack_from( '>H', buf[o:] )[0] for o in range( 0, len( buf ), 2 ) ]
            else:
                values		= [ v ]
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning data update within timeout"
            logging.normal( "Position: actuator %3d updated: %16s: %8s (== %s)", actuator, k, v, values )
            unit.write( data[k].addr, values )

        # 5: set operation_start to 0x0100 (1 in high-order bytes) unless 'noop'
        if not noop:
            unit.write( data.operation_start.addr, 0x0100 )
            started			= self.check(
                predicate=lambda: unit.read( data.operation_start.addr ) == 0x0100,
                deadline=None if timeout is None else begin + timeout )
            assert started, \
                "Failed to detect positioning start within timeout"

        return self.status( actuator=actuator )
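Each of steps 2, 3 and 5 above relies on the same pattern: poll a predicate until it holds, or until a shared deadline (begin + timeout) passes.  A simplified stand-in for the self.check( predicate=..., deadline=... ) calls used above (a sketch only; the real implementation may differ):

import time
import cpppo

def check( predicate, deadline=None, interval=0.1 ):
    """Poll 'predicate' until True, or until 'deadline' (a cpppo.timer() value) passes."""
    while not predicate():
        if deadline is not None and cpppo.timer() > deadline:
            return False
        time.sleep( interval )
    return True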
コード例 #19
0
ファイル: history_test.py プロジェクト: srdgame/cpppo
def test_history_sequential():
    for _ in range( 3 ):
        path		= "/tmp/test_sequential_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases.  Note: times are truncated to milliseconds, so timestamps saved out will
        # probably evaluate as < the original value when read back in!  Since each file contains
        # only one record, we must be careful to use 'strict', to ensure we open the next file
        # strictly greater than the last timestamp (or we'll open the same file again!)
        now		= timer()
        count		= 10
        for e in range( count ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                l.write( { 40001: count - e }, now=now - e )
            if e:
                # Compress .1 onward using a random format; randomly delete the original
                # uncompressed file so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        # Attempt to begin loading history around the middle of the recording
        rdr		= reader( path,
                                  historical=now - random.uniform( 3.0, 9.0 ),
                                  basis=now + random.uniform( -.5, +.5 ),
                                  factor=3 )

        # Begin with the first historical file before our computed advancing historical time (we
        # could provide a specific timestamp here, if we wanted).  No lookahead.
        ts_l		= None
        f_l		= None
        after		= False # only first open is "before"; rest are "after"
        strict		= False # only goes false when timestamp increases in the same file
        deadline	= now + count
        while timer() <= deadline:
            # open next file beginning after the last ts
            o		= rdr.open( target=ts_l, after=after, strict=strict ) # Generator; doesn't do much here...
            after	= True
            strict	= True
            for (f,l,cur),(ts,js) in o: # raises HistoryExhausted on open() generator failure
                assert ts_l is None or ts >= ts_l, \
                    "Historical record out of sequence; %s isn't >= %s" % ( ts, ts_l )
                if js is None:
                    logging.info( "@%s: not yet available", ts )
                    assert ts > cur, "Next record should have been returned; not in future"
                    time.sleep( .1 )
                else:
                    logging.normal( "@%s: %r", ts, js )
                    assert ts <= cur, "Next record shouldn't have been returned; yet future"
                    if f == f_l and ts > ts_l:
                        strict = False
                f_l,ts_l= f,ts
        assert False, "Should have raised HistoryExhausted by now"
    except HistoryExhausted as exc:
        logging.normal( "History exhausted: %s", exc )

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
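The 'strict' handling above exists because timestamps are truncated to milliseconds when written, so a stamp read back from a file will generally compare <= (and usually <) the original; re-opening "after" the last timestamp non-strictly would therefore yield the same file again.  A tiny illustration of the hazard, assuming millisecond truncation as the comment above describes:

now = 1399326141.999836
stored = int( now * 1000 ) / 1000.0	# truncated to ms on write: 1399326141.999
assert stored < now			# so a non-strict "after stored" re-opens the same record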
コード例 #20
0
ファイル: modbus_poll.py プロジェクト: gregwjacobs/cpppo
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\

    Register range(s) must be supplied:
    
      <begin>[-<end>]
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """)
    parser.add_argument('-v',
                        '--verbose',
                        default=0,
                        action="count",
                        help="Display logging information.")
    parser.add_argument('-l',
                        '--log',
                        type=str,
                        default=None,
                        help="Direct log output to the specified file")
    parser.add_argument(
        '-a',
        '--address',
        default="0.0.0.0:502",
        help="Default [interface][:port] to bind to (default: any, port 502)")
    parser.add_argument(
        '-r',
        '--reach',
        default=1,
        help="Merge polls within <reach> registers of each-other")
    parser.add_argument('-R', '--rate', default=1.0, help="Target poll rate")
    parser.add_argument('registers', nargs="+")
    args = parser.parse_args()

    # Deduce logging level and target file (if any)
    levelmap = {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    cpppo.log_cfg['level'] = (levelmap[args.verbose]
                              if args.verbose in levelmap else logging.DEBUG)
    if args.log:
        cpppo.log_cfg['filename'] = args.log
    logging.basicConfig(**cpppo.log_cfg)

    # Deduce interface:port to bind; defaults to all interfaces (INADDR_ANY) if only :port is
    # supplied.  Port defaults to 502 if only interface is supplied.  After this block, 'address'
    # is always a tuple like ("interface",502)
    address = None
    if args.address:
        address = args.address.split(':')
        assert 1 <= len(address) <= 2
        address = (str(address[0]),
                   int(address[1]) if len(address) > 1 else Defaults.Port)
        log.info("--address '%s' produces address=%r" %
                 (args.address, address))

    # Start the PLC poller

    poller = poller_modbus("Modbus/TCP",
                           host=address[0],
                           port=address[1],
                           reach=int(args.reach),
                           rate=float(args.rate))

    for r in args.registers:
        rng = r.split('-')
        beg = int(rng[0])
        cnt = int(rng[1]) - int(rng[0]) + 1 if len(rng) > 1 else 1
        for reg in range(beg, beg + cnt):
            poller.poll(reg)

    load = ''
    fail = ''
    poll = ''
    regs = {}
    while True:
        loadcur = "%.2f" % (poller.load[0] if poller.load[0] else 0)
        if loadcur != load:
            load = loadcur
            logging.detail("load: %s", loadcur)
        failcur = ", ".join([("%d-%d" % (b, b + c - 1))
                             for b, c in poller.failing])
        pollcur = ", ".join([("%d-%d" % (b, b + c - 1))
                             for b, c in poller.polling])
        if (failcur != fail or pollcur != poll):
            fail, poll = failcur, pollcur
            logging.normal("failing: %s, polling: %s", fail, poll)
        # log data changes
        for beg, cnt in poller.polling:
            for reg in range(beg, beg + cnt):
                val = poller.read(reg)
                old = regs.get(reg)  # may be None
                if val != old:
                    logging.warning("%5d == %5d (was: %s)" % (reg, val, old))
                    regs[reg] = val

        time.sleep(1)
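The 'registers' arguments follow the <begin>[-<end>] syntax from the epilog; a stand-alone sketch of that parsing, with the len(rng) > 1 guard made explicit (the helper name parse_range is hypothetical):

def parse_range(spec):
    """Parse a '<begin>[-<end>]' register range into (begin, count)."""
    rng = spec.split('-')
    beg = int(rng[0])
    cnt = int(rng[1]) - beg + 1 if len(rng) > 1 else 1
    assert cnt > 0, "Empty register range: %s" % spec
    return beg, cnt

assert parse_range('40001-40100') == (40001, 100)
assert parse_range('40001') == (40001, 1)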
コード例 #21
0
    def request(self, data, addr=None):
        """Any exception should result in a reply being generated with a non-zero status."""

        # See if this request is for us; if not, route to the correct Object, and return its result.
        # If the resolution/lookup fails (eg. bad symbolic Tag); ignore it (return False on error)
        # and continue processing, so we can return a proper .status error code from the actual
        # request, below.
        target = self.route(data, fail=Message_Router.ROUTE_FALSE)
        if target:
            if log.isEnabledFor(logging.DETAIL):
                log.detail("%s Routing to %s: %s", self, target,
                           enip_format(data))
            return target.request(data, addr=addr)

        if log.isEnabledFor(logging.DETAIL):
            log.detail("%s Request: %s", self, enip_format(data))
        # This request is for this Object.

        # Pick out our services added at this level.  If not recognized, let superclass try; it'll
        # return an appropriate error code if not recognized.
        if (data.get('service') == self.RD_VAR_REQ
                or self.RD_VAR_CTX in data and data.setdefault(
                    'service', self.RD_VAR_REQ) == self.RD_VAR_REQ):
            # Read Dynamic Variable --> Read Dynamic Variable Reply.
            pass
        elif (data.get('service') == self.RD_STS_REQ
              or self.RD_STS_CTX in data and data.setdefault(
                  'service', self.RD_STS_REQ) == self.RD_STS_REQ):
            # Read Additional Status --> Read Additional Status Reply.
            pass
        elif (data.get('service') == self.RD_INF_REQ
              or self.RD_INF_CTX in data and data.setdefault(
                  'service', self.RD_INF_REQ) == self.RD_INF_REQ):
            # Get Device Info --> Get Device Info Reply.
            pass
        elif (data.get('service') == self.PT_INI_REQ
              or self.PT_INI_CTX in data and data.setdefault(
                  'service', self.PT_INI_REQ) == self.PT_INI_REQ):
            # Pass-thru Init --> Pass-thru Init Reply.
            pass
        elif (data.get('service') == self.PT_QRY_REQ
              or self.PT_QRY_CTX in data and data.setdefault(
                  'service', self.PT_QRY_REQ) == self.PT_QRY_REQ):
            # Pass-thru Query --> Pass-thru Query Reply.
            pass
        elif (data.get('service') == self.PT_FLQ_REQ
              or self.PT_FLQ_CTX in data and data.setdefault(
                  'service', self.PT_FLQ_REQ) == self.PT_FLQ_REQ):
            # Pass-thru Flush Queue --> Pass-thru Flush Queue Reply.
            pass
        else:
            # Not recognized; more generic command?
            return super(HART, self).request(data, addr=addr)

        # It is a recognized HART Object request.  Set the data.status to the appropriate error
        # code, should a failure occur at that location during processing.  We will be returning a
        # reply beyond this point; any exceptions generated will be captured, logged and an
        # appropriate reply .status error code returned.

        if not hasattr(self, 'hart_command'):
            self.hart_command = None  # Any HART Pass-thru command in process: None or (<command>,<command_data>)

        def fldnam_attribute(typ, fldnam, dfl):
            insnam = "HART_{channel}_Data".format(channel=self.instance_id - 1)
            tag = '.'.join((insnam, fldnam))
            res = resolve_tag(tag)
            if not res:
                # Not found; create one.  Use Class ID 0xF35D, same Instance ID as self.
                # No one else should be creating Instances of this Class ID...
                clsid = HART_Data.class_id
                insid = self.instance_id
                obj = lookup(clsid, insid)
                if not obj:
                    obj = HART_Data(insnam, instance_id=insid)
                att = Attribute_print(name=tag, type_cls=typ,
                                      default=dfl)  # eg. 'PV', REAL
                attid = 0
                if obj.attribute:
                    attid = int(sorted(obj.attribute, key=misc.natural)[-1])
                attid += 1
                obj.attribute[str(attid)] = att
                log.normal(
                    "%-24s Instance %3d, Attribute %3d added: %s (Tag: %s)",
                    obj, insid, attid, att, tag)
                res = redirect_tag(tag, {
                    'class': clsid,
                    'instance': insid,
                    'attribute': attid
                })
                assert resolve_tag( tag ) == res, \
                    "Failed to create '{tag}' Tag pointing to {res!r}; found: {out!r}".format(
                        tag=tag, res=res, out=resolve_tag( tag ))
            # res is a (clsid,insid,attid) of an Attribute containing this fldnam's data.
            attribute = lookup(*res)
            return attribute

        data.service |= 0x80
        data.status = 0x08  # Service not supported, if not recognized or fail to access
        try:
            if data.service == self.RD_VAR_RPY:
                data.read_var = dotdict()
                for typ, fldnam, dfl in self.RD_VAR_RPY_FLD:
                    attribute = fldnam_attribute(typ, fldnam, dfl)
                    data.read_var[fldnam] = attribute[0]
                    logging.detail("%s <-- %s == %s", fldnam, attribute,
                                   data.read_var[fldnam])
                data.read_var.status = 0x00
                data.status = 0
            elif data.service == self.PT_INI_RPY:
                # Actually store the command, return a proper handle.  The status is actually a HART
                # command result code where 33 means initiated.  Unlike a real HART I/O card, we'll
                # just discard any previous HART pass-thru command (we don't have a stack).
                data.init.handle = 99
                data.init.queue_space = 200
                if self.hart_command:
                    data.init.status = random.choice(
                        (32, 33))  # 32 busy, 33 initiated, 35 device offline
                    if data.init.status == 33:
                        self.hart_command = None
                else:
                    data.init.status = random.choice((33, 35))
                if self.hart_command is None and data.init.status == 33:
                    self.hart_command = data.init.command, data.init.get(
                        'command_data', [])
                logging.normal(
                    "%s: HART Pass-thru Init Command %r: %s", self,
                    self.hart_command, "initiated" if data.init.status == 33 else
                    "busy" if data.init.status == 32 else
                    "offline" if data.init.status == 35 else "unknown: %s" %
                    data.init.status)
                logging.detail("%s HART Pass-thru Init: %r", self, data)
                data.status = 0
            elif data.service == self.PT_QRY_RPY:
                # TODO: just return a single network byte ordered REAL, for now, as if it's a HART
                # Read Primary Variable request.  We're returning the Input Tag version of the
                # pass-thru command (not the CIP version)
                data.query.reply_status = 0
                data.query.fld_dev_status = 0
                data.query.reply_data = []

                if self.hart_command is not None:
                    data.query.status = random.choice((0, 34, 34, 34))
                    data.query.command = self.hart_command[
                        0]  # ignore command_data
                else:
                    data.query.status = 35  # 0 success, 34 running, 35 dead
                    data.query.command = 0

                if self.hart_command and self.hart_command[
                        0] == 1 and data.query.status == 0:
                    # PV units code (unknown? not in Input Tag type command) + 4-byte PV REAL (network order)
                    attribute = fldnam_attribute(REAL, 'PV', 1.234)
                    val = attribute[0]
                    data.query.reply_data += [
                        b for b in bytearray(REAL_network.produce(val))
                    ]
                elif self.hart_command and self.hart_command[
                        0] == 2 and data.query.status == 0:
                    # current and percent of range.
                    attribute = fldnam_attribute(REAL, 'loop_current',
                                                 random.uniform(4, 20))
                    cur = attribute[0]
                    pct = 0.0 if cur < 4 else 100.0 if cur > 20 else (
                        cur - 4) / (20 - 4) * 100
                    data.query.reply_data += [
                        b for b in bytearray(REAL_network.produce(cur))
                    ]
                    data.query.reply_data += [
                        b for b in bytearray(REAL_network.produce(pct))
                    ]
                elif self.hart_command and self.hart_command[
                        0] == 3 and data.query.status == 0:
                    insnam = "HART_{channel}_Data".format(
                        channel=self.instance_id - 1)
                    for v in ('PV', 'SV', 'TV', 'FV'):
                        attribute = fldnam_attribute(REAL, v,
                                                     random.uniform(0, 1))
                        val = attribute[0]
                        data.query.reply_data += [
                            b for b in bytearray(REAL_network.produce(val))
                        ]
                data.query.reply_size = len(data.query.reply_data)
                logging.normal(
                    "%s: HART Pass-thru Query Command %r: %s", self,
                    self.hart_command, "success" if data.query.status == 0 else
                    "running" if data.query.status == 34 else
                    "dead" if data.query.status == 35 else "unknown: %s" %
                    data.query.status)

                if data.query.status in (0, 35):
                    self.hart_command = None
                logging.detail("%s HART Pass-thru Query: %r", self, data)
                data.status = 0
            else:
                assert False, "Not Implemented: {data!r}".format(data=data)

            # Success (data.status == 0x00), or failure w/ non-zero data.status

        except Exception as exc:
            # On Exception, if we haven't specified a more detailed error code, return General
            # Error.  Remember: 0x06 (Insufficient Packet Space) is a NORMAL response to a successful
            # Read Tag Fragmented that returns a subset of the requested data.
            log.normal(
                "%r Service 0x%02x %s failed with Exception: %s\nRequest: %s\n%s",
                self, data.service if 'service' in data else 0,
                (self.service[data.service] if 'service' in data
                 and data.service in self.service else "(Unknown)"), exc,
                enip_format(data),
                ('' if log.getEffectiveLevel() >= logging.NORMAL else ''.join(
                    traceback.format_exception(*sys.exc_info()))))
            assert data.status, \
                "Implementation error: must specify .status before raising Exception!"
            pass

        # Always produce a response payload; if a failure occurred, will contain an error status
        if log.isEnabledFor(logging.DETAIL):
            log.detail("%s Response: Service 0x%02x %s %s", self,
                       data.service if 'service' in data else 0,
                       (self.service[data.service] if 'service' in data
                        and data.service in self.service else "(Unknown)"),
                       enip_format(data))
        data.input = bytearray(self.produce(data))
        return True
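The Pass-thru Query replies above carry REAL values serialized by REAL_network.produce() in network byte order.  A hedged sketch of the equivalent packing, assuming a network-ordered CIP REAL is a big-endian IEEE-754 float (the helper name real_network_bytes is hypothetical):

import struct

def real_network_bytes(value):
    # Big-endian IEEE-754 float, as the REAL_network.produce() calls above appear to emit
    return [b for b in bytearray(struct.pack('>f', value))]

assert real_network_bytes(1.0) == [0x3f, 0x80, 0x00, 0x00]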
コード例 #22
0
ファイル: main.py プロジェクト: ekw/cpppo
def main( argv=None, attribute_class=device.Attribute, identity_class=None, idle_service=None,
          **kwds ):
    """Pass the desired argv (excluding the program name in sys.arg[0]; typically pass argv=None, which
    is equivalent to argv=sys.argv[1:], the default for argparse.  Requires at least one tag to be
    defined.

    If a cpppo.apidict() is passed for kwds['server']['control'], we'll use it to transmit server
    control signals via its .done, .disable, .timeout and .latency attributes.

    Uses the provided attribute_class (default: device.Attribute) to process all EtherNet/IP
    attribute I/O (eg. Read/Write Tag [Fragmented]) requests.  By default, device.Attribute stores
    and retrieves the supplied data.  To perform other actions (ie. forward the data to your own
    application), derive from device.Attribute, and override the __getitem__ and __setitem__
    methods.

    If an idle_service function is provided, it will be called after a period of latency between
    incoming requests.

    """
    global address
    global options
    global tags
    global srv_ctl
    global latency
    global timeout

    ap				= argparse.ArgumentParser(
        description = "Provide an EtherNet/IP Server",
        epilog = "" )

    ap.add_argument( '-v', '--verbose',
                     default=0, action="count",
                     help="Display logging information." )
    ap.add_argument( '-a', '--address',
                     default=( "%s:%d" % address ),
                     help="EtherNet/IP interface[:port] to bind to (default: %s:%d)" % (
                         address[0], address[1] ))
    ap.add_argument( '-p', '--print', default=False, action='store_true',
                     help="Print a summary of operations to stdout" )
    ap.add_argument( '-l', '--log',
                     help="Log file, if desired" )
    ap.add_argument( '-w', '--web',
                     default="",
                     help="Web API [interface]:[port] to bind to (default: %s, port 80)" % (
                         address[0] ))
    ap.add_argument( '-d', '--delay',
                     help="Delay response to each request by a certain number of seconds (default: 0.0)",
                     default="0.0" )
    ap.add_argument( '-s', '--size',
                     help="Limit EtherNet/IP encapsulated request size to the specified number of bytes (default: None)",
                     default=None )
    ap.add_argument( '-P', '--profile',
                     help="Output profiling data to a file (default: None)",
                     default=None )
    ap.add_argument( 'tags', nargs="+",
                     help="Any tags, their type (default: INT), and number (default: 1), eg: tag=INT[1000]")

    args			= ap.parse_args( argv )

    # Deduce interface:port address to bind, and correct types (default is address, above)
    bind			= args.address.split(':')
    assert 1 <= len( bind ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    bind			= ( str( bind[0] ) if bind[0] else address[0],
                                    int( bind[1] ) if len( bind ) > 1 and bind[1] else address[1] )

    # Set up logging level (-v...) and --log <file>
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )

    # Chain any provided idle_service function with log rotation; these may (also) consult global
    # signal flags such as logrotate_request, so execute supplied functions before logrotate_perform
    idle_service		= [ idle_service ] if idle_service else []
    if args.log:
        # Output logging to a file, and handle UNIX-y log file rotation via 'logrotate', which sends
        # signals to indicate that a service's log file has been moved/renamed and it should re-open
        cpppo.log_cfg['filename']= args.log
        signal.signal( signal.SIGHUP, logrotate_request )
        idle_service.append( logrotate_perform )

    logging.basicConfig( **cpppo.log_cfg )


    # Pull out a 'server.control...' supplied in the keywords, and make certain it's a
    # cpppo.apidict.  We'll use this to transmit control signals to the server thread.  Set the
    # current values to sane initial defaults/conditions.
    if 'server' in kwds:
        assert 'control' in kwds['server'], "A 'server' keyword provided without a 'control' attribute"
        srv_ctl			= cpppo.dotdict( kwds.pop( 'server' ))
        assert isinstance( srv_ctl['control'], cpppo.apidict ), "The server.control... must be a cpppo.apidict"
    else:
        srv_ctl.control		= cpppo.apidict( timeout=timeout )

    srv_ctl.control['done']	= False
    srv_ctl.control['disable']	= False
    srv_ctl.control.setdefault( 'latency', latency )

    # Global options data.  Copy any remaining keyword args supplied to main().  This could
    # include an alternative enip_process, for example, instead of defaulting to logix.process.
    options.update( kwds )

    # Specify a response delay.  The options.delay is another dotdict() layer, so its attributes
    # (eg. .value, .range) are available to the web API for manipulation.  Therefore, they can be
    # set to arbitrary values at random times!  However, the type will be retained.
    def delay_range( *args, **kwds ):
        """If a delay.range like ".1-.9" is specified, then change the delay.value every second to something
        in that range."""
        assert 'delay' in kwds and 'range' in kwds['delay'] and '-' in kwds['delay']['range'], \
            "No delay=#-# specified"
        log.normal( "Delaying all responses by %s seconds", kwds['delay']['range'] )
        while True:
            # Once we start, changes to delay.range will be re-evaluated each loop
            time.sleep( 1 )
            try:
                lo,hi		= map( float, kwds['delay']['range'].split( '-' ))
                kwds['delay']['value'] = random.uniform( lo, hi )
                log.info( "Mutated delay == %g", kwds['delay']['value'] )
            except Exception as exc:
                log.warning( "No delay=#[.#]-#[.#] range specified: %s", exc )

    options.delay		= cpppo.dotdict()
    try:
        options.delay.value	= float( args.delay )
        log.normal( "Delaying all responses by %r seconds" , options.delay.value )
    except:
        assert '-' in args.delay, \
            "Unrecognized --delay=%r option" % args.delay
        # A range #-#; set up a thread to mutate the option.delay.value over the .range
        options.delay.range	= args.delay
        options.delay.value	= 0.0
        mutator			= threading.Thread( target=delay_range, kwargs=options )
        mutator.daemon		= True
        mutator.start()

    # Create all the specified tags/Attributes.  The enip_process function will (somehow) assign the
    # given tag name to reference the specified Attribute.  We'll define an Attribute to print
    # I/O if args.print is specified; reads will only be logged at logging.NORMAL and above.
    class Attribute_print( attribute_class ):
        def __getitem__( self, key ):
            value		= super( Attribute_print, self ).__getitem__( key )
            if log.isEnabledFor( logging.NORMAL ):
                print( "%20s[%5s-%-5s] == %s" % (
                    self.name, 
                    key.indices( len( self ))[0]   if isinstance( key, slice ) else key,
                    key.indices( len( self ))[1]-1 if isinstance( key, slice ) else key,
                    value ))
            return value

        def __setitem__( self, key, value ):
            super( Attribute_print, self ).__setitem__( key, value )
            print( "%20s[%5s-%-5s] <= %s" % (
                self.name, 
                key.indices( len( self ))[0]   if isinstance( key, slice ) else key,
                key.indices( len( self ))[1]-1 if isinstance( key, slice ) else key,
                value ))

    for t in args.tags:
        tag_name, rest		= t, ''
        if '=' in tag_name:
            tag_name, rest	= tag_name.split( '=', 1 )
        tag_type, rest		= rest or 'INT', ''
        tag_size		= 1
        if '[' in tag_type:
            tag_type, rest	= tag_type.split( '[', 1 )
            assert ']' in rest, "Invalid tag; mis-matched [...]"
            tag_size, rest	= rest.split( ']', 1 )
        assert not rest, "Invalid tag specified; expected tag=<type>[<size>]: %r" % t
        tag_type		= str( tag_type ).upper()
        typenames		= {"INT": parser.INT, "DINT": parser.DINT, "SINT": parser.SINT, "REAL": parser.REAL }
        assert tag_type in typenames, "Invalid tag type; must be one of %r" % list( typenames.keys() )
        tag_default		= 0.0 if tag_type == "REAL" else 0
        try:
            tag_size		= int( tag_size )
        except:
            raise AssertionError( "Invalid tag size: %r" % tag_size )

        # Ready to create the tag and its Attribute (and error code to return, if any).  If tag_size
        # is 1, it will be a scalar Attribute.  Since the tag_name may contain '.', we don't want
        # the normal dotdict.__setitem__ resolution to parse it; use plain dict.__setitem__.
        log.normal( "Creating tag: %s=%s[%d]", tag_name, tag_type, tag_size )
        tag_entry		= cpppo.dotdict()
        tag_entry.attribute	= ( Attribute_print if args.print else attribute_class )(
            tag_name, typenames[tag_type], default=( tag_default if tag_size == 1 else [tag_default] * tag_size ))
        tag_entry.error		= 0x00
        dict.__setitem__( tags, tag_name, tag_entry )

    # Use the Logix simulator by default (unless some other one was supplied as a keyword options to
    # main(), loaded above into 'options').  This key indexes an immutable value (not another
    # dotdict layer), so is not available for the web API to report/manipulate.
    options.setdefault( 'enip_process', logix.process )
    options.setdefault( 'identity_class', identity_class )

    # The Web API

    # Deduce web interface:port address to bind, and correct types (default is address, above).
    # Default to the same interface as we're bound to, port 80.  We'll only start if non-empty --web
    # was provided, though (even if it's just ':', to get all defaults).  Usually you'll want to
    # specify at least --web :[<port>].
    http			= args.web.split(':')
    assert 1 <= len( http ) <= 2, "Invalid --web [<interface>]:[<port>]: %s" % args.web
    http			= ( str( http[0] ) if http[0] else bind[0],
                                    int( http[1] ) if len( http ) > 1 and http[1] else 80 )


    if args.web:
        assert 'web' in sys.modules, "Failed to import web API module; --web option not available.  Run 'pip install web.py'"
        logging.normal( "EtherNet/IP Simulator Web API Server: %r" % ( http, ))
        webserver		= threading.Thread( target=web_api, kwargs={'http': http} )
        webserver.daemon	= True
        webserver.start()

        
    # The EtherNet/IP Simulator.  Pass all the top-level options keys/values as keywords, and pass
    # the entire tags dotdict as a tags=... keyword.  The server_main server.control signals (.done,
    # .disable) are also passed as the server= keyword.  We are using an cpppo.apidict with a long
    # timeout; this will block the web API for several seconds to allow all threads to respond to
    # the signals delivered via the web API.
    logging.normal( "EtherNet/IP Simulator: %r" % ( bind, ))
    kwargs			= dict( options, latency=latency, size=args.size, tags=tags, server=srv_ctl )

    tf				= network.server_thread
    tf_kwds			= dict()
    if args.profile:
        tf			= network.server_thread_profiling
        tf_kwds['filename']	= args.profile

    disabled			= False	# Recognize toggling between en/disabled
    while not srv_ctl.control.done:
        if not srv_ctl.control.disable:
            if disabled:
                logging.detail( "EtherNet/IP Server enabled" )
                disabled= False
            network.server_main( address=bind, target=enip_srv, kwargs=kwargs,
                                 idle_service=lambda: [ f() for f in idle_service ],
                                 thread_factory=tf, **tf_kwds )
        else:
            if not disabled:
                logging.detail( "EtherNet/IP Server disabled" )
                disabled= True
            time.sleep( latency )            # Still disabled; wait a bit

    return 0
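The tag specifications accepted above follow tag[=<type>[<size>]], eg. Scada=DINT[1000].  The parsing loop above does it with successive splits; an equivalent stand-alone sketch using a regex (the helper name parse_tag is hypothetical):

import re

TAG_RE			= re.compile( r'^(?P<name>[^=]+)(=(?P<type>\w+)(\[(?P<size>\d+)\])?)?$' )

def parse_tag( spec ):
    """Parse 'tag=TYPE[size]' (eg. 'Scada=DINT[1000]') into (name, type, size)."""
    m			= TAG_RE.match( spec )
    assert m, "Invalid tag specified; expected tag=<type>[<size>]: %r" % spec
    return m.group( 'name' ), ( m.group( 'type' ) or 'INT' ).upper(), int( m.group( 'size' ) or 1 )

assert parse_tag( 'Scada=DINT[1000]' ) == ( 'Scada', 'DINT', 1000 )
assert parse_tag( 'Motor' )            == ( 'Motor', 'INT', 1 )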
コード例 #23
0
import sys
import time
import logging
import traceback

import cpppo
#cpppo.log_cfg['level'] = logging.DETAIL
logging.basicConfig( **cpppo.log_cfg )

#from cpppo.server.enip.get_attribute import proxy_simple as device # MicroLogix
#from cpppo.server.enip.get_attribute import proxy as device	    # ControlLogix
from cpppo.server.enip.ab import powerflex_750_series as device	   # PowerFlex 750

# Optionally specify Powerflex DNS name or IP address, prefixed with '@':
host				= 'localhost'
if len( sys.argv ) > 1 and sys.argv[1].startswith( '@' ):
    host			= sys.argv.pop( 1 )[1:]

# Optionally specify velocity; defaults to 0:
velocity			= 0
if len( sys.argv ) > 1:
    velocity			= float( sys.argv.pop( 1 ))

param				= 'Motor Velocity = (REAL)%s' % ( velocity )
try:
    via				= device( host=host )
    with via: # establish gateway, detects Exception (closing gateway)
        val,			= via.write(
            via.parameter_substitution( param ), checking=True )
    print( "%s: %-32s == %s" % ( time.ctime(), param, val ))
except Exception as exc:
    logging.detail( "Exception writing Parameter %s: %s, %s",
	param, exc, traceback.format_exc() )
    sys.exit( 1 )
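Run stand-alone, the script above would be invoked something like this (the script name is hypothetical; the '@' prefix selects the target host, and a bare number sets the velocity):

$ python powerflex_write.py @10.0.0.5 123.45	# write Motor Velocity = (REAL)123.45 via 10.0.0.5
$ python powerflex_write.py 0			# defaults: host localhost, velocity 0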
コード例 #24
0
ファイル: history_test.py プロジェクト: srdgame/cpppo
def test_history_timestamp():
    """Test timestamp, ensuring comparison deals in UTC only.  Supports testing in local timezones:
    
        Canada/Edmonton		-- A generic, ambiguous DST/non-DST timezone
        MST			-- A DST-specific non-DST timezone
        UTC			-- UTC

    """
    trtab			= ( string 
                                    if sys.version_info[0] < 3
                                    else str ).maketrans( ":-.", "   " )

    def utc_strp( loctime ):
        if '.' in loctime:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt + ".%f" )
        else:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt )
        return pytz.utc.localize( unaware )

    def utc_trns( loctime ):
        terms			= loctime.translate( trtab ).split()
        if len( terms ) == 7:
            # convert .123 into 123000 microseconds
            terms[6]               += '0' * ( 6 - len( terms[6] ))
        return datetime.datetime( *map( int, terms ), tzinfo=pytz.utc )

    # Basic millisecond hygiene.  Comparisons are by standard UTC format to 3 sub-second decimal
    # places of precision.  Unfortunately, the Python 2/3 strftime microsecond formatters are
    # different, so we don't use them.  If no precision, we do NOT round; we truncate, to avoid the
    # surprising effect where formatting a UNIX value manually using strftime produces a different
    # second than formatting it using render() with no sub-second precision.
    assert timestamp( 1399326141.999836 ) >= timestamp( 1399326141.374836 )
    assert timestamp( 1399326141.999836 ).render( ms=False ) == '2014-05-05 21:42:21'
    assert timestamp( 1399326141.999836 ).render( ms=5 ) == '2014-05-05 21:42:21.99984'
    assert timestamp( 1399326141.999836 ).render() == '2014-05-05 21:42:22.000'

    # Type cast support
    assert abs( float( timestamp( 1399326141.999836 )) - 1399326141.999836 ) < 1e-6
    assert int( timestamp( 1399326141.999836 )) == 1399326141

    # Adjust timestamp default precision and comparison epsilon.
    save			= timestamp._precision,timestamp._epsilon
    try:
        ts			= timestamp( 1399326141.999836 )
        for p in range( 0, 7 ):
            timestamp._precision= p
            timestamp._epsilon	= 10**-p if p else 0

            assert ts.render( ms=True ) == {
                0: '2014-05-05 21:42:21', # Truncates at 0 digits of sub-second precision
                1: '2014-05-05 21:42:22.0',
                2: '2014-05-05 21:42:22.00',
                3: '2014-05-05 21:42:22.000',
                4: '2014-05-05 21:42:21.9998',
                5: '2014-05-05 21:42:21.99984',
                6: '2014-05-05 21:42:21.999836',
            }[timestamp._precision]
            # For p == 0, try exact precision.  1e-6 is the smallest delta that can be reliably
            # added to a typical UNIX timestamp (eg.  1399326141.999836) in a double and still
            # expect it to affect the value (can store 15-17 decimal digits of precision).
            s,l			= (timestamp._epsilon*f for f in (0.9,1.1)) if p else (0,10**-6)
            assert     ts == ts + s
            assert     ts == ts - s
            assert not(ts == ts + l)
            assert not(ts == ts - l)
            assert     ts != ts + l
            assert     ts != ts - l
            assert not(ts <  ts + s)
            assert not(ts <  ts - s)
            assert     ts <  ts + l
            assert not(ts <  ts - l)
            assert     ts <= ts + s
            assert     ts <= ts - s
            assert     ts <= ts + l
            assert not(ts <= ts - l)
            assert not(ts >  ts + s)
            assert not(ts >  ts - s)
            assert not(ts >  ts + l)
            assert     ts >  ts - l
            assert     ts >= ts + s
            assert     ts >= ts - s
            assert not(ts >= ts + l)
            assert     ts >= ts - l
    finally:
        timestamp._precision,timestamp._epsilon = save


    # Maintain DST specificity when rendering in DST-specific timezones?  Nope, only when using
    # specially constructed non-DST versions of timezones, when they are made available by pytz.
    timestamp.support_abbreviations( None, reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'MST' ),None)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 14:42:21 MST'

    # Get MST/MDT etc., and CET/CEST abbreviations
    timestamp.support_abbreviations( ['CA','Europe/Berlin'], reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'America/Edmonton' ),False)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 15:42:21 MDT'


    # $ TZ=UTC date --date=@1388559600
    # Wed Jan  1 07:00:00 UTC 2014
    # 1396531199
    # Thu Apr  3 07:19:59 MDT 2014
    assert '2014-01-02 03:04:55.123'.translate( trtab ) == '2014 01 02 03 04 55 123'

    cnt				= 10000
    beg				= timer()
    for _ in range( cnt ):
        utc1			= utc_strp( '2014-01-02 03:04:55.123' )
    dur1			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc2			= utc_trns( '2014-01-02 03:04:55.123' )
    dur2			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc3			= timestamp.datetime_from_string( '2014-01-02 03:04:55.123' )
    dur3			= timer() - beg
    assert utc1.strftime( timestamp._fmt ) \
        == utc2.strftime( timestamp._fmt ) \
        == utc3.strftime( timestamp._fmt ) == '2014-01-02 03:04:55'
    logging.detail( "strptime: %d/s, translate: %d/s, timestamp: %d/s", cnt/dur1, cnt/dur2, cnt/dur3 )

    now				= timer()
    assert timestamp( now ) < timestamp( now + 1 )

    # From a numeric timestamp
    ts				= timestamp( 1396531199 )
    assert ts.utc	== '2014-04-03 13:19:59.000' == str( ts )

    assert ts.local	in ( '2014-04-03 07:19:59 MDT',
                             '2014-04-03 06:19:59 MST',
                             '2014-04-03 13:19:59 UTC' )

    # From a string UTC time
    dt				= timestamp.datetime_from_string( '2014-01-01 07:00:00.0' )
    assert str( dt )	== '2014-01-01 07:00:00+00:00'
    assert repr( dt )	== 'datetime.datetime(2014, 1, 1, 7, 0, tzinfo=<UTC>)'
    #assert dt.strftime( '%s' ) != '1388559600' # !? (will fail if machine is in UTC timezone )
    #assert pytz.utc.normalize( dt ).strftime( '%s' ) != '1388559600' # !?
    assert 1388559559.999999 < timestamp.number_from_datetime( dt ) < 1388559600.000001 # ok
    ts				= timestamp( '2014-01-01 07:00:00.0' )
    assert  1388559559.999999 < ts.value < 1388559600.000001
    assert ts.utc	== '2014-01-01 07:00:00.000' == str( ts )
    assert ts.local	in ( '2014-01-01 00:00:00 MST',
                             '2014-01-01 07:00:00 UTC' )

    # OK, now try a UTC time where the local timezone is in MDT
    ts.utc			= '2014-04-01 07:00:00.000'
    assert ts.local	in ( '2014-04-01 01:00:00 MDT',
                             '2014-04-01 00:00:00 MST',
                             '2014-04-01 07:00:00 UTC' )

    # Make sure that local times are unambiguous over daylight savings time
    # Mar 9 02:00 -> 03:00    1394355540 == Mar 9 2014 01:59
    # Nov 2 02:00 -> 01:00    1414915140 == Nov 2 2014 01:59
    ts				= timestamp( 1394355540 )
    assert ts.local	in ( '2014-03-09 01:59:00 MST',
                             '2014-03-09 08:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts				= timestamp( 1414915140 )
    assert ts.local	in ( '2014-11-02 01:59:00 MDT',
                             '2014-11-02 00:59:00 MST',
                             '2014-11-02 07:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-11-02 08:00:01 UTC' )

    # Now try converting a few strings that have a specific timezone.  We can use either .utc =
    # ... or .local = ...; they just default to the UTC or (local) timezone, respectively.  Using a
    # DST-specific timezone such as MST/MDT, we can unambiguously specify whether a time is inside
    # or outside DST.
    try:
        ts.local		= '2014-03-09 02:00:01 America/Edmonton' # Just inside MDT 2014
        assert False, """Should have failed -- time doesn't exist during "spring ahead" """
    except Exception as exc:
        assert "NonExistentTimeError" in str( exc )
    ts.local			= '2014-03-09 03:00:01 MDT' # Just inside MDT 2014
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc 	==   '2014-03-09 09:00:01.000' # MDT == UCT-6:00
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )
    # However, we CAN use a specifically non-DST timezone to specify times non-existent in DST
    ts.local			= '2014-03-09 02:00:01 MST' # No such time in MDT!!
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc	==   '2014-03-09 09:00:01.000'
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts.local			= '2014-11-02 01:00:01 MST' # 1 second after the end of DST
    assert 1414915200.999999 < ts.value < 1414915201.000001
    assert ts.utc	==   '2014-11-02 08:00:01.000'
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-11-02 08:00:01 UTC' )

    ts			       -= 2 # Go back 2 seconds, into DST
    assert ts.utc	==   '2014-11-02 07:59:59.000'
    assert ts.local	in ( '2014-11-02 01:59:59 MDT',
                             '2014-11-02 00:59:59 MST',
                             '2014-11-02 07:59:59 UTC' )

    ts.local			= '2014-11-02 01:59:58 MDT' # 2 seconds before end of DST
    assert 1414915197.999999 < ts.value < 1414915198.000001
    assert ts.utc	==   '2014-11-02 07:59:58.000'
    assert ts.local	in ( '2014-11-02 01:59:58 MDT',
                             '2014-11-02 00:59:58 MST',
                             '2014-11-02 07:59:58 UTC' )

    # Using a canonical timezone such as 'America/Edmonton', an "ambiguous" time (eg. during the
    # overlap in the fall) cannot be specified.  Using a DST-specific timezone, we can.
    try:
        ts.local		= '2014-11-02 01:00:01 America/Edmonton' # Inside DST?
    except Exception as exc:
        assert "AmbiguousTimeError" in str( exc )

    ts.local			= '2014-11-02 00:59:59 America/Edmonton' # 2 seconds before end of DST
    assert 1414911598.999999 < ts.value < 1414911599.000001
    assert ts.utc	==   '2014-11-02 06:59:59.000'
    assert ts.local	in ( '2014-11-02 00:59:59 MDT',
                             '2014-11-01 23:59:59 MST',
                             '2014-11-02 06:59:59 UTC' )

    after			= timestamp( '2014-11-02 01:02:03.123 MST' ) # (Nov 2 2014 -- 1:02 *after* DST ended)
    before			= timestamp( '2014-11-02 01:02:03.456 MDT' ) # (Nov 2 2014 --  :58 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-11-02 07:02:03.456'
    assert before.local	in ( '2014-11-02 01:02:03 MDT',
                             '2014-11-02 00:02:03 MST',
                             '2014-11-02 07:02:03 UTC' )
    assert after.utc	==   '2014-11-02 08:02:03.123'
    assert after.local	in ( '2014-11-02 01:02:03 MST',
                             '2014-11-02 08:02:03 UTC' )

    after			= timestamp( '2014-10-26 02:01:00.123 CET' )  # (Oct 26 2014 -- 1:01 *after* DST ended)
    before			= timestamp( '2014-10-26 02:01:00.456 CEST' ) # (Oct 26 2014 --  :59 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-10-26 00:01:00.456'
    assert before.local	in ( '2014-10-25 18:01:00 MDT',
                             '2014-10-25 17:01:00 MST',
                             '2014-10-26 00:01:00 UTC' )
    assert after.utc	==   '2014-10-26 01:01:00.123'
    assert after.local	in ( '2014-10-25 19:01:00 MDT',
                             '2014-10-25 18:01:00 MST',
                             '2014-10-26 01:01:00 UTC' )
Code Example #25
File: modbus_test.py Project: Felipeasg/cpppo
def run_plc_modbus_polls( plc ):
    # Initial conditions (in case PLC is persistent between tests)
    plc.write(     1, 0 )
    plc.write( 40001, 0 )

    rate			= 1.0
    timeout			= 2 * rate 	# Nyquist
    intervals			= timeout / .05	#  w/ fixed .05s intervals
    wfkw			= dict( timeout=timeout, intervals=intervals )
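    # (Assumed from its use below: waitfor( predicate, label, timeout=, intervals= )
    # re-tests the predicate up to 'intervals' times over 'timeout' seconds, and returns
    # a (success, elapsed) tuple.)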

    plc.poll( 40001, rate=rate )
    
    success,elapsed		= waitfor( lambda: plc.read( 40001 ) is not None, "40001 polled", **wfkw )
    assert success
    assert elapsed < 1.0
    assert plc.read( 40001 ) == 0
    
    assert plc.read(     1 ) is None
    assert plc.read( 40002 ) is None
    success,elapsed		= waitfor( lambda: plc.read( 40002 ) is not None, "40002 polled", **wfkw )
    assert success
    assert elapsed < 1.0
    assert plc.read( 40002 ) == 0
    success,elapsed		= waitfor( lambda: plc.read(     1 ) is not None, "00001 polled", **wfkw )
    assert success
    assert elapsed < 1.0
    assert plc.read(     1 ) == 0

    # Now add a bunch of new stuff to poll, and ensure polling occurs.  As we add registers, the
    # number of distinct poll ranges will increase, and then decrease as we in-fill and the
    # inter-register gap drops below the merge reach (10), allowing the poller to merge ranges.
    # Thus, keep track of the number of registers added, and allow the average poll time to rise
    # and then fall again, something like:
    # 
    # avg. 
    # poll
    # time
    #  
    #   |
    #   |
    # 4s|         ..
    # 3s|        .  .
    # 2s|     ...    ...
    # 1s|.....          .......
    #  -+----------------------------------
    #   |  10  20  30  40   regs

    # We'll be overwhelming the poller, so it won't be able to poll w/in the target rate; we'll
    # need to more than double the Nyquist-rate timeout.
    wfkw['timeout']	       *= 2.5
    wfkw['intervals']	       *= 2.5
    
    regs			= {}
    extent			= 100 # how many each of coil/holding registers
    total			= extent*2 # total registers in play
    elapsed			= None
    rolling			= None
    rolling_factor		= 1.0/5	# Rolling exponential moving average over last ~8 samples
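    # (An assumption about misc.exponential_moving_average: the usual recurrence,
    #  ema = sample if ema is None else ema + factor * ( sample - ema ).)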

    # Keep increasing the number of registers polled, up to 1/2 of all registers
    while len( regs ) < total * 50 // 100:
        # Always select a previously unpolled register; however, it might
        # have already been in a merge range; if so, get its current value
        # so we mutate it (forcing it to be re-polled)
        base			= 40001 if random.randint( 0, 1 ) else 1
        r			= None
        while r is None or r in regs:
            r			= random.randint( base, base + extent )
        v			= plc.read( r )
        if v is not None:
            logging.detail( "New reg %5d was already polled due to reach=%d", r, plc.reach )
            regs[r]		= v
        regs[r]			= ( regs[r] ^ 1 if r in regs
                                else random.randint( 0, 65535 ) if base > 40000
                                else random.randint( 0, 1 ) )

        plc.write( r, regs[r] )
        plc.poll( r )
        if len( regs ) > total * 10 // 100:
            # skip to the good parts...  After 10% of all registers are being polled, start
            # calculating.  See how long it takes, on average, to get the newly written register
            # value polled back.
            success,elapsed	= waitfor( lambda: plc.read( r ) == regs[r], "polled %5d == %5d" % ( r, regs[r] ), **wfkw )
            assert success
            rolling		= misc.exponential_moving_average( rolling, elapsed, rolling_factor )

        logging.normal( "%3d/%3d regs: polled %3d ranges w/in %7.3fs. Polled %5d == %5d w/in %7.3fs: avg. %7.3fs (load %3.2f, %3.2f, %3.2f)",
                         len( regs ), total, len( plc.polling ), plc.duration,
                         r, regs[r], elapsed or 0.0, rolling or 0.0, *[misc.nan if load is None else load for load in plc.load] )

        if len( regs ) > total * 20 // 100:
            # after 20%, start looking for the exit (ranges should merge, poll rate fall )
            if rolling < plc.rate:
                break

    assert rolling < plc.rate, \
        "Rolling average poll cycle %7.3fs should have fallen below target poll rate %7.3fs" % ( rolling, plc.rate )

    for r,v in regs.items():
        assert plc.read( r ) == v
Code Example #26
File: history_test.py Project: srdgame/cpppo
def test_history_unparsable():
    """Test history files rendered unparsable due to dropouts.  This should be handled with no problem
    except if the initial frame of register data on the first file is missing.

    """
    for _ in range( 3 ):
        path		= "/tmp/test_unparsable_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break		# found an unused name
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases, containing records that are invalid.
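        # (ie. 'path' is the newest file; 'path.1' is one second older, ..., 'path.9' the oldest)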
        now		= timer()
        v		= 10000
        secs		= 10
        secs_ext	=  1.0  # playback target may fall outside the recorded history by this +/-
        basisext	=   .5  # adjust start basis time from now by this +/-
        minfactor	=   .25
        maxfactor	=  2.0
        maxlatency	=   .25
        # 1/N file lines corrupted (kills 2 records; the current and following).  None --> no errors
        maxerror	= random.choice( [ None, 3, 10, 100 ] )
        oldest		= None
        newest		= None
        logging.normal( "Corrupting %s of all history lines", None if not maxerror else "1/%d" % maxerror )
        for e in range( secs ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                ssend	= 100
                for ss in range( 0, ssend ): # subseconds up to but not including ssend...
                    js	= json.dumps( { 40001: v + e * 1000 + (ss * 1000 // ssend) } ) + '\n'
                    if maxerror and not random.randint( 0, maxerror ):
                        # Truncate some of the records (as would occur in a filesystem full or halt)
                        js = js[:random.randint( 0, len( js ) - 1)]
                    ts	= timestamp( now - e + ss/ssend )
                    if oldest is None or ts < oldest:
                        oldest = ts
                    if newest is None or ts > newest:
                        newest = ts
                    l._append( '\t'.join( (str( ts ),json.dumps( None ),js) ) )

        # Load the historical records.  This will be robust against all errors except if the first
        # line of the first history file opened is corrupt, and we therefore cannot get the initial
        # frame of register data.
        historical	= timestamp( now - random.uniform( -secs_ext, secs + secs_ext ))
        basisdelay	= random.uniform( -basisext, +basisext )
        basis		= now + basisdelay
        factor		= random.uniform( minfactor, maxfactor )
        lookahead	= 1.0
        on_bad_iframe	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        on_bad_data	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
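        # (Hedged, inferred from names/use: playback presumably advances the current
        # historical time as historical + ( timer() - basis ) * factor, pre-loading events
        # up to 'lookahead' seconds ahead.  RAISE propagates IframeError/DataError to the
        # caller; FAIL ends the load w/ ld.state == ld.FAILED; SUPPRESS skips the bad
        # record and carries on -- hence its 3/5 weighting in the random.choice above.)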
        logging.normal( "Playback starts %s (%.1f%%) of history %s-%s, in %.3fs, at x %.2f rate w/%.1fs lookahead, on_bad_iframe=%s, on_bad_data=%s",
                        historical, ( historical.value - oldest.value ) * 100 / ( newest.value - oldest.value ),
                        oldest, newest, basisdelay, factor, lookahead,
                        "SUPPRESS" if on_bad_iframe == loader.SUPPRESS else "FAIL" if on_bad_iframe  == loader.FAIL else "RAISE",
                        "SUPPRESS" if on_bad_data   == loader.SUPPRESS else "FAIL" if on_bad_data    == loader.FAIL else "RAISE" )

        ld		= loader( path,
                                historical=historical, basis=basis, factor=factor, lookahead=lookahead )
        dur		= basisext + ( secs_ext + secs + secs_ext ) / factor + basisext + 2*maxlatency # Don't be too strict
        beg		= timer()
        count		= 0

        while ld:
            assert timer() - beg < dur, "The loader should have ended"
            cur,events	= ld.load( on_bad_iframe=on_bad_iframe, on_bad_data=on_bad_data )
            count      += len( events )
            logging.normal( "%s loaded up to %s; %d future, %d values: %d events: %s",
                            ld, cur, len( ld.future ), len( ld.values ), len( events ), 
                            repr( events ) if logging.root.isEnabledFor( logging.DEBUG ) else reprlib.repr( events ))
            time.sleep( random.uniform( 0.0, maxlatency ))

        if on_bad_data == ld.FAIL or on_bad_iframe == ld.FAIL:
            assert ld.state in (ld.COMPLETE, ld.FAILED)
        else:
            assert ld.state == ld.COMPLETE

    except IframeError as exc:
        logging.warning( "Detected error on initial frame of registers in first history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED and count == 0, "Shouldn't have loaded any events -- only iframe failures expected"

    except DataError as exc:
        logging.warning( "Detected error on registers data in a history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        for f in files:
            if os.path.exists( f ):
                logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
            else:
                logging.warning( "%s: Couldn't find file", f )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except OSError:
                pass
Code Example #27
File: main.py Project: gregwjacobs/cpppo
def main(argv=None, **kwds):
    """Pass the desired argv (excluding the program name in sys.arg[0]; typically
    pass argv=None, which is equivalent to argv=sys.argv[1:], the default for
    argparse.  Requires at least one tag to be defined.

    If a cpppo.apidict() is passed for kwds['server']['control'], we'll use it
    to transmit server control signals via its .done, .disable, .timeout and
    .latency attributes.

    """
    global address
    global options
    global tags
    global srv_ctl
    global latency
    global timeout

    ap = argparse.ArgumentParser(description="Provide an EtherNet/IP Server",
                                 epilog="")

    ap.add_argument('-v',
                    '--verbose',
                    default=0,
                    action="count",
                    help="Display logging information.")
    ap.add_argument(
        '-a',
        '--address',
        default=("%s:%d" % address),
        help="EtherNet/IP interface[:port] to bind to (default: %s:%d)" %
        (address[0], address[1]))
    ap.add_argument('-l', '--log', help="Log file, if desired")
    ap.add_argument(
        '-w',
        '--web',
        default="",
        help="Web API [interface]:[port] to bind to (default: %s, port 80)" %
        (address[0]))
    ap.add_argument(
        '-d',
        '--delay',
        help=
        "Delay response to each request by a certain number of seconds (default: 0.0)",
        default="0.0")
    ap.add_argument('-p',
                    '--profile',
                    help="Output profiling data to a file (default: None)",
                    default=None)
    ap.add_argument(
        'tags',
        nargs="+",
        help=
        "Any tags, their type (default: INT), and number (default: 1), eg: tag=INT[1000]"
    )

    args = ap.parse_args(argv)

    # Deduce interface:port address to bind, and correct types (default is address, above)
    bind = args.address.split(':')
    assert 1 <= len(
        bind
    ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    bind = (str(bind[0]) if bind[0] else address[0],
            int(bind[1]) if len(bind) > 1 and bind[1] else address[1])

    # Set up logging level (-v...) and --log <file>
    levelmap = {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    cpppo.log_cfg['level'] = (levelmap[args.verbose]
                              if args.verbose in levelmap else logging.DEBUG)

    idle_service = None
    if args.log:
        # Output logging to a file, and handle UNIX-y log file rotation via 'logrotate', which sends
        # signals to indicate that a service's log file has been moved/renamed and it should re-open
        cpppo.log_cfg['filename'] = args.log
        signal.signal(signal.SIGHUP, logrotate_request)
        idle_service = logrotate_perform

    logging.basicConfig(**cpppo.log_cfg)

    # Pull out a 'server.control...' supplied in the keywords, and make certain it's a
    # cpppo.apidict.  We'll use this to transmit control signals to the server thread.  Set the
    # current values to sane initial defaults/conditions.
    if 'server' in kwds:
        assert 'control' in kwds[
            'server'], "A 'server' keyword provided without a 'control' attribute"
        srv_ctl = cpppo.dotdict(kwds.pop('server'))
        assert isinstance(
            srv_ctl['control'],
            cpppo.apidict), "The server.control... must be a cpppo.apidict"
    else:
        srv_ctl.control = cpppo.apidict(timeout=timeout)

    srv_ctl.control['done'] = False
    srv_ctl.control['disable'] = False
    srv_ctl.control.setdefault('latency', latency)

    # Global options data.  Copy any remaining keyword args supplied to main().  This could
    # include an alternative enip_process, for example, instead of defaulting to logix.process.
    options.update(kwds)

    # Specify a response delay.  The options.delay is another dotdict() layer, so its attributes
    # (eg. .value, .range) are available to the web API for manipulation.  Therefore, they can be
    # set to arbitrary values at random times!  However, the type will be retained.
    def delay_range(*args, **kwds):
        """If a delay.range like ".1-.9" is specified, then change the delay.value every second to something
        in that range."""
        assert 'delay' in kwds and 'range' in kwds['delay'] and '-' in kwds['delay']['range'], \
            "No delay=#-# specified"
        log.normal("Delaying all responses by %s seconds",
                   kwds['delay']['range'])
        while True:
            # Once we start, changes to delay.range will be re-evaluated each loop
            time.sleep(1)
            try:
                lo, hi = map(float, kwds['delay']['range'].split('-'))
                kwds['delay']['value'] = random.uniform(lo, hi)
                log.info("Mutated delay == %g", kwds['delay']['value'])
            except Exception as exc:
                log.warning("No delay=#[.#]-#[.#] range specified: %s", exc)

    options.delay = cpppo.dotdict()
    try:
        options.delay.value = float(args.delay)
        log.normal("Delaying all responses by %r seconds", options.delay.value)
    except ValueError:
        assert '-' in args.delay, \
            "Unrecognized --delay=%r option" % args.delay
        # A range #-#; set up a thread to mutate the option.delay.value over the .range
        options.delay.range = args.delay
        options.delay.value = 0.0
        mutator = threading.Thread(target=delay_range, kwargs=options)
        mutator.daemon = True
        mutator.start()

    # Create all the specified tags/Attributes.  The enip_process function will (somehow) assign the
    # given tag name to reference the specified Attribute.
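    # eg. "Scada=DINT[1000]" --> tag_name "Scada", tag_type "DINT", tag_size 1000; a bare
    # "Motor" defaults to a scalar INT, ie. Motor=INT[1].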
    for t in args.tags:
        tag_name, rest = t, ''
        if '=' in tag_name:
            tag_name, rest = tag_name.split('=', 1)
        tag_type, rest = rest or 'INT', ''
        tag_size = 1
        if '[' in tag_type:
            tag_type, rest = tag_type.split('[', 1)
            assert ']' in rest, "Invalid tag; mis-matched [...]"
            tag_size, rest = rest.split(']', 1)
        assert not rest, "Invalid tag specified; expected tag=<type>[<size>]: %r" % t
        tag_type = str(tag_type).upper()
        typenames = {
            "INT": parser.INT,
            "DINT": parser.DINT,
            "SINT": parser.SINT,
            "REAL": parser.REAL
        }
        assert tag_type in typenames, "Invalid tag type; must be one of %r" % list(
            typenames.keys())
        try:
            tag_size = int(tag_size)
        except ValueError:
            raise AssertionError("Invalid tag size: %r" % tag_size)

        # Ready to create the tag and its Attribute (and error code to return, if any).  If tag_size
        # is 1, it will be a scalar Attribute.
        log.normal("Creating tag: %s=%s[%d]", tag_name, tag_type, tag_size)
        tags[tag_name] = cpppo.dotdict()
        tags[tag_name].attribute = device.Attribute(
            tag_name,
            typenames[tag_type],
            default=(0 if tag_size == 1 else [0] * tag_size))
        tags[tag_name].error = 0x00

    # Use the Logix simulator by default (unless some other one was supplied as a keyword option to
    # main(), loaded above into 'options').  This key indexes an immutable value (not another dotdict
    # layer), so is not available for the web API to report/manipulate.
    options.setdefault('enip_process', logix.process)

    # The Web API

    # Deduce web interface:port address to bind, and correct types (default is address, above).
    # Default to the same interface as we're bound to, port 80.  We'll only start if non-empty --web
    # was provided, though (even if it's just ':', to get all defaults).  Usually you'll want to
    # specify at least --web :[<port>].
    http = args.web.split(':')
    assert 1 <= len(
        http) <= 2, "Invalid --web [<interface>]:[<port>]: %s" % args.web
    http = (str(http[0]) if http[0] else bind[0],
            int(http[1]) if len(http) > 1 and http[1] else 80)

    if args.web:
        assert 'web' in sys.modules, "Failed to import web API module; --web option not available.  Run 'pip install web.py'"
        logging.normal("EtherNet/IP Simulator Web API Server: %r" % (http, ))
        webserver = threading.Thread(target=web_api, kwargs={'http': http})
        webserver.daemon = True
        webserver.start()

    # The EtherNet/IP Simulator.  Pass all the top-level options keys/values as keywords, and pass
    # the entire tags dotdict as a tags=... keyword.  The server_main server.control signals (.done,
    # .disable) are also passed as the server= keyword.  We are using a cpppo.apidict with a long
    # timeout; this will block the web API for several seconds to allow all threads to respond to
    # the signals delivered via the web API.
    logging.normal("EtherNet/IP Simulator: %r" % (bind, ))
    kwargs = dict(options, latency=latency, tags=tags, server=srv_ctl)

    tf = network.server_thread
    tf_kwds = dict()
    if args.profile:
        tf = network.server_thread_profiling
        tf_kwds['filename'] = args.profile

    disabled = False  # Recognize toggling between en/disabled
    while not srv_ctl.control.done:
        if not srv_ctl.control.disable:
            if disabled:
                logging.detail("EtherNet/IP Server enabled")
                disabled = False
            network.server_main(address=bind,
                                target=enip_srv,
                                kwargs=kwargs,
                                idle_service=idle_service,
                                thread_factory=tf,
                                **tf_kwds)
        else:
            if not disabled:
                logging.detail("EtherNet/IP Server disabled")
                disabled = True
            time.sleep(latency)  # Still disabled; wait a bit

    return 0
Code Example #28
def run_plc_modbus_polls(plc):
    # Initial conditions (in case PLC is persistent between tests)
    plc.write(1, 0)
    plc.write(40001, 0)

    rate = 1.0
    timeout = 2 * rate  # Nyquist
    intervals = timeout / .05  #  w/ fixed .05s intervals
    wfkw = dict(timeout=timeout, intervals=intervals)

    plc.poll(40001, rate=rate)

    success, elapsed = waitfor(lambda: plc.read(40001) is not None,
                               "40001 polled", **wfkw)
    assert success
    assert elapsed < 1.0
    assert plc.read(40001) == 0

    assert plc.read(1) is None
    assert plc.read(40002) is None
    success, elapsed = waitfor(lambda: plc.read(40002) is not None,
                               "40002 polled", **wfkw)
    assert success
    assert elapsed < 1.0
    assert plc.read(40002) == 0
    success, elapsed = waitfor(lambda: plc.read(1) is not None, "00001 polled",
                               **wfkw)
    assert success
    assert elapsed < 1.0
    assert plc.read(1) == 0

    # Now add a bunch of new stuff to poll, and ensure polling occurs.  As we add registers, the
    # number of distinct poll ranges will increase, and then decrease as we in-fill and the
    # inter-register gap drops below the merge reach (10), allowing the poller to merge ranges.
    # Thus, keep track of the number of registers added, and allow the average poll time to rise
    # and then fall again, something like:
    #
    # avg.
    # poll
    # time
    #
    #   |
    #   |
    # 4s|         ..
    # 3s|        .  .
    # 2s|     ...    ...
    # 1s|.....          .......
    #  -+----------------------------------
    #   |  10  20  30  40   regs

    # We'll be overwhelming the poller, so it won't be able to poll w/in the target rate; we'll
    # need to more than double the Nyquist-rate timeout.
    wfkw['timeout'] *= 2.5
    wfkw['intervals'] *= 2.5

    regs = {}
    extent = 100  # how many each of coil/holding registers
    total = extent * 2  # total registers in play
    elapsed = None
    rolling = None
    rolling_factor = 1.0 / 5  # Rolling exponential moving average over last ~8 samples

    # Keep increasing the number of registers polled, up to 1/2 of all registers
    while len(regs) < total * 50 // 100:
        # Always select a previously unpolled register; however, it might
        # have already been in a merge range; if so, get its current value
        # so we mutate it (forcing it to be re-polled)
        base = 40001 if random.randint(0, 1) else 1
        r = None
        while r is None or r in regs:
            r = random.randint(base, base + extent)
        v = plc.read(r)
        if v is not None:
            logging.detail("New reg %5d was already polled due to reach=%d", r,
                           plc.reach)
            regs[r] = v
        regs[r] = (regs[r] ^ 1 if r in regs else random.randint(0, 65535)
                   if base > 40000 else random.randint(0, 1))

        plc.write(r, regs[r])
        plc.poll(r)
        if len(regs) > total * 10 // 100:
            # skip to the good parts...  After 10% of all registers are being polled, start
            # calculating.  See how long it takes, on average, to get the newly written register
            # value polled back.
            success, elapsed = waitfor(lambda: plc.read(r) == regs[r],
                                       "polled %5d == %5d" % (r, regs[r]),
                                       **wfkw)
            assert success
            rolling = misc.exponential_moving_average(rolling, elapsed,
                                                      rolling_factor)

        logging.normal(
            "%3d/%3d regs: polled %3d ranges w/in %7.3fs. Polled %5d == %5d w/in %7.3fs: avg. %7.3fs (load %3.2f, %3.2f, %3.2f)",
            len(regs), total, len(plc.polling), plc.duration, r, regs[r],
            elapsed or 0.0, rolling or 0.0,
            *[misc.nan if load is None else load for load in plc.load])

        if len(regs) > total * 20 // 100:
            # after 20%, start looking for the exit (ranges should merge, poll rate fall )
            if rolling < plc.rate:
                break

    assert rolling < plc.rate, \
        "Rolling average poll cycle %7.3fs should have fallen below target poll rate %7.3fs" % ( rolling, plc.rate )

    for r, v in regs.items():
        assert plc.read(r) == v
Code Example #29
def main(argv=None,
         attribute_class=device.Attribute,
         idle_service=None,
         identity_class=None,
         UCMM_class=None,
         message_router_class=None,
         connection_manager_class=None,
         **kwds):
    """Pass the desired argv (excluding the program name in sys.arg[0]; typically pass argv=None, which
    is equivalent to argv=sys.argv[1:], the default for argparse.  Requires at least one tag to be
    defined.

    If a cpppo.apidict() is passed for kwds['server']['control'], we'll use it to transmit server
    control signals via its .done, .disable, .timeout and .latency attributes.

    Uses the provided attribute_class (default: device.Attribute) to process all EtherNet/IP
    attribute I/O (eg. Read/Write Tag [Fragmented]) requests.  By default, device.Attribute stores
    and retrieves the supplied data.  To perform other actions (ie. forward the data to your own
    application), derive from device.Attribute, and override the __getitem__ and __setitem__
    methods.

    If an idle_service function is provided, it will be called after a period of latency between
    incoming requests.

    """
    global address
    global options
    global tags
    global srv_ctl
    global latency
    global timeout

    ap = argparse.ArgumentParser(description="Provide an EtherNet/IP Server",
                                 epilog="")

    ap.add_argument('-v',
                    '--verbose',
                    default=0,
                    action="count",
                    help="Display logging information.")
    ap.add_argument(
        '-a',
        '--address',
        default=("%s:%d" % address),
        help="EtherNet/IP interface[:port] to bind to (default: %s:%d)" %
        (address[0], address[1]))
    ap.add_argument('-p',
                    '--print',
                    default=False,
                    action='store_true',
                    help="Print a summary of operations to stdout")
    ap.add_argument('-l', '--log', help="Log file, if desired")
    ap.add_argument(
        '-w',
        '--web',
        default="",
        help="Web API [interface]:[port] to bind to (default: %s, port 80)" %
        (address[0]))
    ap.add_argument(
        '-d',
        '--delay',
        default="0.0",
        help=
        "Delay response to each request by a certain number of seconds (default: 0.0)"
    )
    ap.add_argument(
        '-s',
        '--size',
        help=
        "Limit EtherNet/IP encapsulated request size to the specified number of bytes (default: None)",
        default=None)
    ap.add_argument(
        '--route-path',
        default=None,
        help=
        "Route Path, in JSON, eg. %r (default: None); 0/false to accept only empty route_path"
        % (str(json.dumps(route_path_default))))
    ap.add_argument('-P',
                    '--profile',
                    default=None,
                    help="Output profiling data to a file (default: None)")
    ap.add_argument(
        'tags',
        nargs="+",
        help=
        "Any tags, their type (default: INT), and number (default: 1), eg: tag=INT[1000]"
    )

    args = ap.parse_args(argv)

    # Deduce interface:port address to bind, and correct types (default is address, above)
    bind = args.address.split(':')
    assert 1 <= len(
        bind
    ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    bind = (str(bind[0]) if bind[0] else address[0],
            int(bind[1]) if len(bind) > 1 and bind[1] else address[1])

    # Set up logging level (-v...) and --log <file>
    levelmap = {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    cpppo.log_cfg['level'] = (levelmap[args.verbose]
                              if args.verbose in levelmap else logging.DEBUG)

    # Chain any provided idle_service function with log rotation; these may (also) consult global
    # signal flags such as logrotate_request, so execute supplied functions before logrotate_perform
    idle_service = [idle_service] if idle_service else []
    if args.log:
        # Output logging to a file, and handle UNIX-y log file rotation via 'logrotate', which sends
        # signals to indicate that a service's log file has been moved/renamed and it should re-open
        cpppo.log_cfg['filename'] = args.log
        signal.signal(signal.SIGHUP, logrotate_request)
        idle_service.append(logrotate_perform)

    logging.basicConfig(**cpppo.log_cfg)

    # Pull out a 'server.control...' supplied in the keywords, and make certain it's a
    # cpppo.apidict.  We'll use this to transmit control signals to the server thread.  Set the
    # current values to sane initial defaults/conditions.
    if 'server' in kwds:
        assert 'control' in kwds[
            'server'], "A 'server' keyword provided without a 'control' attribute"
        srv_ctl = cpppo.dotdict(kwds.pop('server'))
        assert isinstance(
            srv_ctl['control'],
            cpppo.apidict), "The server.control... must be a cpppo.apidict"
    else:
        srv_ctl.control = cpppo.apidict(timeout=timeout)

    srv_ctl.control['done'] = False
    srv_ctl.control['disable'] = False
    srv_ctl.control.setdefault('latency', latency)

    # Global options data.  Copy any remaining keyword args supplied to main().  This could
    # include an alternative enip_process, for example, instead of defaulting to logix.process.
    options.update(kwds)

    # Specify a response delay.  The options.delay is another dotdict() layer, so its attributes
    # (eg. .value, .range) are available to the web API for manipulation.  Therefore, they can be
    # set to arbitrary values at random times!  However, the type will be retained.
    def delay_range(*args, **kwds):
        """If a delay.range like ".1-.9" is specified, then change the delay.value every second to something
        in that range."""
        assert 'delay' in kwds and 'range' in kwds['delay'] and '-' in kwds['delay']['range'], \
            "No delay=#-# specified"
        log.normal("Delaying all responses by %s seconds",
                   kwds['delay']['range'])
        while True:
            # Once we start, changes to delay.range will be re-evaluated each loop
            time.sleep(1)
            try:
                lo, hi = map(float, kwds['delay']['range'].split('-'))
                kwds['delay']['value'] = random.uniform(lo, hi)
                log.info("Mutated delay == %g", kwds['delay']['value'])
            except Exception as exc:
                log.warning("No delay=#[.#]-#[.#] range specified: %s", exc)

    options.delay = cpppo.dotdict()
    try:
        options.delay.value = float(args.delay)
        log.normal("Delaying all responses by %r seconds", options.delay.value)
    except ValueError:
        assert '-' in args.delay, \
            "Unrecognized --delay=%r option" % args.delay
        # A range #-#; set up a thread to mutate the option.delay.value over the .range
        options.delay.range = args.delay
        options.delay.value = 0.0
        mutator = threading.Thread(target=delay_range, kwargs=options)
        mutator.daemon = True
        mutator.start()

    # Create all the specified tags/Attributes.  The enip_process function will (somehow) assign the
    # given tag name to reference the specified Attribute.  We'll define an Attribute to print
    # I/O if args.print is specified; reads will only be logged at logging.NORMAL and above.
    class Attribute_print(attribute_class):
        def __getitem__(self, key):
            value = super(Attribute_print, self).__getitem__(key)
            if log.isEnabledFor(logging.NORMAL):
                # Render a slice key as its inclusive first-last indices; a scalar as itself
                beg = key.indices(len(self))[0] if isinstance(key, slice) else key
                end = key.indices(len(self))[1] - 1 if isinstance(key, slice) else key
                print("%20s[%5s-%-5s] == %s" % (self.name, beg, end, value))
            return value

        def __setitem__(self, key, value):
            super(Attribute_print, self).__setitem__(key, value)
            beg = key.indices(len(self))[0] if isinstance(key, slice) else key
            end = key.indices(len(self))[1] - 1 if isinstance(key, slice) else key
            print("%20s[%5s-%-5s] <= %s" % (self.name, beg, end, value))

    for t in args.tags:
        tag_name, rest = t, ''
        if '=' in tag_name:
            tag_name, rest = tag_name.split('=', 1)
        tag_type, rest = rest or 'INT', ''
        tag_size = 1
        if '[' in tag_type:
            tag_type, rest = tag_type.split('[', 1)
            assert ']' in rest, "Invalid tag; mis-matched [...]"
            tag_size, rest = rest.split(']', 1)
        assert not rest, "Invalid tag specified; expected tag=<type>[<size>]: %r" % t
        tag_type = str(tag_type).upper()
        typenames = {
            "BOOL": (parser.BOOL, 0),
            "INT": (parser.INT, 0),
            "DINT": (parser.DINT, 0),
            "SINT": (parser.SINT, 0),
            "REAL": (parser.REAL, 0.0),
            "SSTRING": (parser.SSTRING, ''),
        }
        assert tag_type in typenames, "Invalid tag type; must be one of %r" % list(
            typenames)
        tag_class, tag_default = typenames[tag_type]
        try:
            tag_size = int(tag_size)
        except ValueError:
            raise AssertionError("Invalid tag size: %r" % tag_size)

        # Ready to create the tag and its Attribute (and error code to return, if any).  If tag_size
        # is 1, it will be a scalar Attribute.  Since the tag_name may contain '.', we don't want
        # the normal dotdict.__setitem__ resolution to parse it; use plain dict.__setitem__.
        log.normal("Creating tag: %s=%s[%d]", tag_name, tag_type, tag_size)
        tag_entry = cpppo.dotdict()
        tag_entry.attribute = (
            Attribute_print if args.print else attribute_class)(
                tag_name,
                tag_class,
                default=(tag_default if tag_size == 1 else [tag_default] *
                         tag_size))
        tag_entry.error = 0x00
        dict.__setitem__(tags, tag_name, tag_entry)

    # Use the Logix simulator and all the basic required default CIP message processing classes by
    # default (unless some other one was supplied as a keyword option to main(), loaded above into
    # 'options').  This key indexes an immutable value (not another dotdict layer), so is not
    # available for the web API to report/manipulate.  By default, we'll specify no route_path, so
    # any request route_path will be accepted.  Otherwise, we'll create a UCMM-derived class with
    # the specified route_path, which will filter only requests w/ the correct route_path.
    options.setdefault('enip_process', logix.process)
    if identity_class:
        options.setdefault('identity_class', identity_class)
    assert not UCMM_class or not args.route_path, \
        "Specify either a route-path, or a custom UCMM_class; not both"
    if args.route_path is not None:
        # Must be JSON, eg. '[{"link"...}]', or '0'/'false' to explicitly specify no route_path
        # accepted (must be empty in request)
        class UCMM_class_with_route(device.UCMM):
            route_path = json.loads(args.route_path)

        UCMM_class = UCMM_class_with_route
    if UCMM_class:
        options.setdefault('UCMM_class', UCMM_class)
    if message_router_class:
        options.setdefault('message_router_class', message_router_class)
    if connection_manager_class:
        options.setdefault('connection_manager_class',
                           connection_manager_class)

    # The Web API

    # Deduce web interface:port address to bind, and correct types (default is address, above).
    # Default to the same interface as we're bound to, port 80.  We'll only start if non-empty --web
    # was provided, though (even if it's just ':', to get all defaults).  Usually you'll want to
    # specify at least --web :[<port>].
    http = args.web.split(':')
    assert 1 <= len(
        http) <= 2, "Invalid --web [<interface>]:[<port>]: %s" % args.web
    http = (str(http[0]) if http[0] else bind[0],
            int(http[1]) if len(http) > 1 and http[1] else 80)

    if args.web:
        assert 'web' in sys.modules, "Failed to import web API module; --web option not available.  Run 'pip install web.py'"
        logging.normal("EtherNet/IP Simulator Web API Server: %r" % (http, ))
        webserver = threading.Thread(target=web_api, kwargs={'http': http})
        webserver.daemon = True
        webserver.start()

    # The EtherNet/IP Simulator.  Pass all the top-level options keys/values as keywords, and pass
    # the entire tags dotdict as a tags=... keyword.  The server_main server.control signals (.done,
    # .disable) are also passed as the server= keyword.  We are using a cpppo.apidict with a long
    # timeout; this will block the web API for several seconds to allow all threads to respond to
    # the signals delivered via the web API.
    logging.normal("EtherNet/IP Simulator: %r" % (bind, ))
    kwargs = dict(options,
                  latency=latency,
                  size=args.size,
                  tags=tags,
                  server=srv_ctl)

    tf = network.server_thread
    tf_kwds = dict()
    if args.profile:
        tf = network.server_thread_profiling
        tf_kwds['filename'] = args.profile

    disabled = False  # Recognize toggling between en/disabled
    while not srv_ctl.control.done:
        if not srv_ctl.control.disable:
            if disabled:
                logging.detail("EtherNet/IP Server enabled")
                disabled = False
            network.server_main(
                address=bind,
                target=enip_srv,
                kwargs=kwargs,
                # NB: a bare map() is lazy under Python 3 and would never invoke the
                # idle services; force evaluation with a list comprehension
                idle_service=lambda: [f() for f in idle_service],
                thread_factory=tf,
                **tf_kwds)
        else:
            if not disabled:
                logging.detail("EtherNet/IP Server disabled")
                disabled = True
            time.sleep(latency)  # Still disabled; wait a bit

    return 0