Example #1
 def check( self, predicate, deadline=None ):
     """Check if 'predicate' comes True before 'deadline', every self.rate seconds"""
     done			= predicate()
     while not done and ( deadline is None or cpppo.timer() < deadline ):
         time.sleep( self.rate if deadline is None
                     else min( self.rate, max( 0, deadline - cpppo.timer() )))
         done		= predicate()
     return done
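A minimal usage sketch of check() (hedged: 'plc' and its readiness register are hypothetical stand-ins for whatever object provides .check() and .rate):

    import cpppo
    deadline	= cpppo.timer() + 5.0		# allow up to 5 seconds
    ready	= plc.check(
        predicate=lambda: plc.read( 40001 ) == 1,	# hypothetical readiness register
        deadline=deadline )
    assert ready, "Device did not become ready within 5s"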
Example #2
    def complete( self, actuator=1, svoff=False, timeout=None ):
        """Ensure that any prior operation on the actuator is complete w/in timeout; return True iff the
        current operation is detected as being complete.

        According to the documentation, the absence of the X4B "INP" flag should indicate
        completion (see LEC Modbus RTU op Manual.pdf, section 4.4).  However, this does not work,
        and the X48 "BUSY" flag seems to serve this purpose; perhaps it is a documentation error.
    
        If 'svoff' is True, we'll also turn off the servo (clear Y19_SVON) if we detect completion.

        """
        begin			= cpppo.timer()
        if timeout is None:
            timeout		= self.TIMEOUT
        unit			= self.unit( uid=actuator )
        # Loop on True/None; terminate only on False; X48_BUSY contains 0/False when complete
        complete		= self.check(
            predicate=lambda: unit.read( data.X48_BUSY.addr ) == False,
            deadline=None if timeout is None else begin + timeout )
        ( logging.warning if not complete else logging.detail )(
            "Complete: actuator %3d %s", actuator, "success" if complete else "failure" )
        if svoff and complete:
            logging.detail( "ServoOff: actuator %3d", actuator )
            unit.write( data.Y19_SVON.addr, 0 )
        return complete
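How complete() might be invoked (a sketch, assuming 'positioner' is an smc_modbus instance like the one constructed in Example #4):

    if not positioner.complete( actuator=1, svoff=True, timeout=5.0 ):
        logging.warning( "Actuator 1 operation still incomplete after 5s" )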
Example #3
    def _recv( self, size ):
        """On a receive timeout, closes the socket and raises a ConnectionException.  Otherwise,
        returns the available input"""
        if not self.socket:
            raise ConnectionException( self.__str__() )
        begun			= cpppo.timer()
        timeout			= self.timeout # This computes the remaining timeout available

        r, w, e			= select.select( [self.socket], [], [], timeout )
        if r:
            result		= super( ModbusTcpClientTimeout, self )._recv( size )
            log.debug( "Receive success in %7.3f/%7.3fs" % ( cpppo.timer() - begun, timeout ) )
            return result

        self.close()
        log.debug( "Receive failure in %7.3f/%7.3fs" % ( cpppo.timer() - begun, timeout ) )
        raise ConnectionException("Receive from (%s, %s) failed: Timeout" % (
                self.host, self.port ))
Example #4
def test_smc_basic( simulated_actuator_1, simulated_actuator_2 ):

    command,address		= simulated_actuator_1
    command,address		= simulated_actuator_2

    positioner			= smc.smc_modbus()

    '''
    # Initiate polling of actuator 2
    assert positioner.status( actuator=2 )['current_position'] is None
    '''

    # Test polling of actuator 1
    status 			= None
    now				= cpppo.timer()
    while cpppo.timer() < now + 1 and (
            not status
            or status['current_position'] is None ):
        time.sleep( .1 )
        status			= positioner.status( actuator=1 )
    assert status['current_position'] == 0

    # Modify actuator 1 current position
    unit			= positioner.unit( uid=1 )
    unit.write( 40001 + 0x9000, 0x0000 )
    unit.write( 40001 + 0x9001, 0x3a98 )
    
    # make certain it gets polled correctly with updated value
    now				= cpppo.timer()
    status			= None
    while cpppo.timer() < now + 1 and (
            not status
            or status['current_position'] != 15000 ):
        time.sleep( .1 )
        status			= positioner.status( actuator=1 )
    assert status['current_position'] == 15000

    '''
    # ...but the unmodified actuator should still be polling a 0
    assert positioner.status( actuator=2 )['current_position'] == 0
    '''
    positioner.close()
Example #5
    def connect(self):
        """Duplicate the functionality of connect (handling optional .source_address attribute added
        in pymodbus 1.2.0), but pass the computed remaining timeout.

        """
        if self.socket: return True
        log.debug( "Connecting to (%s, %s)" % ( self.host, self.port ))
        begun			= cpppo.timer()
        timeout			= self.timeout # This computes the remaining timeout available
        try:
            self.socket		= socket.create_connection( (self.host, self.port),
                                    timeout=timeout, source_address=getattr( self, 'source_address', None ))
        except socket.error as exc:
            log.debug('Connection to (%s, %s) failed: %s' % (
                self.host, self.port, exc ))
            self.close()
        finally:
            log.debug( "Connect completed in %.3fs" % ( cpppo.timer() - begun ))

        return self.socket is not None
Example #6
 def timeout( self ):
     """Returns the Defaults.Timeout, if no timeout = True|#.# (a hard timeout) has been specified."""
     if self._timeout in (None, True):
         log.debug( "Transaction timeout default: %.3fs" % ( Defaults.Timeout ))
         return Defaults.Timeout
     now		= cpppo.timer()
     eta		= self._started + self._timeout
     if eta > now:
         log.debug( "Transaction timeout remaining: %.3fs" % ( eta - now ))
         return eta - now
     log.debug( "Transaction timeout expired" )
     return 0
Example #7
def logix_remote( count, svraddr, kwargs ):
  try:
    time.sleep(.25) # Wait for server to be established
    # Confirm that a known Register encodes as expected
    data			= cpppo.dotdict()
    data.enip			= {}
    data.enip.options		= 0
    data.enip.session_handle	= 0
    data.enip.status		= 0
    data.enip.sender_context	= {}
    data.enip.sender_context.input = bytearray( [0x00] * 8 )
    data.enip.CIP		= {}
    data.enip.CIP.register 	= {}
    data.enip.CIP.register.options 		= 0
    data.enip.CIP.register.protocol_version	= 1

    data.enip.input		= bytearray( enip.CIP.produce( data.enip ))
    data.input			= bytearray( enip.enip_encode( data.enip ))
    log.normal( "Register Request: %r" % data )
    
    assert bytes( data.input ) == rss_004_request

    # Try to Register a real session, followed by commands
    timeout			= 5

    begun			= cpppo.timer()
    cli				= client.client( host=svraddr[0], port=svraddr[1] )
    assert cli.writable( timeout=timeout )
    elapsed			= cpppo.timer() - begun
    log.normal( "Client Connected in  %7.3f/%7.3fs" % ( elapsed, timeout ))

    begun			= cpppo.timer()
    with cli:
        cli.register( timeout=timeout )
        data,elapsed		= client.await_response( cli, timeout=timeout ) # 'await' pre-3.7; now a Python keyword
    log.normal( "Client Register Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data )
    assert data is not None and 'enip.CIP.register' in data, "Failed to receive Register response"
    assert data.enip.status == 0, "Register response indicates failure: %s" % data.enip.status

    # Establish the EtherNet/IP "session handle" used by all further requests
    cli.session			= data.enip.session_handle

    start			= cpppo.timer()
    with cli:
        for _ in range( count ):
            begun		= cpppo.timer()
            cli.read( path=[{'symbolic': 'SCADA'}, {'element': 12}],
                      elements=201, offset=2, timeout=timeout )
            data,elapsed	= client.await_response( cli, timeout=timeout )
            log.normal( "Client ReadFrg. Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data )

    duration			= cpppo.timer() - start
    log.warning( "Client ReadFrg. Average %7.3f TPS (%7.3fs ea)." % ( count / duration, duration / count ))

    log.normal( "Signal shutdown w/ server.control in object %s", id( kwargs['server']['control'] ))
  finally:
    kwargs['server']['control'].done= True # Signal the server to terminate
Example #8
    def timeout( self, timeout ):
        """When a self.timeout = True|0|#.# is specified, initiate a hard timeout around the following
        transaction(s).  This means that any connect and/or read/write (_recv) must complete within
        the specified timeout (Defaults.Timeout, if 'True' or 0), starting *now*.  Reset to default
        behaviour with self.timeout = None.

        """
        if timeout is None:
            self._started = None
            self._timeout = None
        else:
            self._started = cpppo.timer()
            self._timeout = ( Defaults.Timeout 
                              if ( timeout is True or timeout == 0 )
                              else timeout )
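This setter and the getter in Example #6 cooperate to implement a "hard" transaction deadline: assigning .timeout starts the clock, and every later read of .timeout yields only the remaining time.  A sketch of the intended calling pattern (assuming the ModbusTcpClientTimeout class seen in Examples #3 and #5):

    client		= ModbusTcpClientTimeout( host='localhost', port=502 )
    client.timeout	= 2.5			# hard deadline starts *now*
    client.connect()				# consumes part of the 2.5s budget...
    reply		= client._recv( 256 )	# ...and _recv gets only what remains
    client.timeout	= None			# restore per-call Defaults.Timeout behaviour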
Example #9
 def remember( self, what, last, chng ):
     """ All new events are prepended (inserted at index 0) to the ._events
     container, so newest events appear earliest. """
     if self._events is not None:
         level	= ( self._level( what, last, chng )
                         if hasattr( self._level, '__call__' )
                         else self._level )
         if level is not None and level >= 0: # may be +'ve/0/None, or -'ve (ignored)
             message	= ( self._formatter( what, last, chng )
                         if self._formatter
                         else "%s (was %s)" % ( cpppo.reprlib.repr( chng ), cpppo.reprlib.repr( last )))
             self._events.insert( 0, { 
                     "time":		cpppo.timer(),
                     "level":	level,
                     "group":	self._group,
                     "description":	self._descr,
                     "message":	message,
                     } )
Example #10
def signal_service():
    """Service known signals.  When logging, default to logat NORMAL, but ensure the
    message is seen if higher (eg. WARNING).  Support being in unknown logging
    levels when in/decreasing.

    """
    global levelmap_change
    if levelmap_change:
        rootlog			= logging.getLogger()
        actual			= rootlog.getEffectiveLevel()
        closest			= min( logging_levelmap.values(), key=lambda x:abs(x-actual) )
        highest			= max( logging_levelmap.keys() )
        for i,lvl in logging_levelmap.items():
            if lvl == closest:
                key		= i + levelmap_change
                break
        desired			= logging_levelmap.get( key, logging.DEBUG if key > highest else logging.ERROR )
        if actual != desired:
            rootlog.setLevel( desired )
        levelmap_change		= 0

    global logrotate_signalled
    global uptime_signalled
    if logrotate_signalled:
        logrotate_signalled	= False
        uptime_signalled	= True

        rootlog			= logging.getLogger()
        actual			= rootlog.getEffectiveLevel()
        rootlog.log( max( logging.NORMAL, actual ), "Rotating log files due to signal" )
        for hdlr in logging.root.handlers:
            if isinstance( hdlr, logging.FileHandler ):
                hdlr.close()

    global uptime_basis
    if uptime_signalled:
        uptime_signalled	= False
        uptime			= cpppo.timer() - uptime_basis

        rootlog			= logging.getLogger()
        actual			= rootlog.getEffectiveLevel()
        rootlog.log( max( logging.NORMAL, actual ), "Uptime: %3d:%02d:%06.3f",
                     int( uptime // 3600 ), int( uptime % 3600 // 60 ), uptime % 60 )
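signal_service() only consumes the module-level flags; real signal handlers must set them.  A plausible wiring (a sketch; the signal numbers and handler names are assumptions, not part of the original):

    import signal

    def _verbosity_up( signum, frame ):
        global levelmap_change
        levelmap_change	       += 1	# one step more verbose on next signal_service()

    def _logrotate( signum, frame ):
        global logrotate_signalled
        logrotate_signalled	= True

    signal.signal( signal.SIGUSR1, _verbosity_up )
    signal.signal( signal.SIGHUP,  _logrotate )
    # ...then invoke signal_service() periodically from the main loop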
Example #11
 def _receive( self ):
     """ Receive any previously cached data, with a latency of roughly 1/2
     the specified polling rate"""
     now			= cpppo.timer()
     if self._polled + self.rate > now:
         return
     log.debug( "%s polled" % ( self.description ))
     self._polled       	+= self.rate
     for address, vlist in self._cache.items():
         while vlist:
             t, value	= vlist[0]
             if t > self._polled:
                 break
             # When offline, any value other than the last one written to the
             # PLC is lost; _store will discard if offline, logging with -x>
             # indicates that we'll never see it.
             if self.online or len( vlist ) > 1:
                 self._store( address, value )
             if not self.online:
                 break
             vlist.popleft()
Example #12
 def events( self, since=None, purge=False ):
     """ A generator yielding the stream of relevant events (None if since is
     None, otherwise limited to those with a ["time"] strictly greater than
     'since'), optionally purging as we go; if we complete the iteration, our
     ._events will be replaced with the retain list.  This is not a property,
     because it doesn't return simple value, and we want to (optionally)
     purge the internal _events list, or provide a 'since' time."""
     unique		= set()
     retain		= []
     now	        = cpppo.timer()
     for e in self._events:
         if self._retain:
             if e["description"] not in unique:
                 unique.add( e["description"] )
             else:
                 if e["time"] + self._retain < now:
                     # old event, and we've seen this description before; skip
                     log.debug( "Purging event: %r" % ( e ))
                     continue
             retain.append( e )
         if since is not None and e["time"] > since:
             yield e
     if self._retain:
         self._events= retain
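Consuming the generator (a sketch; 'alarm' stands in for whatever object bears .events(), and we pass 0 rather than None as 'since', because a None 'since' yields no events):

    last_seen		= 0
    for e in alarm.events( since=last_seen ):
        print( "%(time)s [%(level)s] %(message)s" % e )
        last_seen	= e["time"]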
Example #13
if __name__ == "__main__":
    ap				= argparse.ArgumentParser()
    ap.add_argument( '-d', '--depth',    default=0, help="Pipelining depth" )
    ap.add_argument( '-m', '--multiple', default=0, help="Multiple Service Packet size limit" )
    ap.add_argument( '-r', '--repeat',   default=1, help="Repeat requests this many times" )
    ap.add_argument( '-a', '--address',  default='localhost', help="Hostname of target Controller" )
    ap.add_argument( '-t', '--timeout',  default=None, help="I/O timeout seconds (default: None)" )
    ap.add_argument( 'tags', nargs='+', help="Tags to read/write" )
    args			= ap.parse_args()

    depth			= int( args.depth )
    multiple			= int( args.multiple )
    repeat			= int( args.repeat )
    operations			= client.parse_operations( args.tags * repeat )
    timeout			= None
    if args.timeout is not None:
        timeout			= float( args.timeout )

    with client.connector( host=args.address, timeout=timeout ) as conn:
        start			= cpppo.timer()
        num,idx			= -1,-1
        for num,(idx,dsc,op,rpy,sts,val) in enumerate( conn.pipeline(
                operations=operations, depth=depth,
                multiple=multiple, timeout=timeout )):
            print( "%s: %3d: %s" % ( timestamp(), idx, val ))
    
        elapsed			= cpppo.timer() - start
        print( "%3d operations using %3d requests in %7.2fs at pipeline depth %2s; %5.1f TPS" % (
            num+1, idx+1, elapsed, args.depth, num / elapsed ))
Example #14
def failure( exc ):
    failed.append( (cpppo.timer(),str(exc)) )
Example #15
    def _poller(self):
        """Asynchronously (ie. in another thread) poll all the specified registers, on the designated
        poll cycle.  Until we have something to do (self.rate isn't None), just wait.

        We'll log whenever we begin/cease polling any given range of registers.

        """
        target = cpppo.timer()
        while not self.done and logging:  # Module may be gone in shutting down
            # Poller is dormant 'til a non-None/zero rate and data specified
            if not self.rate or not self._data:
                time.sleep(.1)
                continue

            # Delay 'til poll target
            now = cpppo.timer()
            if now < target:
                time.sleep(target - now)
                now = cpppo.timer()

            # Ready for another poll.  Check if we've slipped (missed cycle(s)), and then compute
            # the next poll cycle target; this attempts to retain cadence.
            slipped = int((now - target) / self.rate)
            if slipped:
                log.normal("Polling slipped; missed %d cycles" % (slipped))
            target += self.rate * (slipped + 1)

            # Perform polls, re-acquiring lock between each poll to allow others
            # to interject.  We'll sort the known register addresses in _data,
            # merge ranges, read the values from the PLC, and store them in
            # _data.

            # TODO: Split on and optimize counts for differing multi-register
            # limits for Coils, Registers

            # WARN: list comprehension over self._data must be atomic, because
            # we don't lock, and someone could call read/poll, adding entries to
            # self._data between reads.  However, since merge's register ranges
            # are sorted, all self._data keys are consumed before the list is
            # iterated.
            rngs = set(merge(((a, 1) for a in self._data), reach=self.reach))
            succ = set()
            fail = set()
            busy = 0.0
            for address, count in rngs:
                with self.lock:
                    begin = cpppo.timer()
                    try:
                        # Read values; on success (no exception, something other
                        # than None returned), immediately take online;
                        # otherwise attempts to _store will be rejected.
                        value = self._read(address, count)
                        if not self.online:
                            self.online = True
                            log.critical(
                                "Polling: PLC %s online; success polling %s: %s"
                                % (self.description, address,
                                   cpppo.reprlib.repr(value)))
                        if (address, count) not in self.polling:
                            log.detail("Polling %6d-%-6d (%5d)" %
                                       (address, address + count - 1, count))
                        succ.add((address, count))
                        self._store(
                            address,
                            value)  # Handle scalar or list/tuple value(s)
                    except ModbusException as exc:
                        # Modbus error; Couldn't read the given range.  Only log
                        # the first time failure to poll this range is detected
                        fail.add((address, count))
                        if (address, count) not in self.failing:
                            log.warning("Failing %6d-%-6d (%5d): %s" %
                                        (address, address + count - 1, count,
                                         str(exc)))
                    except Exception as exc:
                        # Something else; always log
                        fail.add((address, count))
                        log.warning("Failing %6d-%-6d (%5d): %s" %
                                    (address, address + count - 1, count,
                                     traceback.format_exc()))
                    busy += cpppo.timer() - begin

                # Prioritize other lockers (ie. write).  Contrary to popular opinion, sleep(0) does
                # *not* effectively yield the current Thread's quanta, at least on Python 2.7.6!
                time.sleep(0.001)

            # We've already warned about polls that have failed; also log all
            # polls that have ceased (failed, or been replaced by larger polls)
            ceasing = self.polling - succ - fail
            for address, count in ceasing:
                log.info("Ceasing %6d-%-6d (%5d)" %
                         (address, address + count - 1, count))

            self.polling = succ
            self.failing = fail
            self.duration = busy

            # The "load" is computed by comparing the "duration" of the last poll vs. the target
            # poll rate (in seconds).  A load of 1.0 indicates the polls consumed exactly 100% of
            # the target rate.  Compute loads over approximately the last 1, 5 and 15 minutes worth
            # of polls.  The load is the proportion of the current poll rate that is consumed by
            # poll activity.  Even if the load < 1.0, polls may "slip" due to other (eg. write)
            # activity using PLC I/O capacity.
            load = (busy / self.rate) if self.rate > 0 else 1.0
            ppm = (60.0 / self.rate) if self.rate > 0 else 1.0
            self.load = tuple(
                cpppo.exponential_moving_average(cur, load, 1.0 /
                                                 (minutes * ppm))
                for minutes, cur in zip((1, 5, 15), self.load))

            # Finally, if we've got stuff to poll and we aren't polling anything successfully, and
            # we're not yet offline, warn and take offline, and then report the completion of another
            # poll cycle.
            if self._data and not succ and self.online:
                log.critical("Polling: PLC %s offline" % (self.description))
                self.online = False
            self.counter += 1
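The load bookkeeping above leans on cpppo.exponential_moving_average with a weight of 1/(minutes * polls-per-minute).  The underlying recurrence (a sketch of the standard formula, not necessarily cpppo's exact code) is:

    def exponential_moving_average( cur, val, weight ):
        # Blend sample 'val' into running average 'cur'; a None 'cur'
        # bootstraps the average with the first sample.
        return val if cur is None else cur + weight * ( val - cur )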
Example #16
def logix_remote( count, svraddr, kwargs ):
    time.sleep(.25)
    data			= cpppo.dotdict()
    data.enip			= {}
    data.enip.options		= 0
    data.enip.session_handle	= 0
    data.enip.status		= 0
    data.enip.sender_context	= {}
    data.enip.sender_context.input = bytearray( [0x00] * 8 )
    	#array.array( cpppo.type_bytes_array_symbol, "\x00" * 8 )
    data.enip.CIP		= {}
    data.enip.CIP.register 	= {}
    data.enip.CIP.register.options 		= 0
    data.enip.CIP.register.protocol_version	= 1

    data.enip.input		= bytearray( enip.CIP.produce( data.enip ))
    data.input			= bytearray( enip.enip_encode( data.enip ))
    log.normal( "Register Request: %r" % data )
    
    assert bytes( data.input ) == rss_004_request


    timeout			= 5

    begun			= cpppo.timer()
    cli				= client.client( host=svraddr[0], port=svraddr[1] )
    assert cli.writable( timeout=timeout )
    elapsed			= cpppo.timer() - begun
    log.normal( "Client Connected in  %7.3f/%7.3fs" % ( elapsed, timeout ))

    begun			= cpppo.timer()
    request			= cli.register( timeout=timeout )
    elapsed			= cpppo.timer() - begun
    log.normal( "Client Register Sent %7.3f/%7.3fs: %r" % ( elapsed, timeout, request ))
    for data in cli:
        elapsed			= cpppo.timer() - begun
        log.detail( "Client Register Resp %7.3f/%7.3fs: %r" % ( elapsed, timeout, data ))
        if data is None:
            if elapsed <= timeout:
                cli.readable( timeout=timeout - elapsed )
                continue
        break
    elapsed			= cpppo.timer() - begun
    log.normal( "Client Register Rcvd %7.3f/%7.3fs: %r" % ( elapsed, timeout, data ))
    assert data is not None and 'enip.CIP.register' in data, "Failed to receive Register response"
    assert data.enip.status == 0, "Register response indicates failure: %s" % data.enip.status

    cli.session			= data.enip.session_handle


    start			= cpppo.timer()
    for _ in range( count ):
        begun			= cpppo.timer()
        request			= cli.read( path=[{'symbolic': 'SCADA'}, {'element': 12}],
                                                elements=1, offset=0, timeout=timeout )
        elapsed			= cpppo.timer() - begun
        log.normal( "Client ReadFrg. Sent %7.3f/%7.3fs: %r" % ( elapsed, timeout, request ))
        for data in cli:
            elapsed		= cpppo.timer() - begun
            log.detail( "Client ReadFrg. Resp %7.3f/%7.3fs: %r" % ( elapsed, timeout, data ))
            if data is None:
                if elapsed <= timeout:
                    cli.readable( timeout=timeout - elapsed )
                    continue
            break
        elapsed			= cpppo.timer() - begun
        log.normal( "Client ReadFrg. Rcvd %7.3f/%7.3fs: %r" % ( elapsed, timeout, data ))

    duration			= cpppo.timer() - start
    log.warning( "Client ReadFrg. Average %7.3f TPS (%7.3fs ea)." % ( count / duration, duration / count ))

    kwargs['server'].control.done= True
Example #17
def test_history_sequential():
    for _ in range( 3 ):
        path		= "/tmp/test_sequential_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases.  Note: times are truncated to milliseconds, so timestamps saved out will
        # probably evaluate as < the original value when read back in!  Since each file contains
        # only one record, we must be careful to use 'strict', to ensure we open the next file
        # strictly greater than the last timestamp (or we'll open the same file again!)
        now		= timer()
        count		= 10
        for e in range( count ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has 0 extension
            files.append( f )
            with logger( f ) as l:
                l.write( { 40001: count - e }, now=now - e )
            if e:
                # Compress .1 onward using a random format; randomly delete the original
                # uncompressed file, so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    fd.write( open( f, 'rb' ).read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        # Attempt to begin loading history around the middle of the recording
        rdr		= reader( path,
                                          historical=now - random.uniform( 3.0, 9.0 ),
                                          basis=now + random.uniform( -.5, +.5 ),
                                          factor=3 )

        # Begin with the first historical file before our computed advancing historical time (we
        # could provide a specific timestamp here, if we wanted).  No lookahead.
        ts_l		= None
        f_l		= None
        after		= False # only first open is "before"; rest are "after"
        strict		= False # only goes false when timestamp increases in the same file
        deadline	= now + count
        while timer() <= deadline:
            # open next file beginning after the last ts
            o		= rdr.open( target=ts_l, after=after, strict=strict ) # Generator; doesn't do much here...
            after	= True
            strict	= True
            for (f,l,cur),(ts,js) in o: # raises HistoryExhausted on open() generator failure
                assert ts_l is None or ts >= ts_l, \
                    "Historical record out of sequence; %s isn't >= %s" % ( ts, ts_l )
                ts_l	= ts
                if js is None:
                    logging.info( "@%s: not yet available", ts )
                    assert ts > cur, "Next record should have been returned; not in future"
                    time.sleep( .1 )
                else:
                    logging.normal( "@%s: %r", ts, js )
                    assert ts <= cur, "Next record shouldn't have been returned; yet future"
                    if f == f_l and ts > ts_l:
                        strict = False
                f_l,ts_l= f,ts
        assert False, "Should have raised HistoryExhausted by now"
    except HistoryExhausted as exc:
        logging.normal( "History exhausted: %s", exc )

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
Example #18
def test_history_performance():
    try:
        tracemalloc.start()
    except:
        pass

    for _ in range( 3 ):
        path		= "/tmp/test_performance_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        day		= 24*60*60
        dur		= 3*day		# a few days worth of data
        regstps		= 0.0,5.0	# 0-5secs between updates
        numfiles	= dur//day+1	# ~1 file/day, but at least 2
        values		= {}		# Initial register values
        regscount	= 1000		# Number of different registers
        regschanged	= 1,10		# From 1-10 registers per row
        regsbase	= 40001

        start		= timer()

        now = beg	= start - dur
        linecnt		= 0
        for e in reversed( range( numfiles )):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                if values:
                    l.write( values, now=now ); linecnt += 1
                while now < beg + len(files) * dur/numfiles:
                    lst	= now
                    now += random.uniform( *regstps )
                    assert now >= lst
                    assert timestamp( now ) >= timestamp( lst ), "now: %s, timestamp(now): %s" % ( now, timestamp( now ))
                    updates = {}
                    for _ in range( random.randint( *regschanged )):
                        updates[random.randint( regsbase, regsbase + regscount - 1 )] = random.randint( 0, (1<<16) - 1 )
                    values.update( updates )
                    l.write( updates, now=now ); linecnt += 1
                lst 	= now
                now    += random.uniform( *regstps )
                assert now >= lst
                assert timestamp( now ) >= timestamp( lst )
            if e:
                # Compress .1 onward using a random format; randomly delete the original
                # uncompressed file, so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    fd.write( open( f, 'rb' ).read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        logging.warning( "Generated data in %.3fs; lines: %d", timer() - start, linecnt )

        # Start somewhere within the first 0-1% of the duration past the beginning, forcing the
        # loader to look back to find the first file.  Try to do it all in the next 'playback'
        # second (just to push it to the max), in 'chunks' pieces.
        historical	= timestamp( random.uniform( beg + dur*0/100, beg + dur*1/100 ))
        basis		= timer()
        playback	= 2.0 * dur/day # Can sustain ~2 seconds / day of history on a single CPU
        chunks		= 1000
        factor		= dur / playback
        lookahead	= 60.0
        duration	= None
        if random.choice( (True,False) ):
            duration	= random.uniform( dur * 98/100, dur * 102/100 )

        begoff		= historical.value - beg
        endoff		= 0 if duration is None else (( historical.value + duration ) - ( beg + dur ))
        logging.warning( "Playback starts at beginning %s %s, duration %s, ends at ending %s %s",
                         timestamp( beg ), format_offset( begoff, ms=False ),
                         None if duration is None else format_offset( duration, ms=False, symbols='-+' ),
                         timestamp( beg + dur ), format_offset( endoff, ms=False ))

        ld		= loader(
            path, historical=historical, basis=basis, factor=factor, lookahead=lookahead, duration=duration )
        eventcnt	= 0
        slept		= 0
        cur		= None
        while ld:
            once	= False
            while ld.state < ld.AWAITING or not once:
                once		= True
                upcoming	= None
                limit		= random.randint( 0, 250 )
                if random.choice( (True,False) ):
                    upcoming	= ld.advance()
                    if random.choice( (True,False) ) and cur:
                        # ~25% of the time, provide an 'upcoming' timestamp that is between the
                        # current advancing historical time and the last load time.
                        upcoming-= random.uniform( 0, upcoming.value - cur.value )
                cur,events	= ld.load( upcoming=upcoming, limit=limit )
                eventcnt       += len( events )
                advance		= ld.advance()
                offset		= advance.value - cur.value
                logging.detail( "%s loaded up to %s (%s w/ upcoming %14s); %4d future, %4d values: %4d events / %4d limit" ,
                                ld, cur, format_offset( offset ),
                                format_offset( upcoming.value - advance.value ) if upcoming is not None else None,
                                len( ld.future ), len( ld.values ), len( events ), limit )

            logging.warning( "%s loaded up to %s; %3d future, %4d values: %6d events total",
                                ld, cur, len( ld.future ), len( ld.values ), eventcnt )
            try:
                snapshot	= tracemalloc.take_snapshot()
                display_top( snapshot, limit=10 )
            except:
                pass

            time.sleep( playback/chunks )
            slept	       += playback/chunks

        elapsed		= timer() - basis
        eventtps	= eventcnt // ( elapsed - slept )
        logging.error( "Playback in %.3fs (slept %.3fs); events: %d ==> %d historical records/sec",
                       elapsed, slept, eventcnt, eventtps )
        if not logging.getLogger().isEnabledFor( logging.NORMAL ):
            # Ludicrously low threshold, to pass tests on very slow machines
            assert eventtps >= 1000, \
                "Historical event processing performance low: %d records/sec" % eventtps
        try:
            display_biggest_traceback()
        except:
            pass

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        '''
        for f in files:
            logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
        '''
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
Example #19
 def __init__( self, description, **kwargs ):
     super( poller_simulator, self ).__init__( description=description, **kwargs )
     self._cache		= {}		# times/values stored to simulated PLC
     self._polled		= cpppo.timer()	#   scheduled to be polled
Example #20
    #logix_performance()
    test_logix_remote()

    print('\n\nSORTED BY TOT TIME')
    yappi.print_stats( sys.stdout, sort_type=yappi.SORTTYPE_TTOT, limit=100 )
    
    print('\n\nSORTED BY SUB TIME')
    yappi.print_stats( sys.stdout, sort_type=yappi.SORTTYPE_TSUB, limit=100 )
    '''

    # To profile using line_profiler: kernprof.py -v -l logix_test.py
    repetitions = 250
    count = repetitions

    start = cpppo.timer()
    logix_performance(repeat=count)
    duration = cpppo.timer() - start
    log.warning("Local  ReadFrg. Average %7.3f TPS (%7.3fs ea)." %
                (count / duration, duration / count))
    '''

    # Profile the main thread
    import cProfile
    import pstats
    prof_file			= "logix_test.profile"

    start			= cpppo.timer()
    cProfile.run( "logix_performance( repeat=%d )" % repetitions, prof_file )
    duration			= cpppo.timer() - start
    log.warning( "Local  ReadFrg. Average %7.3f TPS (%7.3fs ea)." % ( count / duration, duration / count ))
Example #21
File: main.py Project: ekw/cpppo
def enip_srv( conn, addr, enip_process=None, delay=None, **kwds ):
    """Serve one Ethernet/IP client 'til EOF; then close the socket.  Parses headers and encapsulated
    EtherNet/IP request data 'til either the parser fails (the Client has submitted an un-parsable
    request), or the request handler fails.  Otherwise, encodes the data.response in an EtherNet/IP
    packet and sends it back to the client.

    Use the supplied enip_process function to process each parsed EtherNet/IP frame, returning True
    if a data.response is formulated, False if the session has ended cleanly, or raise an Exception
    if there is a processing failure (eg. an unparsable request, indicating that the Client is
    speaking an unknown dialect and the session must close catastrophically.)

    If a partial EtherNet/IP header is parsed and an EOF is received, the enip_header parser will
    raise an AssertionError, and we'll simply drop the connection.  If we receive a valid header and
    request, the supplied enip_process function is expected to formulate an appropriate error
    response, and we'll continue processing requests.

    An optional numeric delay value (or any delay object with a .value attribute evaluating to a
    numeric value) may be specified; every response will be delayed by the specified number of
    seconds.  We assume that such a value may be altered over time, so we access it afresh for each
    use.

    All remaining keywords are passed along to the supplied enip_process function.
    """
    global latency
    global timeout

    name			= "enip_%s" % addr[1]
    log.normal( "EtherNet/IP Server %s begins serving peer %s", name, addr )


    source			= cpppo.rememberable()
    with parser.enip_machine( name=name, context='enip' ) as enip_mesg:

        # We can be provided a dotdict() to contain our stats.  If one has been passed in, then this
        # means that our stats for this connection will be available to the web API; it may set
        # stats.eof to True at any time, terminating the connection!  The web API will try to coerce
        # its input into the same type as the variable, so we'll keep it an int (type bool doesn't
        # handle coercion from strings).  We'll use an apidict, to ensure that attribute values set
        # via the web API thread (eg. stats.eof) are blocking 'til this thread wakes up and reads
        # them.  Thus, the web API will block setting .eof, and won't return to the caller until the
        # thread is actually in the process of shutting down.  Internally, we'll use __setitem__
        # indexing to change stats values, so we don't block ourself!
        stats			= cpppo.apidict( timeout=timeout )
        connkey			= ( "%s_%d" % addr ).replace( '.', '_' )
        connections[connkey]	= stats
        try:
            assert enip_process is not None, \
                "Must specify an EtherNet/IP processing function via 'enip_process'"
            stats['requests']	= 0
            stats['received']	= 0
            stats['eof']	= False
            stats['interface']	= addr[0]
            stats['port']	= addr[1]
            while not stats.eof:
                data		= cpppo.dotdict()

                source.forget()
                # If no/partial EtherNet/IP header received, parsing will fail with a NonTerminal
                # Exception (dfa exits in non-terminal state).  Build data.request.enip:
                begun		= cpppo.timer()
                log.detail( "Transaction begins" )
                for mch,sta in enip_mesg.run( path='request', source=source, data=data ):
                    if sta is None:
                        # No more transitions available.  Wait for input.  EOF (b'') will lead to
                        # termination.  We will simulate non-blocking by looping on None (so we can
                        # check our options, in case they've been changed).  If we still have input
                        # available to process right now in 'source', we'll just check (0 timeout);
                        # otherwise, use the specified server.control.latency.
                        msg	= None
                        while msg is None and not stats.eof:
                            wait=( kwds['server']['control']['latency']
                                   if source.peek() is None else 0 )
                            brx = cpppo.timer()
                            msg	= network.recv( conn, timeout=wait )
                            now = cpppo.timer()
                            log.detail( "Transaction receive after %7.3fs (%5s bytes in %7.3f/%7.3fs)" % (
                                now - begun, len( msg ) if msg is not None else "None",
                                now - brx, wait ))

                            # After each block of input (or None), check if the server is being
                            # signalled done/disabled; we need to shut down so signal eof.  Assumes
                            # that (shared) server.control.{done,disable} dotdict be in kwds.  We do
                            # *not* read using attributes here, to avoid reporting completion to
                            # external APIs (eg. web) awaiting reception of these signals.
                            if kwds['server']['control']['done'] or kwds['server']['control']['disable']:
                                log.detail( "%s done, due to server done/disable", 
                                            enip_mesg.name_centered() )
                                stats['eof']	= True
                            if msg is not None:
                                stats['received']+= len( msg )
                                stats['eof']	= stats['eof'] or not len( msg )
                                log.detail( "%s recv: %5d: %s", enip_mesg.name_centered(),
                                            len( msg ) if msg is not None else 0, cpppo.reprlib.repr( msg ))
                                source.chain( msg )
                            else:
                                # No input.  If we have symbols available, no problem; continue.
                                # This can occur if the state machine cannot make a transition on
                                # the input symbol, indicating an unacceptable sentence for the
                                # grammar.  If it cannot make progress, the machine will terminate
                                # in a non-terminal state, rejecting the sentence.
                                if source.peek() is not None:
                                    break
                                # We're at a None (can't proceed), and no input is available.  This
                                # is where we implement "Blocking"; just loop.

                log.detail( "Transaction parsed  after %7.3fs" % ( cpppo.timer() - begun ))
                # Terminal state and EtherNet/IP header recognized, or clean EOF (no partial
                # message); process and return response
                if 'request' in data:
                    stats['requests'] += 1
                try:
                    # enip_process must be able to handle no request (empty data), indicating the
                    # clean termination of the session if closed from this end (not required if
                    # enip_process returned False, indicating the connection was terminated by
                    # request.)
                    delayseconds= 0	# response delay (if any)
                    if enip_process( addr, data=data, **kwds ):
                        # Produce an EtherNet/IP response carrying the encapsulated response data.
                        # If no encapsulated data, ensure we also return a non-zero EtherNet/IP
                        # status.  A non-zero status indicates the end of the session.
                        assert 'response.enip' in data, "Expected EtherNet/IP response; none found"
                        if 'input' not in data.response.enip or not data.response.enip.input:
                            log.warning( "Expected EtherNet/IP response encapsulated message; none found" )
                            assert data.response.enip.status, "If no/empty response payload, expected non-zero EtherNet/IP status"

                        rpy	= parser.enip_encode( data.response.enip )
                        log.detail( "%s send: %5d: %s %s", enip_mesg.name_centered(),
                                    len( rpy ), cpppo.reprlib.repr( rpy ),
                                    ("delay: %r" % delay) if delay else "" )
                        if delay:
                            # A delay (anything with a delay.value attribute) == #[.#] (convertible
                            # to float) is ok; may be changed via web interface.
                            try:
                                delayseconds = float( delay.value if hasattr( delay, 'value' ) else delay )
                                if delayseconds > 0:
                                    time.sleep( delayseconds )
                            except Exception as exc:
                                log.detail( "Unable to delay; invalid seconds: %r", delay )
                        try:
                            conn.send( rpy )
                        except socket.error as exc:
                            log.detail( "Session ended (client abandoned): %s", exc )
                            stats['eof'] = True
                        if data.response.enip.status:
                            log.warning( "Session ended (server EtherNet/IP status: 0x%02x == %d)",
                                        data.response.enip.status, data.response.enip.status )
                            stats['eof'] = True
                    else:
                        # Session terminated.  No response, just drop connection.
                        log.detail( "Session ended (client initiated): %s",
                                    parser.enip_format( data ))
                        stats['eof'] = True
                    log.detail( "Transaction complete after %7.3fs (w/ %7.3fs delay)" % (
                        cpppo.timer() - begun, delayseconds ))
                except:
                    log.error( "Failed request: %s", parser.enip_format( data ))
                    enip_process( addr, data=cpppo.dotdict() ) # Terminate.
                    raise

            stats['processed']	= source.sent
        except:
            # Parsing failure.  We're done.  Suck out some remaining input to give us some context.
            stats['processed']	= source.sent
            memory		= bytes(bytearray(source.memory))
            pos			= len( source.memory )
            future		= bytes(bytearray( b for b in source ))
            where		= "at %d total bytes:\n%s\n%s (byte %d)" % (
                stats.processed, repr(memory+future), '-' * (len(repr(memory))-1) + '^', pos )
            log.error( "EtherNet/IP error %s\n\nFailed with exception:\n%s\n", where,
                         ''.join( traceback.format_exception( *sys.exc_info() )))
            raise
        finally:
            # Not strictly necessary to close (network.server_main will discard the socket,
            # implicitly closing it), but we'll do it explicitly here in case the thread doesn't die
            # for some other reason.  Clean up the connections entry for this connection address.
            connections.pop( connkey, None )
            log.normal( "%s done; processed %3d request%s over %5d byte%s/%5d received (%d connections remain)", name,
                        stats.requests,  " " if stats.requests == 1  else "s",
                        stats.processed, " " if stats.processed == 1 else "s", stats.received,
                        len( connections ))
            sys.stdout.flush()
            conn.close()
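The docstring above fixes the enip_process contract: return True once a data.response has been formulated, False on clean session termination, or raise an Exception to abort.  A minimal contract-conforming stub (purely illustrative; a real handler must parse and answer the encapsulated CIP payload):

    def enip_process_stub( addr, data=None, **kwds ):
        if not data or 'request' not in data:
            return False			# empty request: clean end of session
        # Illustrative only: echo the request's EtherNet/IP header back w/ zero status
        data.response			= data.request
        data.response.enip.status	= 0
        return True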
Example #22
 def _write(self, address, value):
     """ Remember a data value, with a timer to simulate delayed polling """
     self._cache.setdefault(address, collections.deque()).append(
         (cpppo.timer(), value))
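How this cache is consumed (a sketch tying Examples #11, #19 and #22 together; the constructor arguments are assumptions):

    sim			= poller_simulator( description="SIM", rate=.25 )
    sim._write( 40001, 123 )	# cached with the current cpppo.timer() time
    time.sleep( sim.rate )	# let one polling period elapse
    sim._receive()		# replays cached writes whose time has come, via _store()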
Example #23
    def position( self, actuator=1, timeout=TIMEOUT, home=True, noop=False, svoff=False, **kwds ):
        """Begin position operation on 'actuator' w/in 'timeout'.  

        :param home: Return to home position before any other movement
        :param noop: Do not perform final activation

        Running with specified data

        1   - Set internal flag Y30 (input invalid flag)
        2   - Write 1 to internal flag Y19 (SVON)
        2a  -   and confirm internal flag X49 (SVRE) has become "1"
        3   - Write 1 to internal flag Y1C (SETUP)
        3a  -   and confirm internal flag X4A (SETON) has become "1"
        4   - Write data to D9102-D9110
        5   - Write Operation Start instruction "1" to D9100 (returns to 0 after processed)

        If no positioning kwds are provided, then no new position is configured.  If 'noop' is True,
        everything except the final activation is performed.

        """
        begin			= cpppo.timer()
        if timeout is None:
            timeout		= self.TIMEOUT
        assert self.complete( actuator=actuator, svoff=svoff, timeout=timeout ), \
            "Previous actuator position incomplete within timeout %r" % timeout
        status			= self.status( actuator=actuator )
        if not kwds:
            return status

        # Previous positioning complete, and possibly new position keywords provided.
        logging.detail( "Position: actuator %3d setdata: %r", actuator, kwds )
        unit			= self.unit( uid=actuator )

        # 1: set INPUT_INVALID
        unit.write( data.Y30_INPUT_INVALID.addr, 1 )

        # 2: set SVON, check SVRE
        if timeout:
            assert cpppo.timer() <= begin + timeout, \
                "Failed to complete positioning SVON/SVRE within timeout"
        unit.write( data.Y19_SVON.addr, 1 )
        svre			= self.check(
            predicate=lambda: unit.read( data.Y19_SVON.addr ) and unit.read( data.X49_SVRE.addr ),
            deadline=None if timeout is None else begin + timeout )
        assert svre, \
            "Failed to set SVON True and read SVRE True"

        # 3: Return to home? set SETUP, check SETON.  Otherwise, clear SETUP.  It is very unclear
        #    whether we need to do this, and/or whether we need to clear it afterwards.
        if home:
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning SETUP/SETON within timeout"
            unit.write( data.Y1C_SETUP.addr, 1 )
            seton			= self.check(
                predicate=lambda: unit.read( data.Y1C_SETUP.addr ) and unit.read( data.X4A_SETON.addr ),
                deadline=None if timeout is None else begin + timeout )
            if not seton:
                logging.warning( "Failed to set SETUP True and read SETON True" )
            # assert seton, \
            #    "Failed to set SETUP True and read SETON True"
        else:
            unit.write( data.Y1C_SETUP.addr, 0 )
        
        # 4: Write any changed position data.  The actuator doesn't accept individual register
        # writes, so we use multiple register writes for each value.
        for k,v in kwds.items():
            assert k in data, \
                "Unrecognized positioning keyword: %s == %r" % ( k, v )
            assert STEP_DATA_BEG <= data[k].addr <= STEP_DATA_END, \
                "Invalid positioning keyword: %s == %r; not within position data address range" % ( k, v )
            format		= data[k].get( 'format' )
            if format:
                # Create a big-endian buffer.  This will be some multiple of register size.  Then,
                # unpack it into some number of 16-bit big-endian registers (this will be a tuple).
                buf		= struct.pack( '>'+format, v )
                values		= [ struct.unpack_from( '>H', buf[o:] )[0] for o in range( 0, len( buf ), 2 ) ]
            else:
                values		= [ v ]
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning data update within timeout"
            logging.normal( "Position: actuator %3d updated: %16s: %8s (== %s)", actuator, k, v, values )
            unit.write( data[k].addr, values )

        # 5: set operation_start to 0x0100 (1 in high-order bytes) unless 'noop'
        if not noop:
            unit.write( data.operation_start.addr, 0x0100 )
            started			= self.check(
                predicate=lambda: unit.read( data.operation_start.addr ) == 0x0100,
                deadline=None if timeout is None else begin + timeout )
            assert started, \
                "Failed to detect positioning start within timeout"

        return self.status( actuator=actuator )
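Invoking position() (a sketch; the step-data keyword names index the module's 'data' register table and are assumptions here, as are the values):

    status		= positioner.position(
        actuator=1, timeout=10.0, home=True,
        speed=100, position=15000 )		# hypothetical step-data keywords
    logging.normal( "Positioning begun: %r", status )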
Example #24
    def handle_tcp(
        self, conn, address, session, name, enip_process, delay=None, **kwds
    ):
        """
        Handle a TCP client
        """
        source = cpppo.rememberable()
        with parser.enip_machine(name=name, context="enip") as machine:
            try:
                assert (
                    address
                ), "EtherNet/IP CIP server for TCP/IP must be provided a peer address"
                stats, connkey = self.stats_for(address)
                while not stats.eof:
                    data = cpppo.dotdict()
                    source.forget()
                    # If no/partial EtherNet/IP header received, parsing will fail with a NonTerminal
                    # Exception (dfa exits in non-terminal state).  Build data.request.enip:
                    begun = cpppo.timer()
                    with contextlib.closing(
                        machine.run(path="request", source=source, data=data)
                    ) as engine:
                        # PyPy compatibility; avoid deferred destruction of generators
                        for _, sta in engine:
                            if sta is not None:
                                continue
                            # No more transitions available.  Wait for input.  EOF (b'') will lead to
                            # termination.  We will simulate non-blocking by looping on None (so we can
                            # check our options, in case they've been changed).  If we still have input
                            # available to process right now in 'source', we'll just check (0 timeout);
                            # otherwise, use the specified server.control.latency.
                            msg = None
                            while msg is None and not stats.eof:
                                wait = (
                                    kwds["server"]["control"]["latency"]
                                    if source.peek() is None
                                    else 0
                                )
                                brx = cpppo.timer()
                                msg = network.recv(conn, timeout=wait)
                                now = cpppo.timer()
                                (logger.info if msg else logger.debug)(
                                    "Transaction receive after %7.3fs (%5s bytes in %7.3f/%7.3fs)",
                                    now - begun,
                                    len(msg) if msg is not None else "None",
                                    now - brx,
                                    wait,
                                )

                                # After each block of input (or None), check if the server is being
                                # signalled done/disabled; we need to shut down so signal eof.  Assumes
                                # that (shared) server.control.{done,disable} dotdict be in kwds.  We do
                                # *not* read using attributes here, to avoid reporting completion to
                                # external APIs (eg. web) awaiting reception of these signals.
                                if (
                                    kwds["server"]["control"]["done"]
                                    or kwds["server"]["control"]["disable"]
                                ):
                                    logger.info(
                                        "%s done, due to server done/disable",
                                        machine.name_centered(),
                                    )
                                    stats["eof"] = True
                                if msg is not None:
                                    stats["received"] += len(msg)
                                    stats["eof"] = stats["eof"] or not len(msg)
                                    if logger.getEffectiveLevel() <= logging.INFO:
                                        logger.info(
                                            "%s recv: %5d: %s",
                                            machine.name_centered(),
                                            len(msg),
                                            cpppo.reprlib.repr(msg),
                                        )
                                    source.chain(msg)
                                else:
                                    # No input.  If we have symbols available, no problem; continue.
                                    # This can occur if the state machine cannot make a transition on
                                    # the input symbol, indicating an unacceptable sentence for the
                                    # grammar.  If it cannot make progress, the machine will terminate
                                    # in a non-terminal state, rejecting the sentence.
                                    if source.peek() is not None:
                                        break
                                    # We're at a None (can't proceed), and no input is available.  This
                                    # is where we implement "Blocking"; just loop.

                    logger.info(
                        "Transaction parsed  after %7.3fs", cpppo.timer() - begun
                    )
                    # Terminal state and EtherNet/IP header recognized, or clean EOF (no partial
                    # message); process and return response
                    if "request" in data:
                        stats["requests"] += 1
                    try:
                        # enip_process must be able to handle no request (empty data), indicating the
                        # clean termination of the session if closed from this end (not required if
                        # enip_process returned False, indicating the connection was terminated by
                        # request.)
                        delayseconds = 0  # response delay (if any)
                        if enip_process(address, data=data, **kwds):
                            # Produce an EtherNet/IP response carrying the encapsulated response data.
                            # If no encapsulated data, ensure we also return a non-zero EtherNet/IP
                            # status.  A non-zero status indicates the end of the session.
                            assert (
                                "response.enip" in data
                            ), "Expected EtherNet/IP response; none found"
                            if (
                                "input" not in data.response.enip
                                or not data.response.enip.input
                            ):
                                logger.warning(
                                    "Expected EtherNet/IP response encapsulated message; none found"
                                )
                                assert (
                                    data.response.enip.status
                                ), "If no/empty response payload, expected non-zero EtherNet/IP status"

                            rpy = parser.enip_encode(data.response.enip)
                            if logger.getEffectiveLevel() <= logging.INFO:
                                logger.info(
                                    "%s send: %5d: %s %s",
                                    machine.name_centered(),
                                    len(rpy),
                                    cpppo.reprlib.repr(rpy),
                                    ("delay: %r" % delay) if delay else "",
                                )
                            if delay:
                                # A delay (anything with a delay.value attribute) == #[.#] (convertible
                                # to float) is ok; may be changed via web interface.
                                try:
                                    delayseconds = float(
                                        delay.value
                                        if hasattr(delay, "value")
                                        else delay
                                    )
                                    if delayseconds > 0:
                                        time.sleep(delayseconds)
                                except Exception as exc:
                                    logger.info(
                                        "Unable to delay; invalid seconds: %r", delay
                                    )
                            try:
                                conn.send(rpy)
                            except socket.error as exc:
                                logger.info("Session ended (client abandoned): %s", exc)
                                stats["eof"] = True
                            if data.response.enip.status:
                                logger.warning(
                                    "Session ended (server EtherNet/IP status: 0x%02x == %d)",
                                    data.response.enip.status,
                                    data.response.enip.status,
                                )
                                stats["eof"] = True
                        else:
                            # Session terminated.  No response, just drop connection.
                            if logger.getEffectiveLevel() <= logging.INFO:
                                logger.info(
                                    "Session ended (client initiated): %s",
                                    parser.enip_format(data),
                                )
                            stats["eof"] = True
                        logger.info(
                            "Transaction complete after %7.3fs (w/ %7.3fs delay)",
                            cpppo.timer() - begun,
                            delayseconds,
                        )
                        session.add_event({"type": "CONNECTION_CLOSED"})
                    except:
                        logger.error("Failed request: %s", parser.enip_format(data))
                        enip_process(address, data=cpppo.dotdict())  # Terminate.
                        raise

                stats["processed"] = source.sent
            except:
                # Parsing failure.
                stats["processed"] = source.sent
                memory = bytes(bytearray(source.memory))
                pos = len(source.memory)
                future = bytes(bytearray(b for b in source))
                where = "at %d total bytes:\n%s\n%s (byte %d)" % (
                    stats.processed,
                    repr(memory + future),
                    "-" * (len(repr(memory)) - 1) + "^",
                    pos,
                )
                logger.error(
                    "EtherNet/IP error %s\n\nFailed with exception:\n%s\n",
                    where,
                    "".join(traceback.format_exception(*sys.exc_info())),
                )
                raise
            finally:
                # Not strictly necessary to close (network.server_main will discard the socket,
                # implicitly closing it), but we'll do it explicitly here in case the thread doesn't die
                # for some other reason.  Clean up the connections entry for this connection address.
                self.connections.pop(connkey, None)
                logger.info(
                    "%s done; processed %3d request%s over %5d byte%s/%5d received (%d connections remain)",
                    name,
                    stats.requests,
                    " " if stats.requests == 1 else "s",
                    stats.processed,
                    " " if stats.processed == 1 else "s",
                    stats.received,
                    len(self.connections),
                )
                sys.stdout.flush()
                conn.close()
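
A note on the control flow above: the handler never blocks indefinitely; it waits at most server.control.latency seconds per recv, then re-checks the shared done/disable flags.  A minimal sketch of how that shared control structure might be assembled and handed in via kwds (an assumption for illustration; only cpppo.dotdict's item/attribute duality is relied upon):

    import cpppo

    control             = cpppo.dotdict()
    control['latency']  = 0.1           # max seconds each recv waits before re-checking
    control['done']     = False         # set True (eg. from another thread) to stop
    control['disable']  = False         # set True to stop servicing without shutdown

    kwds                = { 'server': { 'control': control } }

    # Requesting shutdown from elsewhere is then simply:
    kwds['server']['control']['done'] = True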
Ejemplo n.º 25
0
    def handle_udp(self, conn, name, enip_process, session, **kwds):
        """
        Process UDP packets from multiple clients
        """
        with parser.enip_machine(name=name, context="enip") as machine:
            while (
                not kwds["server"]["control"]["done"]
                and not kwds["server"]["control"]["disable"]
            ):
                try:
                    source = cpppo.rememberable()
                    data = cpppo.dotdict()

                    # If no/partial EtherNet/IP header received, parsing will fail with a NonTerminal
                    # Exception (dfa exits in non-terminal state).  Build data.request.enip:
                    begun = cpppo.timer()  # waiting for next transaction
                    addr, stats = None, None
                    with contextlib.closing(
                        machine.run(path="request", source=source, data=data)
                    ) as engine:
                        # PyPy compatibility; avoid deferred destruction of generators
                        for _, sta in engine:
                            if sta is not None:
                                # No more transitions available.  Wait for input.
                                continue
                            assert not addr, "Incomplete UDP request from client %r" % (
                                addr
                            )
                            msg = None
                            while msg is None:
                                # For UDP, we'll allow no input only at the start of a new request parse
                                # (addr is None); anything else will be considered a failed request.  Back
                                # to the trough for more symbols, after having already received a packet
                                # from a peer?  No go!
                                wait = (
                                    kwds["server"]["control"]["latency"]
                                    if source.peek() is None
                                    else 0
                                )
                                brx = cpppo.timer()
                                msg, frm = network.recvfrom(conn, timeout=wait)
                                now = cpppo.timer()
                                if not msg:
                                    if (
                                        kwds["server"]["control"]["done"]
                                        or kwds["server"]["control"]["disable"]
                                    ):
                                        return
                                (logger.info if msg else logger.debug)(
                                    "Transaction receive after %7.3fs (%5s bytes in %7.3f/%7.3fs): %r",
                                    now - begun,
                                    len(msg) if msg is not None else "None",
                                    now - brx,
                                    wait,
                                    self.stats_for(frm)[0],
                                )
                                # If we're at a None (can't proceed), and we haven't yet received input,
                                # then this is where we implement "Blocking"; we just loop for input.

                            # We have received exactly one packet from an identified peer!
                            begun = now
                            addr = frm
                            stats, _ = self.stats_for(addr)
                            # For UDP, we don't ever receive incoming EOF, or set stats['eof'].
                            # However, we can respond to a manual eof (eg. from web interface) by
                            # ignoring the peer's packets.
                            assert stats and not stats.get(
                                "eof"
                            ), "Ignoring UDP request from client %r: %r" % (addr, msg)
                            stats["received"] += len(msg)
                            logger.debug(
                                "%s recv: %5d: %s",
                                machine.name_centered(),
                                len(msg),
                                cpppo.reprlib.repr(msg),
                            )
                            source.chain(msg)

                    # Terminal state and EtherNet/IP header recognized; process and return response
                    assert stats
                    if "request" in data:
                        stats["requests"] += 1
                    # enip_process must be able to handle no request (empty data), indicating the
                    # clean termination of the session if closed from this end (not required if
                    # enip_process returned False, indicating the connection was terminated by
                    # request.)
                    if enip_process(addr, data=data, **kwds):
                        # Produce an EtherNet/IP response carrying the encapsulated response data.
                        # If no encapsulated data, ensure we also return a non-zero EtherNet/IP
                        # status.  A non-zero status indicates the end of the session.
                        assert (
                            "response.enip" in data
                        ), "Expected EtherNet/IP response; none found"
                        if (
                            "input" not in data.response.enip
                            or not data.response.enip.input
                        ):
                            logger.warning(
                                "Expected EtherNet/IP response encapsulated message; none found"
                            )
                            assert (
                                data.response.enip.status
                            ), "If no/empty response payload, expected non-zero EtherNet/IP status"

                        rpy = parser.enip_encode(data.response.enip)
                        logger.debug(
                            "%s send: %5d: %s",
                            machine.name_centered(),
                            len(rpy),
                            cpppo.reprlib.repr(rpy),
                        )
                        conn.sendto(rpy, addr)

                    logger.debug(
                        "Transaction complete after %7.3fs", cpppo.timer() - begun
                    )
                    session.add_event({"type": "CONNECTION_CLOSED"})
                    stats["processed"] = source.sent
                except:
                    # Parsing failure.  Suck out some remaining input to give us some context, but don't re-raise
                    if stats:
                        stats["processed"] = source.sent
                    memory = bytes(bytearray(source.memory))
                    pos = len(source.memory)
                    future = bytes(bytearray(b for b in source))
                    where = "at %d total bytes:\n%s\n%s (byte %d)" % (
                        stats.get("processed", 0) if stats else 0,
                        repr(memory + future),
                        "-" * (len(repr(memory)) - 1) + "^",
                        pos,
                    )
                    logger.error(
                        "Client %r EtherNet/IP error %s\n\nFailed with exception:\n%s\n",
                        addr,
                        where,
                        "".join(traceback.format_exception(*sys.exc_info())),
                    )
                    session.add_event({"type": "CONNECTION_FAILED"})
Ejemplo n.º 26
0
def process( p, v ):
    values[p]			= (cpppo.timer(),v)
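
This callback presumes a module-level values dict mapping each polled parameter to its latest (timestamp, value) pair.  A minimal self-contained sketch of that assumed surrounding state (the register number and value are hypothetical):

    import cpppo

    values              = {}            # parameter --> (poll time, value)

    def process( p, v ):
        values[p]       = (cpppo.timer(), v)

    process( 40001, 12345 )             # a poller invokes this once per reading
    stamp, value        = values[40001]
    age                 = cpppo.timer() - stamp   # seconds since last polled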
Ejemplo n.º 27
0
 def _write( self, address, value ):
     """ Remember a data value, with a timer to simulate delayed polling """
     self._cache.setdefault( address, collections.deque() ).append( (cpppo.timer(), value) )
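
A complementary _read could drain such a timestamped deque, making each value visible only after the simulated polling delay has elapsed.  A hedged sketch (the class name and delay attribute are assumptions, not from the original):

    import collections
    import cpppo

    class DelayedCache( object ):
        """Values written become readable only after 'delay' seconds."""
        def __init__( self, delay=0.5 ):
            self._cache = {}
            self.delay  = delay

        def _write( self, address, value ):
            self._cache.setdefault( address, collections.deque() ).append(
                (cpppo.timer(), value) )

        def _read( self, address ):
            """Latest value written at least self.delay seconds ago, or None."""
            latest      = None
            for ts, value in self._cache.get( address, () ):
                if cpppo.timer() - ts >= self.delay:
                    latest = value
            return latest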
Ejemplo n.º 28
0
def main( argv=None ):
    """Get Attribute(s) Single/All the specified Instance or Attribute level address(es)

    """
    ap				= argparse.ArgumentParser(
        description = "An EtherNet/IP Get Attribute Single/All and Set Attribute Single client",
        formatter_class = argparse.RawDescriptionHelpFormatter,
        epilog = """\

One or more EtherNet/IP CIP Object/Instance Attributes may be read or
written.  The full format for specifying a tag and an operation is:

    @<Object>/<Instance>/<Attribute>[=<value>,<value>...]

The default Send Path is '@6/1', and the default Route Path is [{"link": 0,
"port":1}].  This should work with a device that can route requests to links
(eg. a *Logix Controller), with the Processor in slot 1 of the chassis.  If you
have a simpler device (ie. something that does not route requests, such as an AB
PowerFlex), then you may want to specify:

    --send-path='' --route-path=false

to eliminate the *Logix-style Unconnected Send (service 0x52) encapsulation
which is required to carry this Send/Route Path data. """ )

    ap.add_argument( '-a', '--address',
                     default=( "%s:%d" % enip.address ),
                     help="EtherNet/IP interface[:port] to connect to (default: %s:%d)" % (
                         enip.address[0], enip.address[1] ))
    ap.add_argument( '--no-print', action='store_false', dest='print',
                     help="Disable printing of summary of operations to stdout" )
    ap.add_argument( '--print', action='store_true',
                     default=True, # inconsistent default from client.py, for historical reasons
                     help="Printing a summary of operations to stdout (default: True)" )
    ap.add_argument( '-m', '--multiple', action='store_true',
                     help="Use Multiple Service Packet request targeting ~500 bytes (default: False)" )
    ap.add_argument( '-d', '--depth',
                     default=0,
                     help="Pipelining depth" )
    ap.add_argument( '-t', '--timeout',
                     default=5.0,
                     help="EtherNet/IP timeout (default: 5s)" )
    ap.add_argument( '-v', '--verbose', action="count",
                     default=0, 
                     help="Display logging information." )
    ap.add_argument( '-l', '--log',
                     help="Log file, if desired" )
    ap.add_argument( '--route-path',
                     default=None,
                     help="Route Path, in JSON (default: %r); 0/false to specify no/empty route_path" % (
                         str( json.dumps( client.connector.route_path_default ))))
    ap.add_argument( '--send-path',
                     default=None,
                     help="Send Path to UCMM (default: @6/1); Specify an empty string '' for no Send Path" )
    ap.add_argument( '-S', '--simple', action='store_true',
                     default=False,
                     help="Access a simple (non-routing) EtherNet/IP CIP device (eg. MicroLogix)")
    ap.add_argument( '-P', '--profile', action='store_true',
                     help="Activate profiling (default: False)" )
    ap.add_argument( 'tags', nargs="+",
                     help="Class/Instance[/Attribute] to get (- to read from stdin), eg: @2/1 @2/1/1" )

    args			= ap.parse_args( argv )

    # Set up logging level (-v...) and --log <file>
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename'] = args.log

    logging.basicConfig( **cpppo.log_cfg )

    addr			= args.address.split(':')
    assert 1 <= len( addr ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    addr			= ( str( addr[0] ) if addr[0] else enip.address[0],
                                    int( addr[1] ) if len( addr ) > 1 and addr[1] else enip.address[1] )
    timeout			= float( args.timeout )
    depth			= int( args.depth )
    multiple			= 500 if args.multiple else 0
    route_path			= json.loads( args.route_path ) if args.route_path \
                                  else [] if args.simple else None # may be None/0/False/[]
    send_path			= args.send_path                if args.send_path \
                                  else '' if args.simple else None # uses '@2/1/1' by default

    if '-' in args.tags:
        # Collect tags from sys.stdin 'til EOF, at position of '-' in argument list
        minus			= args.tags.index( '-' )
        tags			= itertools.chain( args.tags[:minus], sys.stdin, args.tags[minus+1:] )
    else:
        tags			= args.tags

    profiler			= None
    if args.profile:
        import cProfile as profile
        import pstats
        import StringIO  # Python 2; the Python 3 equivalent is io.StringIO
        profiler		= profile.Profile()

    failures			= 0
    with client.connector( host=addr[0], port=addr[1], timeout=timeout, profiler=profiler ) as connection:
        idx			= -1
        start			= cpppo.timer()
        operations		= attribute_operations( tags, route_path=route_path, send_path=send_path )
        for idx,dsc,op,rpy,sts,val in connection.pipeline(
                operations=operations, depth=depth, multiple=multiple, timeout=timeout ):
            if args.print:
                print( "%s: %3d: %s == %s" % ( time.ctime(), idx, dsc, val ))
            failures	       += 1 if sts else 0
        elapsed			= cpppo.timer() - start
        log.normal( "%3d requests in %7.3fs at pipeline depth %2s; %7.3f TPS" % (
            idx+1, elapsed, args.depth, (idx+1) / elapsed ))

    if profiler:
        s			= StringIO.StringIO()
        ps			= pstats.Stats( profiler, stream=s )
        for sortby in [ 'cumulative', 'time' ]:
            ps.sort_stats( sortby )
            ps.print_stats( 25 )
        print( s.getvalue() )

    return 1 if failures else 0
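
Because main() accepts an argv list, the same entry point can be driven from a test or another script as readily as from the shell.  A hedged usage sketch (the address and tag are placeholders):

    # Equivalent to running the CLI with: -a localhost:44818 @2/1/1
    status              = main( argv=[ '-a', 'localhost:44818', '@2/1/1' ] )
    assert status in (0, 1)             # 0 on success; 1 if any operation failed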
Ejemplo n.º 29
0
def logix_remote(count, svraddr, kwargs):
    time.sleep(.25)  # Wait for server to be established

    # Confirm that a known Register encodes as expected
    data = cpppo.dotdict()
    data.enip = {}
    data.enip.options = 0
    data.enip.session_handle = 0
    data.enip.status = 0
    data.enip.sender_context = {}
    data.enip.sender_context.input = bytearray([0x00] * 8)
    data.enip.CIP = {}
    data.enip.CIP.register = {}
    data.enip.CIP.register.options = 0
    data.enip.CIP.register.protocol_version = 1

    data.enip.input = bytearray(enip.CIP.produce(data.enip))
    data.input = bytearray(enip.enip_encode(data.enip))
    log.normal("Register Request: %r" % data)

    assert bytes(data.input) == rss_004_request

    # Try to Register a real session, followed by commands
    timeout = 5

    begun = cpppo.timer()
    cli = client.client(host=svraddr[0], port=svraddr[1])
    assert cli.writable(timeout=timeout)
    elapsed = cpppo.timer() - begun
    log.normal("Client Connected in  %7.3f/%7.3fs" % (elapsed, timeout))

    begun = cpppo.timer()
    with cli:
        cli.register(timeout=timeout)
        data, elapsed = client.await(cli, timeout=timeout)  # Py2-era cpppo API; 'await' is reserved in Python 3.7+
    log.normal("Client Register Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data)
    assert data is not None and 'enip.CIP.register' in data, "Failed to receive Register response"
    assert data.enip.status == 0, "Register response indicates failure: %s" % data.enip.status

    # Establish the EtherNet/IP "session handle" used by all further requests
    cli.session = data.enip.session_handle

    start = cpppo.timer()
    with cli:
        for _ in range(count):
            begun = cpppo.timer()
            cli.read(path=[{'symbolic': 'SCADA'}, {'element': 12}],
                     elements=201, offset=2, timeout=timeout)
            data, elapsed = client.await(cli, timeout=timeout)
            log.normal("Client ReadFrg. Rcvd %7.3f/%7.3fs: %r", elapsed,
                       timeout, data)

    duration = cpppo.timer() - start
    log.warning("Client ReadFrg. Average %7.3f TPS (%7.3fs ea)." %
                (count / duration, duration / count))

    kwargs['server'].control.done = True  # Signal the server to terminate
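
logix_remote expects a server already listening at svraddr, and signals it to stop via the shared kwargs['server'].control dotdict.  A sketch of how a test harness might launch it in a thread (the wiring shown is an assumption for illustration):

    import threading
    import cpppo

    control             = cpppo.dotdict()
    control.done        = False
    server              = cpppo.dotdict()
    server.control      = control
    kwargs              = { 'server': server }

    svraddr             = ('localhost', 44818)  # assumed test server address
    t                   = threading.Thread( target=logix_remote,
                                            args=(10, svraddr, kwargs) )
    t.daemon            = True
    t.start()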
Ejemplo n.º 30
0
def test_history_timestamp():
    """Test timestamp, ensuring comparison deals in UTC only.  Supports testing in local timezones:
    
        Canada/Edmonton		-- A generic, ambiguous DST/non-DST timezone
        MST			-- A DST-specific non-DST timezone
        UTC			-- UTC

    """
    trtab			= ( string 
                                    if sys.version_info[0] < 3
                                    else str ).maketrans( ":-.", "   " )

    def utc_strp( loctime ):
        if '.' in loctime:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt + ".%f" )
        else:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt )
        return pytz.utc.localize( unaware )

    def utc_trns( loctime ):
        terms			= loctime.translate( trtab ).split()
        if len( terms ) == 7:
            # convert .123 into 123000 microseconds
            terms[6]               += '0' * ( 6 - len( terms[6] ))
        return datetime.datetime( *map( int, terms ), tzinfo=pytz.utc )

    # Basic millisecond hygiene.  Comparisons are by standard UTC format to 3 sub-second decimal
    # places of precision.  Unfortunately, the Python 2/3 strftime microsecond formatters are
    # different, so we don't use them.  If no precision, we do NOT round; we truncate, to avoid the
    # surprising effect where formatting a UNIX value manually using strftime produces a different
    # second than formatting it using render() with no sub-second precision.
    assert timestamp( 1399326141.999836 ) >= timestamp( 1399326141.374836 )
    assert timestamp( 1399326141.999836 ).render( ms=False ) == '2014-05-05 21:42:21'
    assert timestamp( 1399326141.999836 ).render( ms=5 ) == '2014-05-05 21:42:21.99984'
    assert timestamp( 1399326141.999836 ).render() == '2014-05-05 21:42:22.000'

    # Type cast support
    assert abs( float( timestamp( 1399326141.999836 )) - 1399326141.999836 ) < 1e-6
    assert int( timestamp( 1399326141.999836 )) == 1399326141

    # Adjust timestamp default precision and comparison epsilon.
    save			= timestamp._precision,timestamp._epsilon
    try:
        ts			= timestamp( 1399326141.999836 )
        for p in range( 0, 7 ):
            timestamp._precision= p
            timestamp._epsilon	= 10**-p if p else 0

            assert ts.render( ms=True ) == {
                0: '2014-05-05 21:42:21', # Truncates at 0 digits of sub-second precision
                1: '2014-05-05 21:42:22.0',
                2: '2014-05-05 21:42:22.00',
                3: '2014-05-05 21:42:22.000',
                4: '2014-05-05 21:42:21.9998',
                5: '2014-05-05 21:42:21.99984',
                6: '2014-05-05 21:42:21.999836',
            }[timestamp._precision]
            # For p == 0, try exact precision.  1e-6 is the smallest delta that can be reliably
            # added to a typical UNIX timestamp (eg.  1399326141.999836) in a double and still
            # expect it to affect the value (can store 15-17 decimal digits of precision).
            s,l			= (timestamp._epsilon*f for f in (0.9,1.1)) if p else (0,10**-6)
            assert     ts == ts + s
            assert     ts == ts - s
            assert not(ts == ts + l)
            assert not(ts == ts - l)
            assert     ts != ts + l
            assert     ts != ts - l
            assert not(ts <  ts + s)
            assert not(ts <  ts - s)
            assert     ts <  ts + l
            assert not(ts <  ts - l)
            assert     ts <= ts + s
            assert     ts <= ts - s
            assert     ts <= ts + l
            assert not(ts <= ts - l)
            assert not(ts >  ts + s)
            assert not(ts >  ts - s)
            assert not(ts >  ts + l)
            assert     ts >  ts - l
            assert     ts >= ts + s
            assert     ts >= ts - s
            assert not(ts >= ts + l)
            assert     ts >= ts - l
    finally:
        timestamp._precision,timestamp._epsilon = save


    # Maintain DST specificity when rendering in DST-specific timezones?  Nope, only when using
    # specially constructed non-DST versions of timezones, when they are made available by pytz.
    timestamp.support_abbreviations( None, reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'MST' ),None)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 14:42:21 MST'

    # Get MST/MDT etc., and CET/CEST abbreviations
    timestamp.support_abbreviations( ['CA','Europe/Berlin'], reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'America/Edmonton' ),False)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 15:42:21 MDT'


    # $ TZ=UTC date --date=@1388559600
    # Wed Jan  1 07:00:00 UTC 2014
    # 1396531199
    # Thu Apr  3 07:19:59 MDT 2014
    assert '2014-01-02 03:04:55.123'.translate( trtab ) == '2014 01 02 03 04 55 123'

    cnt				= 10000
    beg				= timer()
    for _ in range( cnt ):
        utc1			= utc_strp( '2014-01-02 03:04:55.123' )
    dur1			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc2			= utc_trns( '2014-01-02 03:04:55.123' )
    dur2			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc3			= timestamp.datetime_from_string( '2014-01-02 03:04:55.123' )
    dur3			= timer() - beg
    assert utc1.strftime( timestamp._fmt ) \
        == utc2.strftime( timestamp._fmt ) \
        == utc3.strftime( timestamp._fmt ) == '2014-01-02 03:04:55'
    logging.detail( "strptime: %d/s, translate: %d/s, timestamp: %d/s", cnt/dur1, cnt/dur2, cnt/dur3 )

    now				= timer()
    assert timestamp( now ) < timestamp( now + 1 )

    # From a numeric timestamp
    ts				= timestamp( 1396531199 )
    assert ts.utc	== '2014-04-03 13:19:59.000' == str( ts )

    assert ts.local	in ( '2014-04-03 07:19:59 MDT',
                             '2014-04-03 06:19:59 MST',
                             '2014-04-03 13:19:59 UTC' )

    # From a string UTC time
    dt				= timestamp.datetime_from_string( '2014-01-01 07:00:00.0' )
    assert str( dt )	== '2014-01-01 07:00:00+00:00'
    assert repr( dt )	== 'datetime.datetime(2014, 1, 1, 7, 0, tzinfo=<UTC>)'
    #assert dt.strftime( '%s' ) != '1388559600' # !? (will fail if machine is in UTC timezone )
    #assert pytz.utc.normalize( dt ).strftime( '%s' ) != '1388559600' # !?
    assert 1388559559.999999 < timestamp.number_from_datetime( dt ) < 1388559600.000001 # ok
    ts				= timestamp( '2014-01-01 07:00:00.0' )
    assert  1388559559.999999 < ts.value < 1388559600.000001
    assert ts.utc	== '2014-01-01 07:00:00.000' == str( ts )
    assert ts.local	in ( '2014-01-01 00:00:00 MST',
                             '2014-01-01 07:00:00 UTC' )

    # OK, now try a UTC time where the local timezone is in MDT
    ts.utc			= '2014-04-01 07:00:00.000'
    assert ts.local	in ( '2014-04-01 01:00:00 MDT',
                             '2014-04-01 00:00:00 MST',
                             '2014-04-01 07:00:00 UTC' )

    # Make sure that local times are unambiguous over daylight savings time
    # Mar 9 02:00 -> 03:00    1394355540 == Mar 9 2014 01:59
    # Nov 2 02:00 -> 01:00    1414915140 == Nov 2 2014 01:59
    ts				= timestamp( 1394355540 )
    assert ts.local	in ( '2014-03-09 01:59:00 MST',
                             '2014-03-09 08:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts				= timestamp( 1414915140 )
    assert ts.local	in ( '2014-11-02 01:59:00 MDT',
                             '2014-11-02 00:59:00 MST',
                             '2014-11-02 07:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-11-02 08:00:01 UTC' )

    # Now try converting a few strings that have a specific timezone.  We can use either .utc =
    # ... or .local = ...; they just default to the UTC or (local) timezone, respectively.  Using a
    # DST-specific timezone such as MST/MDT, we can unambiguously specify whether a time is inside
    # or outside DST.
    try:
        ts.local		= '2014-03-09 02:00:01 America/Edmonton' # Just inside MDT 2014
        assert False, """Should have failed -- time doesn't exist during "spring ahead" """
    except Exception as exc:
        assert "NonExistentTimeError" in str( exc )
    ts.local			= '2014-03-09 03:00:01 MDT' # Just inside MDT 2014
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc 	==   '2014-03-09 09:00:01.000' # MDT == UCT-6:00
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )
    # However, we CAN use a specifically non-DST timezone to specify times non-existent in DST
    ts.local			= '2014-03-09 02:00:01 MST' # No such time in MDT!!
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc	==   '2014-03-09 09:00:01.000'
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts.local			= '2014-11-02 01:00:01 MST' # 1 second after the end of DST
    assert 1414915200.999999 < ts.value < 1414915201.000001
    assert ts.utc	==   '2014-11-02 08:00:01.000'
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-11-02 00:59:59 MST',
                             '2014-11-02 08:00:01 UTC' )

    ts			       -= 2 # Go back 2 seconds, into DST
    assert ts.utc	==   '2014-11-02 07:59:59.000'
    assert ts.local	in ( '2014-11-02 01:59:59 MDT',
                             '2014-11-02 00:59:59 MST',
                             '2014-11-02 07:59:59 UTC' )

    ts.local			= '2014-11-02 01:59:58 MDT' # 2 seconds before end of DST
    assert 1414915197.999999 < ts.value < 1414915198.000001
    assert ts.utc	==   '2014-11-02 07:59:58.000'
    assert ts.local	in ( '2014-11-02 01:59:58 MDT',
                             '2014-11-02 00:59:58 MST',
                             '2014-11-02 07:59:58 UTC' )

    # Using a canonical timezone such as 'America/Edmonton', an "ambiguous" time (eg. during the
    # overlap in the fall) cannot be specified.  Using a DST-specific timezone, we can.
    try:
        ts.local		= '2014-11-02 01:00:01 America/Edmonton' # Inside DST?
    except Exception as exc:
        assert "AmbiguousTimeError" in str( exc )

    ts.local			= '2014-11-02 00:59:59 America/Edmonton' # 2 seconds before end of DST
    assert 1414911598.999999 < ts.value < 1414911599.000001
    assert ts.utc	==   '2014-11-02 06:59:59.000'
    assert ts.local	in ( '2014-11-02 00:59:59 MDT',
                             '2014-11-01 23:59:59 MST',
                             '2014-11-02 06:59:59 UTC' )

    after			= timestamp( '2014-11-02 01:02:03.123 MST' ) # (Nov 2 2014 -- 1:02 *after* DST ended)
    before			= timestamp( '2014-11-02 01:02:03.456 MDT' ) # (Nov 2 2014 --  :58 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-11-02 07:02:03.456'
    assert before.local	in ( '2014-11-02 01:02:03 MDT',
                             '2014-11-02 00:02:03 MST',
                             '2014-11-02 07:02:03 UTC' )
    assert after.utc	==   '2014-11-02 08:02:03.123'
    assert after.local	in ( '2014-11-02 01:02:03 MST',
                             '2014-11-02 08:02:03 UTC' )

    after			= timestamp( '2014-10-26 02:01:00.123 CET' )  # (Oct 26 2014 -- 1:01 *after* DST ended)
    before			= timestamp( '2014-10-26 02:01:00.456 CEST' ) # (Oct 26 2014 --  :59 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-10-26 00:01:00.456'
    assert before.local	in ( '2014-10-25 18:01:00 MDT',
                             '2014-10-25 17:01:00 MST',
                             '2014-10-26 00:01:00 UTC' )
    assert after.utc	==   '2014-10-26 01:01:00.123'
    assert after.local	in ( '2014-10-25 19:01:00 MDT',
                             '2014-10-25 18:01:00 MST',
                             '2014-10-26 01:01:00 UTC' )
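
The truncate-versus-round distinction exercised at the top of this test is worth isolating; these assertions restate it directly (values taken from the test above):

    ts = timestamp( 1399326141.999836 )
    assert ts.render( ms=False ) == '2014-05-05 21:42:21'        # no sub-seconds: truncates
    assert ts.render( ms=5 )     == '2014-05-05 21:42:21.99984'  # 5 digits: rounds
    assert ts.render()           == '2014-05-05 21:42:22.000'    # default 3 digits: rounds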
Ejemplo n.º 31
0
    def handle_tcp(self, conn, address, name, enip_process, delay=None, **kwds):
        """
        Handle a TCP client
        """
        source = cpppo.rememberable()
        with parser.enip_machine(name=name, context='enip') as machine:
            try:
                assert address, "EtherNet/IP CIP server for TCP/IP must be provided a peer address"
                stats, connkey = self.stats_for(address)
                while not stats.eof:
                    data = cpppo.dotdict()
                    source.forget()
                    # If no/partial EtherNet/IP header received, parsing will fail with a NonTerminal
                    # Exception (dfa exits in non-terminal state).  Build data.request.enip:
                    begun = cpppo.timer()
                    with contextlib.closing(machine.run(path='request', source=source, data=data)) as engine:
                        # PyPy compatibility; avoid deferred destruction of generators
                        for mch, sta in engine:
                            if sta is not None:
                                continue
                            # No more transitions available.  Wait for input.  EOF (b'') will lead to
                            # termination.  We will simulate non-blocking by looping on None (so we can
                            # check our options, in case they've been changed).  If we still have input
                            # available to process right now in 'source', we'll just check (0 timeout);
                            # otherwise, use the specified server.control.latency.
                            msg = None
                            while msg is None and not stats.eof:
                                wait = (kwds['server']['control']['latency'] if source.peek() is None else 0)
                                brx = cpppo.timer()
                                msg = network.recv(conn, timeout=wait)
                                now = cpppo.timer()
                                (logger.info if msg else logger.debug)(
                                    "Transaction receive after %7.3fs (%5s bytes in %7.3f/%7.3fs)",
                                    now - begun,
                                    len(msg) if msg is not None else "None",
                                    now - brx, wait)

                                # After each block of input (or None), check if the server is being
                                # signalled done/disabled; if so, we need to shut down, so signal eof.
                                # Assumes the (shared) server.control.{done,disable} dotdict is in kwds.  We do
                                # *not* read using attributes here, to avoid reporting completion to
                                # external APIs (eg. web) awaiting reception of these signals.
                                if kwds['server']['control']['done'] or kwds['server']['control']['disable']:
                                    logger.info("%s done, due to server done/disable", machine.name_centered())
                                    stats['eof'] = True
                                if msg is not None:
                                    stats['received'] += len(msg)
                                    stats['eof'] = stats['eof'] or not len(msg)
                                    if logger.getEffectiveLevel() <= logging.INFO:
                                        logger.info("%s recv: %5d: %s", machine.name_centered(), len(msg), cpppo.reprlib.repr(msg))
                                    source.chain(msg)
                                else:
                                    # No input.  If we have symbols available, no problem; continue.
                                    # This can occur if the state machine cannot make a transition on
                                    # the input symbol, indicating an unacceptable sentence for the
                                    # grammar.  If it cannot make progress, the machine will terminate
                                    # in a non-terminal state, rejecting the sentence.
                                    if source.peek() is not None:
                                        break
                                    # We're at a None (can't proceed), and no input is available.  This
                                    # is where we implement "Blocking"; just loop.

                    logger.info("Transaction parsed  after %7.3fs", cpppo.timer() - begun)
                    # Terminal state and EtherNet/IP header recognized, or clean EOF (no partial
                    # message); process and return response
                    if 'request' in data:
                        stats['requests'] += 1
                    try:
                        # enip_process must be able to handle no request (empty data), indicating the
                        # clean termination of the session if closed from this end (not required if
                        # enip_process returned False, indicating the connection was terminated by
                        # request.)
                        delayseconds = 0  # response delay (if any)
                        if enip_process(address, data=data, **kwds):
                            # Produce an EtherNet/IP response carrying the encapsulated response data.
                            # If no encapsulated data, ensure we also return a non-zero EtherNet/IP
                            # status.  A non-zero status indicates the end of the session.
                            assert 'response.enip' in data, "Expected EtherNet/IP response; none found"
                            if 'input' not in data.response.enip or not data.response.enip.input:
                                logger.warning("Expected EtherNet/IP response encapsulated message; none found")
                                assert data.response.enip.status, "If no/empty response payload, expected non-zero EtherNet/IP status"

                            rpy = parser.enip_encode(data.response.enip)
                            if logger.getEffectiveLevel() <= logging.INFO:
                                logger.info("%s send: %5d: %s %s", machine.name_centered(), len(rpy),
                                            cpppo.reprlib.repr(rpy), ("delay: %r" % delay) if delay else "")
                            if delay:
                                # A delay (anything with a delay.value attribute) == #[.#] (convertible
                                # to float) is ok; may be changed via web interface.
                                try:
                                    delayseconds = float( delay.value if hasattr(delay, 'value') else delay)
                                    if delayseconds > 0:
                                        time.sleep(delayseconds)
                                except Exception as exc:
                                    logger.info( "Unable to delay; invalid seconds: %r", delay)
                            try:
                                conn.send(rpy)
                            except socket.error as exc:
                                logger.info("Session ended (client abandoned): %s", exc)
                                stats['eof'] = True
                            if data.response.enip.status:
                                logger.warning( "Session ended (server EtherNet/IP status: 0x%02x == %d)",
                                                data.response.enip.status, data.response.enip.status)
                                stats['eof'] = True
                        else:
                            # Session terminated.  No response, just drop connection.
                            if logger.getEffectiveLevel() <= logging.INFO:
                                logger.info("Session ended (client initiated): %s", parser.enip_format(data))
                            stats['eof'] = True
                        logger.info( "Transaction complete after %7.3fs (w/ %7.3fs delay)", cpppo.timer() - begun, delayseconds)
                    except:
                        logger.error("Failed request: %s", parser.enip_format(data))
                        enip_process(address, data=cpppo.dotdict())  # Terminate.
                        raise

                stats['processed'] = source.sent
            except:
                # Parsing failure.
                stats['processed'] = source.sent
                memory = bytes(bytearray(source.memory))
                pos = len(source.memory)
                future = bytes(bytearray(b for b in source))
                where = "at %d total bytes:\n%s\n%s (byte %d)" % (stats.processed, repr(memory + future), '-' * (len(repr(memory)) - 1) + '^', pos)
                logger.error("EtherNet/IP error %s\n\nFailed with exception:\n%s\n", where, ''.join(traceback.format_exception(*sys.exc_info())))
                raise
            finally:
                # Not strictly necessary to close (network.server_main will discard the socket,
                # implicitly closing it), but we'll do it explicitly here in case the thread doesn't die
                # for some other reason.  Clean up the connections entry for this connection address.
                self.connections.pop(connkey, None)
                logger.info( "%s done; processed %3d request%s over %5d byte%s/%5d received (%d connections remain)",
                    name, stats.requests, " " if stats.requests == 1  else "s",
                    stats.processed, " " if stats.processed == 1 else "s",
                    stats.received, len(self.connections))
                sys.stdout.flush()
                conn.close()
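
The delay handling in both TCP handlers accepts either a bare number or any object exposing a .value attribute, so the delay can be adjusted while the server runs (eg. via the web interface).  A minimal sketch of such an adjustable delay (the class is an assumption, not cpppo API):

    class AdjustableDelay( object ):
        """Anything with a .value convertible to float works as a 'delay'."""
        def __init__( self, value=0.0 ):
            self.value  = value

    delay               = AdjustableDelay( 0.1 )
    delay.value         = 0.25          # changed at runtime, eg. by a web request
    delayseconds        = float( delay.value if hasattr( delay, 'value' ) else delay )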
Ejemplo n.º 32
0
def test_history_unparsable():
    """Test history files rendered unparsable due to dropouts.  This should be handled with no problem
    except if the initial frame of register data on the first file is missing.

    """
    for _ in range( 3 ):
        path		= "/tmp/test_unparsable_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases, containing records that are invalid.
        now		= timer()
        v		= 10000
        secs		= 10
        secs_ext	=  1.0  # adjust range of history to target out by this +/-
        basisext	=   .5  # adjust start basis time from now by this +/-
        minfactor	=   .25
        maxfactor	=  2.0
        maxlatency	=   .25
        # 1/N file lines corrupted (kills 2 records; the current and following).  None --> no errors
        maxerror	= random.choice( [ None, 3, 10, 100 ] )
        oldest		= None
        newest		= None
        logging.normal( "Corrupting %s of all history lines", None if not maxerror else "1/%d" % maxerror )
        for e in range( secs ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                ssend	= 100
                for ss in range( 0, ssend ): # subseconds up to but not including ssend...
                    js	= json.dumps( { 40001: v + e * 1000 + (ss * 1000 // ssend) } ) + '\n'
                    if maxerror and not random.randint( 0, maxerror ):
                        # Truncate some of the records (as would occur in a filesystem full or halt)
                        js = js[:random.randint( 0, len( js ) - 1)]
                    ts	= timestamp( now - e + ss/ssend )
                    if oldest is None or ts < oldest:
                        oldest = ts
                    if newest is None or ts > newest:
                        newest = ts
                    l._append( '\t'.join( (str( ts ),json.dumps( None ),js) ) )

        # Load the historical records.  This will be robust against all errors except if the first
        # line of the first history file opened is corrupt, and we therefore cannot get the initial
        # frame of register data.
        historical	= timestamp( now - random.uniform( -secs_ext, secs + secs_ext ))
        basisdelay	= random.uniform( -basisext, +basisext )
        basis		= now + basisdelay
        factor		= random.uniform( minfactor, maxfactor )
        lookahead	= 1.0
        on_bad_iframe	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        on_bad_data	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        logging.normal( "Playback starts %s (%.1f%%) of history %s-%s, in %.3fs, at x %.2f rate w/%.1fs lookahead, on_bad_iframe=%s, on_bad_data=%s",
                        historical, ( historical.value - oldest.value ) * 100 / ( newest.value - oldest.value ),
                        oldest, newest, basisdelay, factor, lookahead,
                        "SUPPRESS" if on_bad_iframe == loader.SUPPRESS else "FAIL" if on_bad_iframe  == loader.FAIL else "RAISE",
                        "SUPPRESS" if on_bad_data   == loader.SUPPRESS else "FAIL" if on_bad_data    == loader.FAIL else "RAISE" )

        ld		= loader( path,
                                historical=historical, basis=basis, factor=factor, lookahead=lookahead )
        dur		= basisext + ( secs_ext + secs + secs_ext ) / factor + basisext + 2*maxlatency # Don't be too strict
        beg		= timer()
        count		= 0

        while ld:
            assert timer() - beg < dur, "The loader should have ended"
            cur,events	= ld.load( on_bad_iframe=on_bad_iframe, on_bad_data=on_bad_data )
            count      += len( events )
            logging.normal( "%s loaded up to %s; %d future, %d values: %d events: %s",
                            ld, cur, len( ld.future ), len( ld.values ), len( events ), 
                            repr( events ) if logging.root.isEnabledFor( logging.DEBUG ) else reprlib.repr( events ))
            time.sleep( random.uniform( 0.0, maxlatency ))

        if on_bad_data == ld.FAIL or on_bad_iframe == ld.FAIL:
            assert ld.state in (ld.COMPLETE, ld.FAILED)
        else:
            assert ld.state == ld.COMPLETE

    except IframeError as exc:
        logging.warning( "Detected error on initial frame of registers in first history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED and count == 0, "Shouldn't have loaded any events -- only iframe failures expected"

    except DataError as exc:
        logging.warning( "Detected error on registers data in a history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        for f in files:
            if os.path.exists( f ):
                logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
            else:
                logging.warning( "%s: Couldn't find file", f )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
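
For reference, the records this test writes (and deliberately corrupts) are tab-separated lines: a timestamp, a JSON serialization (here None), and the JSON register mapping, newline-terminated.  A sketch of emitting one well-formed line, assuming the same timer/timestamp names the test already uses:

    import json

    ts                  = timestamp( timer() )
    js                  = json.dumps( { 40001: 10000 } ) + '\n'
    line                = '\t'.join( (str( ts ), json.dumps( None ), js) )
    # eg. '2014-05-05 21:42:21.999\tnull\t{"40001": 10000}\n'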
Ejemplo n.º 33
0
    def handle_udp(self, conn, name, enip_process, **kwds):
        """
        Process UDP packets from multiple clients
        """
        with parser.enip_machine(name=name, context='enip') as machine:
            while not kwds['server']['control']['done'] and not kwds['server']['control']['disable']:
                try:
                    source = cpppo.rememberable()
                    data = cpppo.dotdict()

                    # If no/partial EtherNet/IP header received, parsing will fail with a NonTerminal
                    # Exception (dfa exits in non-terminal state).  Build data.request.enip:
                    begun = cpppo.timer()  # waiting for next transaction
                    addr, stats = None, None
                    with contextlib.closing(machine.run(
                            path='request', source=source, data=data)) as engine:
                        # PyPy compatibility; avoid deferred destruction of generators
                        for mch, sta in engine:
                            if sta is not None:
                                # No more transitions available.  Wait for input.
                                continue
                            assert not addr, "Incomplete UDP request from client %r" % (addr)
                            msg = None
                            while msg is None:
                                # For UDP, we'll allow no input only at the start of a new request parse
                                # (addr is None); anything else will be considered a failed request.  Back
                                # to the trough for more symbols, after having already received a packet
                                # from a peer?  No go!
                                wait = (kwds['server']['control']['latency']
                                        if source.peek() is None else 0)
                                brx = cpppo.timer()
                                msg, frm = network.recvfrom(conn, timeout=wait)
                                now = cpppo.timer()
                                (logger.info if msg else logger.debug)(
                                    "Transaction receive after %7.3fs (%5s bytes in %7.3f/%7.3fs): %r",
                                    now - begun, len(msg) if msg is not None else "None",
                                    now - brx, wait, self.stats_for(frm)[0])
                                # If we're at a None (can't proceed), and we haven't yet received input,
                                # then this is where we implement "Blocking"; we just loop for input.

                            # We have received exactly one packet from an identified peer!
                            begun = now
                            addr = frm
                            stats, _ = self.stats_for(addr)
                            # For UDP, we don't ever receive incoming EOF, or set stats['eof'].
                            # However, we can respond to a manual eof (eg. from web interface) by
                            # ignoring the peer's packets.
                            assert stats and not stats.get('eof'), \
                                "Ignoring UDP request from client %r: %r" % (addr, msg)
                            stats['received'] += len(msg)
                            logger.debug("%s recv: %5d: %s", machine.name_centered(), len(msg), cpppo.reprlib.repr(msg))
                            source.chain(msg)

                    # Terminal state and EtherNet/IP header recognized; process and return response
                    assert stats
                    if 'request' in data:
                        stats['requests'] += 1
                    # enip_process must be able to handle no request (empty data), indicating the
                    # clean termination of the session if closed from this end (not required if
                    # enip_process returned False, indicating the connection was terminated by
                    # request.)
                    if enip_process(addr, data=data, **kwds):
                        # Produce an EtherNet/IP response carrying the encapsulated response data.
                        # If no encapsulated data, ensure we also return a non-zero EtherNet/IP
                        # status.  A non-zero status indicates the end of the session.
                        assert 'response.enip' in data, "Expected EtherNet/IP response; none found"
                        if 'input' not in data.response.enip or not data.response.enip.input:
                            logger.warning("Expected EtherNet/IP response encapsulated message; none found")
                            assert data.response.enip.status, "If no/empty response payload, expected non-zero EtherNet/IP status"

                        rpy = parser.enip_encode(data.response.enip)
                        logger.debug("%s send: %5d: %s", machine.name_centered(),
                                       len(rpy), cpppo.reprlib.repr(rpy))
                        conn.sendto(rpy, addr)

                    logger.debug("Transaction complete after %7.3fs", cpppo.timer() - begun)

                    stats['processed'] = source.sent
                except Exception:
                    # Parsing failure.  Suck out some remaining input to give us some context, but don't re-raise
                    if stats:
                        stats['processed'] = source.sent
                    memory = bytes(bytearray(source.memory))
                    pos = len(source.memory)
                    future = bytes(bytearray(b for b in source))
                    where = "at %d total bytes:\n%s\n%s (byte %d)" % (
                        stats.get('processed', 0) if stats else 0,
                        repr(memory + future), '-' * (len(repr(memory)) - 1) + '^', pos)
                    logger.error("Client %r EtherNet/IP error %s\n\nFailed with exception:\n%s\n", addr, where, ''.join(
                        traceback.format_exception(*sys.exc_info())))
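
A note on the receive logic above: it blocks only when the parser is starved.  If the
source buffer still holds symbols (source.peek() is not None), recvfrom is polled with a
zero timeout; otherwise it waits up to the server's configured latency for a datagram.
A minimal standalone sketch of that pattern using only the standard library (the
has_buffered callback and latency default are illustrative, not cpppo's API):

import select
import socket

def recv_udp(sock, has_buffered, latency=0.5, limit=65536):
    """Wait up to 'latency' seconds for a datagram, but only block if the
    parser has nothing buffered; returns (message, peer) or (None, None)."""
    wait = 0 if has_buffered() else latency
    readable, _, _ = select.select([sock], [], [], wait)
    if readable:
        return sock.recvfrom(limit)
    return None, None

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
msg, frm = recv_udp(sock, has_buffered=lambda: False, latency=0.1)
assert msg is None  # nothing was sent; times out after ~0.1s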
Example #34
0
File: main.py Project: ekw/cpppo
def api_request( group, match, command, value,
                      queries=None, environ=None, accept=None,
                      framework=None ):
    """Return a JSON object containing the response to the request:
      {
        data:     { ... },
        ...,
      }

    The data value contains an entry for each matching object, after executing
    the optional command.  If an accept encoding is supplied, use it.
    Otherwise, detect it from the environ's "HTTP_ACCEPT"; default to
    "application/json".

        group		-- A device group, w/globbing; no default
        match		-- A device id match, w/globbing; default is: '*'
        command		-- The command to execute on the device; default is: 'get'
        value		-- All remaining query parameters; default is: []

        queries		-- All HTTP queries and/or form parameters
        environ		-- The HTTP request environment
        accept		-- A forced MIME encoding (eg. application/json).
        framework	-- The web framework module being used
    """

    global options
    global connections
    global tags
    global srv_ctl
    accept		= deduce_encoding( [ "application/json",
                                             "text/javascript",
                                             "text/plain",
                                             "text/html" ],
                                           environ=environ, accept=accept )

    # Deduce the device group and id match, and the optional command and value.
    # Values provided on the URL are overridden by those provided as query options.
    if "group" in queries and queries["group"]:
        group		= queries["group"]
        del queries["group"]
    if not group:
        group		= "*"

    if "match" in queries and queries["match"]:
        match		= queries["match"]
        del queries["match"]
    if not match:
        match		= "*" 

    # The command/value defaults to the HTTP request, but also may be overridden by
    # the query option.
    if "command" in queries and queries["command"]:
        command		= queries["command"]
        del queries["command"]
    if "value" in queries and queries["value"]:
        value		= queries["value"]
        del queries["value"]

    # The "since" query option may be supplied, and is used to prevent (None) or
    # limit (0,...)  the "alarm" responses to those that have been updated/added
    # since the specified time.
    since		= None
    if "since" in queries and queries["since"]:
        since		= float( queries["since"] )
        del queries["since"]

    # Collect up all the matching objects, execute any command, and then get
    # their attributes, adding any command { success: ..., message: ... }
    content		= {
        "alarm":	[],
        "command":	None,
        "data":		{},
        "since":	since,		# time, 0, None (null)
        "until":	cpppo.timer(),	# time (default, unless we return alarms)
        }

    logging.debug( "Searching for %s/%s, since: %s (%s)" % (
            group, match, since, 
            None if since is None else time.ctime( since )))

    # Effectively:
    #     group.match.command = value
    # Look through each "group" object's dir of available attributes for "match".  Then, see if 
    # that target attribute exists, and is something we can get attributes from.
    for grp, obj in [ 
            ('options',		options),
            ('connections', 	connections),
            ('tags',		tags ),
            ('server',		srv_ctl )]: 
        for mch in [ m for m in dir( obj ) if not m.startswith( '_' ) ]:
            log.detail( "Evaluating %s.%s: %r", grp, mch, getattr( obj, mch, None ))
            if not fnmatch.fnmatch( grp, group ):
                continue
            if not fnmatch.fnmatch( mch, match ):
                continue
            target		= getattr( obj, mch, None )
            if not target:
                log.warning( "Couldn't find advertised attribute %s.%s", grp, mch )
                continue
            if not hasattr( target, '__getattr__' ):
                continue
          
            # The obj's group name 'grp' matches requested group (glob), and the entry 'mch' matches
            # request match (glob).  /<group>/<match> matches this obj.key.
            result		= {}
            if command is not None:
                # A request/assignment is being made.  Retain the same type as the current value,
                # and allow indexing!  We want to ensure that we don't cause failures by corrupting
                # the types of value.  Unfortunately, this makes it tricky to support "bool", as
                # coercion from string is not supported.
                try:
                    cur		= getattr( target, command )
                    result["message"] = "%s.%s.%s: %r" % ( grp, mch, command, cur )
                    if value is not None:
                        typ	= type( cur )
                        if typ is bool:
                            # Either 'true'/'yes' or 'false'/'no', otherwise it must be a number
                            if value.lower() in ('true', 'yes'):
                                cvt	= True
                            elif value.lower() in ('false', 'no'):
                                cvt	= False
                            else:
                                cvt	= bool( int( value ))
                        else:
                            cvt		= typ( value )
                        setattr( target, command, cvt )
                        result["message"] = "%s.%s.%s=%r (%r)" % ( grp, mch, command, value, cvt )
                    result["success"]	= True
                except Exception as exc:
                    result["success"]	= False
                    result["message"]	= "%s.%s.%s=%r failed: %s" % ( grp, mch, command, value, exc )
                    logging.warning( "%s.%s.%s=%s failed: %s\n%s" % ( grp, mch, command, value, exc,
                                                                       traceback.format_exc() ))

            # Get all of target's attributes (except _*) advertised by its dir() results
            attrs		= [ a for a in dir( target ) if not a.startswith('_') ]
            data		= {}
            for a in attrs:
                data[a]		= getattr( target, a )
            content["command"]	= result
            content["data"].setdefault( grp, {} )[mch] = data
        

    # Report the end of the time-span of alarm results returned; if none, then
    # the default time will be the cpppo.timer() sampled at the beginning of
    # this function.  This ensures we don't report alarms twice (or miss any).
    if content["alarm"]:
        content["until"]= content["alarm"][0]["time"]

    # JSON
    response            = json.dumps( content, sort_keys=True, indent=4, default=lambda obj: repr( obj ))

    if accept in ("text/html"):
        # HTML; dump any request query options, wrap JSON response in <pre>
        response	= html_wrap( "Response:", "h2" ) \
            		+ html_wrap( response, "pre" )
        response        = html_wrap( "Queries:",  "h2" ) \
            		+ html_wrap( "\n".join(
                            ( "%(query)-16.16s %(value)r" % {
                                "query":	str( query ) + ":",
                                "value":	value,
                                }
                              for iterable in ( queries,
                                                [("group", group),
                                                 ("match", match),
                                                 ("command", command),
                                                 ("value", value),
                                                 ("since", since),
                                                 ] )
                                  for query, value in iterable )), tag="pre" ) \
                  	+ response
        response        = html_head( response,
                                     title='/'.join( ["api", group or '', match or '', command or ''] ))
    elif accept and accept not in ("application/json", "text/javascript", "text/plain"):
        # Invalid encoding requested.  Return appropriate 406 Not Acceptable
        message		=  "Invalid encoding: %s, for Accept: %s" % (
            accept, environ.get( "HTTP_ACCEPT", "*.*" ))
        raise http_exception( framework, 406, message )

    # Return the content-type we've agreed to produce, and the result.
    return accept, response
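
The subtlest part of the assignment path above is retaining the current value's type:
bool must be special-cased, because bool("false") is True in Python, so strings are
coerced explicitly.  The rule in isolation (coerce_like is a hypothetical helper, not
part of cpppo):

def coerce_like(cur, value):
    """Coerce the string 'value' to the type of the existing value 'cur'."""
    if isinstance(cur, bool):
        if value.lower() in ('true', 'yes'):
            return True
        if value.lower() in ('false', 'no'):
            return False
        return bool(int(value))      # eg. "0" --> False, "1" --> True
    return type(cur)(value)          # int, float, str, ...

assert coerce_like(True, "no") is False
assert coerce_like(10, "42") == 42
assert coerce_like(0.5, "1.25") == 1.25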
Example #35
0
    def _poller( self ):
        """Asynchronously (ie. in another thread) poll all the specified registers, on the designated
        poll cycle.  Until we have something to do (self.rate isn't None), just wait.

        We'll log whenever we begin/cease polling any given range of registers.

        """
        target			= cpppo.timer()
        while not self.done and logging:	# Module may be gone while shutting down
            # Poller is dormant 'til a non-None/zero rate and data specified
            if not self.rate or not self._data:
                time.sleep( .1 )
                continue

            # Delay 'til poll target
            now			= cpppo.timer()
            if now < target:
                time.sleep( target - now )
                now		= cpppo.timer()

            # Ready for another poll.  Check if we've slipped (missed cycle(s)), and then compute
            # the next poll cycle target; this attempts to retain cadence.
            slipped		= int( ( now - target ) / self.rate )
            if slipped:
                log.normal( "Polling slipped; missed %d cycles" % ( slipped ))
            target	       += self.rate * ( slipped + 1 )

            # Perform polls, re-acquiring lock between each poll to allow others
            # to interject.  We'll sort the known register addresses in _data,
            # merge ranges, read the values from the PLC, and store them in
            # _data.

            # TODO: Split on and optimize counts for differing multi-register
            # limits for Coils, Registers

            # WARN: list comprehension over self._data must be atomic, because
            # we don't lock, and someone could call read/poll, adding entries to
            # self._data between reads.  However, since merge's register ranges
            # are sorted, all self._data keys are consumed before the list is
            # iterated.
            rngs		= set( merge( ( (a,1) for a in self._data ), reach=self.reach ))
            succ		= set()
            fail		= set()
            busy		= 0.0
            for address, count in rngs:
                with self.lock:
                    begin	= cpppo.timer()
                    try:
                        # Read values; on success (no exception, something other
                        # than None returned), immediately take online;
                        # otherwise attempts to _store will be rejected.
                        value	= self._read( address, count )
                        if not self.online:
                            self.online = True
                            log.critical( "Polling: PLC %s online; success polling %s: %s" % (
                                    self.description, address, cpppo.reprlib.repr( value )))
                        if (address,count) not in self.polling:
                            log.detail( "Polling %6d-%-6d (%5d)" % ( address, address+count-1, count ))
                        succ.add( (address, count) )
                        self._store( address, value ) # Handle scalar or list/tuple value(s)
                    except ModbusException as exc:
                        # Modbus error; Couldn't read the given range.  Only log
                        # the first time failure to poll this range is detected
                        fail.add( (address, count) )
                        if (address, count) not in self.failing:
                            log.warning( "Failing %6d-%-6d (%5d): %s" % (
                                    address, address+count-1, count, str( exc )))
                    except Exception as exc:
                        # Something else; always log
                        fail.add( (address, count) )
                        log.warning( "Failing %6d-%-6d (%5d): %s" % (
                                address, address+count-1, count, traceback.format_exc() ))
                    busy       += cpppo.timer() - begin

                # Prioritize other lockers (ie. write).  Contrary to popular opinion, sleep(0) does
                # *not* effectively yield the current Thread's quanta, at least on Python 2.7.6!
                time.sleep(0.001)

            # We've already warned about polls that have failed; also log all
            # polls that have ceased (failed, or been replaced by larger polls)
            ceasing		= self.polling - succ - fail
            for address, count in ceasing:
                log.info( "Ceasing %6d-%-6d (%5d)" % ( address, address+count-1, count ))

            self.polling	= succ
            self.failing	= fail
            self.duration	= busy

            # The "load" is computed by comparing the "duration" of the last poll vs. the target
            # poll rate (in seconds).  A load of 1.0 indicates the polls consumed exactly 100% of
            # the target rate.  Compute loads over approximately the last 1, 5 and 15 minutes worth
            # of polls.  The load is the proportion of the current poll rate that is consumed by
            # poll activity.  Even if the load < 1.0, polls may "slip" due to other (eg. write)
            # activity using PLC I/O capacity.
            load		= ( busy / self.rate ) if self.rate > 0 else 1.0
            ppm			= ( 60.0 / self.rate ) if self.rate > 0 else 1.0
            self.load		= tuple(
                cpppo.exponential_moving_average( cur, load, 1.0 / ( minutes * ppm ))
                for minutes,cur in zip((1, 5, 15), self.load ))

            # Finally, if we've got stuff to poll and we aren't polling anything successfully, and
            # we're not yet offline, warn and take the PLC offline; then record the completion of
            # another poll cycle.
            if self._data and not succ and self.online:
                log.critical( "Polling: PLC %s offline" % ( self.description ))
                self.online	= False
            self.counter       += 1
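
Two pieces of machinery above are worth seeing in isolation.  First, merge(...)
coalesces the individually polled addresses into contiguous (address, count) ranges,
bridging gaps smaller than 'reach' so nearby registers are fetched in a single request.
A rough re-implementation of the idea (cpppo's actual merge may differ in detail):

def merge(ranges, reach=1):
    """Coalesce (address, count) pairs; bridge gaps smaller than 'reach'."""
    result = []
    for address, count in sorted(ranges):
        if result and address - sum(result[-1]) < reach:
            last, cnt = result[-1]
            result[-1] = (last, max(cnt, address + count - last))
        else:
            result.append((address, count))
    return result

assert merge(((1, 1), (2, 1), (10, 1)), reach=2) == [(1, 2), (10, 1)]

Second, the load figures are exponentially-weighted moving averages of busy/rate, with
weights chosen so the 1, 5 and 15 minute averages decay over roughly that many minutes
worth of polls.  The basic update step looks something like this (a sketch; cpppo
supplies its own exponential_moving_average):

def ema(cur, sample, weight):
    """One EMA update; a 'cur' of None starts the average at the sample."""
    return sample if cur is None else cur + weight * (sample - cur)

load = None
for busy_fraction in (0.25, 0.30, 0.20):       # busy / rate, per poll cycle
    load = ema(load, busy_fraction, 1.0 / 60)  # ~1-minute window at 1 poll/s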
Example #36
0
    print( "\n\nCUMULATIVE:")
    prof.sort_stats( 'cumulative' ).print_stats( 100 )
    '''


    '''
    # Profile all threads
    import yappi
    yappi.start()

    #import line_profiler
    #profile = line_profiler.LineProfiler( cpppo.server.enip.enip_srv )

    #logix_performance()
    test_logix_remote()

    print('\n\nSORTED BY TOT TIME')
    yappi.print_stats( sys.stdout, sort_type=yappi.SORTTYPE_TTOT, limit=100 )
    
    print('\n\nSORTED BY SUB TIME')
    yappi.print_stats( sys.stdout, sort_type=yappi.SORTTYPE_TSUB, limit=100 )
    '''
    count			= repetitions
    start			= cpppo.timer()
    logix_performance( repeat=count )
    duration			= cpppo.timer() - start
    log.warning( "Local  ReadFrg. Average %7.3f TPS (%7.3fs ea)." % ( count / duration, duration / count ))

    test_logix_remote( count=100 )
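
The benchmark above is plain wall-clock throughput: run 'count' transactions, then
report both transactions per second and seconds per transaction.  As a generic helper
(time_tps is illustrative, not part of cpppo):

import timeit

def time_tps(func, count=100):
    """Run func() 'count' times; return (transactions/sec, sec/transaction)."""
    duration = timeit.timeit(func, number=count)
    return count / duration, duration / count

tps, each = time_tps(lambda: sum(range(1000)), count=1000)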
Example #37
0
 def __init__(self, description, **kwargs):
     super(poller_simulator, self).__init__(description=description,
                                            **kwargs)
     self._cache = {}              # times/values stored to simulated PLC
     self._polled = cpppo.timer()  # scheduled to be polled
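
Presumably the simulated poller then serves reads out of _cache instead of performing
real PLC I/O.  A hypothetical sketch of that shape (not cpppo's actual poller_simulator
internals):

import time

class simulated_plc(object):
    """Writes are cached with a timestamp; reads return the cached value,
    emulating registers that are polled rather than read synchronously."""
    def __init__(self):
        self._cache = {}                    # address -> (written-at, value)

    def write(self, address, value):
        self._cache[address] = (time.time(), value)

    def read(self, address):
        _, value = self._cache.get(address, (None, None))
        return value

plc = simulated_plc()
plc.write(40001 + 0x9001, 0x3a98)
assert plc.read(40001 + 0x9001) == 0x3a98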