Example 1
def handle(irc, userlist, operlist, msg):
    """
    Main function to deal with commands in PRIVMSGs
    """
    logging.normal(f"handler.py: {msg}")
    splt = msg.split(" ")
    _uid = splt[0][1:]
    _data = ' '.join(splt[3:])[1:]
    _nick = userlist[_uid]
    irc.utx(f"PRIVMSG {config.log_chan} :{userlist[_uid]} {_data}")
    if _uid not in operlist:
        irc.utx(
            f"NOTICE {_uid} :You are not authorized to use {config.client_nick}."
        )
        return True
    dsplit = _data.split(" ")
    if dsplit[0] == "spam":
        lttrs = list("QWERTYUIOPASDFGHJKLZXCVBNM")
        for i in range(int(dsplit[2])):
            ts = time.time()
            # Fake UID: SID "502" plus six random uppercase letters
            vuuid = "502" + ''.join(random.choice(lttrs) for _ in range(6))
            irc.stx(
                f"UID {vuuid} {ts} VirtUser{i} " +
                f"0.0.0.0 spec/virt{i} virt{i} spec/virt{i} {ts} " +
                f"+i :SpecVirtUser{i}")
            irc.stx(f"FJOIN {dsplit[1]} {int(time.time())} + :+,{vuuid}")
        irc.utx(f"NOTICE {_uid} :Finished spamming.")  # notify once, after the loop
    else:
        irc.tx(_data)
        irc.utx(f"NOTICE {_uid} :Sent to uplink.")
    return True
Example 2
def run(via,
        process,
        failure=None,
        backoff_min=None,
        backoff_multiplier=None,
        backoff_max=None,
        latency=None,
        **kwds):
    """Perform polling loop 'til process.done (or forever), and process each poll result.

    On Exception, invoke the supplied poll failure method (if any), and apply exponential back-off
    between repeated attempts to run the polling loop.  The default backoff starts at the poll cycle
    (or 1.0) seconds, and defaults to increase up to 10 times that, at a default rate of 1.5x the
    current backoff.

    One or more instances of poll.run may be using the same 'via' EtherNet/IP CIP proxy instance;
    it is assumed that Thread blocking behaviour is performed within the I/O processing code to
    ensure that only one Thread is performing I/O.

    """
    if backoff_min is None:
        backoff_min = kwds.get('cycle')
        if backoff_min is None:
            backoff_min = 1.0
    if backoff_max is None:
        backoff_max = backoff_min * 10
    if backoff_multiplier is None:
        backoff_multiplier = 1.5
    if latency is None:
        latency = .5

    backoff = None
    lst, dly = 0, 0
    beg = timer()
    while not hasattr(process, 'done') or not process.done:
        # Await expiry of 'dly', checking flags at least every 'latency' seconds
        ela = timer() - beg
        if ela < dly:
            time.sleep(min(latency, dly - ela))
            continue
        # Perform a poll.loop and/or increase exponential back-off.
        try:
            lst, dly, res = loop(via, last_poll=lst, **kwds)
            for p, v in res:
                process(p, v)
            backoff = None  # Signal a successfully completed poll!
        except Exception as exc:
            if backoff is None:
                backoff = backoff_min
                logging.normal("Polling failure: waiting %7.3fs; %s", backoff,
                               exc)
            else:
                backoff = min(backoff * backoff_multiplier, backoff_max)
                logging.detail("Polling backoff: waiting %7.3fs; %s", backoff,
                               exc)
            dly = backoff
            if failure is not None:
                failure(exc)
        beg = timer()
Example 3
def loop(via, cycle=None, last_poll=None, **kwds):
    """Monitor the desired cycle time (default: 1.0 seconds), perform a poll, and return the start of
    the poll cycle, the number of seconds to delay 'til the next poll cycle, and the list of
    parameter,value pairs polled:

        1449583850.949138,4.35,[(<parameter>,<value>),...]

    If the poll fails, an Exception is raised (and the powerflex proxy's gateway is closed in
    preparation for future poll attempts).  It is expected that the caller will re-attempt, after an
    appropriate delay (eg. one or more cycles).

    Call repeatedly (after waiting for the designated delay seconds to pass), passing the returned
    start of poll cycle in the 'last_poll' parameter.

    """
    # Detect where we are in poll cycle, logging early/missed polls, and advance last_poll to the
    # start of the current poll cycle.  We retain cadence by only initializing last_poll to the
    # current timer() if this is the first poll; otherwise, we always advance by cycles.
    if not cycle:
        cycle = 1.0
    init_poll = timer()
    dt = init_poll - (last_poll or 0)  # first poll: last_poll may be None/0
    if dt < cycle:
        # An early poll; maybe just an out-of-cycle refresh...  Don't advance poll cycles
        logging.info("Premature poll at %7.3fs into %7.3fs poll cycle", dt,
                     cycle)
    else:
        # We're into this poll cycle....
        missed = dt // cycle
        if last_poll:
            if missed > 1:
                logging.normal(
                    "Missed %3d polls, %7.3fs past %7.3fs poll cycle", missed,
                    dt - cycle, cycle)
            last_poll += cycle * missed
        else:
            last_poll = init_poll

    # last_poll has been advanced to indicate the start of the poll cycle we're within
    logging.info("Polling started   %7.3fs into %7.3fs poll cycle",
                 init_poll - last_poll, cycle)

    # Perform poll.  Whatever code "reifies" the powerflex.read generator must catch exceptions and
    # tell the (failed) powerflex instance to close its gateway.  This prepares the proxy's gateway
    # for subsequent I/O attempts (if any).
    with via:  # ensure via.close_gateway invoked on any Exception
        with contextlib.closing(execute(via, **kwds)) as executor:
            # PyPy compatibility; avoid deferred destruction of generators
            results = list(executor)

    done_poll = timer()
    duration = done_poll - init_poll
    logging.info(
        "Polling finished  %7.3fs into %7.3fs poll cycle, taking %7.3fs (%5.1f TPS)",
        done_poll - last_poll, cycle, duration,
        (1.0 / duration) if duration else float('inf'))

    # Return this poll cycle time stamp, remaining time 'til next poll cycle (if any), and results
    return last_poll, max(0, last_poll + cycle - done_poll), results
Example 4
def test_rs485_poll( simulated_modbus_rtu_ttyS0 ):
    """Multiple poller_modbus instances may be polling different slave RTUs at different unit IDs.

    """
    command,address		= simulated_modbus_rtu_ttyS0
    Defaults.Timeout		= PORT_TIMEOUT
    client			= modbus_client_rtu( framer=modbus_rtu_framer_collecting,
        port=PORT_MASTER, stopbits=PORT_STOPBITS, bytesize=PORT_BYTESIZE,
        parity=PORT_PARITY, baudrate=PORT_BAUDRATE )

    unit			= 2
    plc				= poller_modbus( "RS485 unit %s" % ( unit ), client=client, unit=unit, rate=.25 )

    wfkw			= dict( timeout=1.0, intervals=10 )

    try:
        plc.write( 1, 0 )
        plc.write( 40001, 0 )

        plc.poll( 40001 )

        success,elapsed		= waitfor( lambda: plc.read( 40001 ) is not None, "40001 polled", **wfkw )
        assert success
        assert elapsed < 1.0
        assert plc.read( 40001 ) == 0
    
        assert plc.read(     1 ) is None
        assert plc.read( 40002 ) is None
        success,elapsed		= waitfor( lambda: plc.read( 40002 ) is not None, "40002 polled", **wfkw )
        assert success
        assert elapsed < 1.0
        assert plc.read( 40002 ) == 0
        success,elapsed		= waitfor( lambda: plc.read(     1 ) is not None, "00001 polled", **wfkw )
        assert success
        assert elapsed < 1.0
        assert plc.read(     1 ) == 0

        plc.write( 40001, 99 )
        success,elapsed		= waitfor( lambda: plc.read( 40001 ) == 99, "40001 polled", **wfkw )
        assert success
        assert elapsed < 1.0
        
        # See if we converge on our target poll time
        count			= plc.counter
        while plc.counter < count + 20:
            logging.normal( "%s at poll %d: Load: %s ", plc.description, plc.counter, plc.load )
            time.sleep( .5 )
        logging.normal( "%s at poll %d: Load: %s ", plc.description, plc.counter, plc.load )

    except Exception:
        logging.warning( "%s poller failed: %s", plc.description, traceback.format_exc() )
        raise
    finally:
        logging.info( "Stopping plc polling" )
        plc.done		= True
        waitfor( lambda: not plc.is_alive(), "%s poller done" % ( plc.description ), timeout=1.0 )
Example 5
def start_powerflex_simulator( *options, **kwds ):
    """Start a simple EtherNet/IP CIP simulator (execute this file as __main__), optionally with
    Tag=<type>[<size>] (or other) positional arguments appended to the command-line.  Return the
    command-line used, and the detected (host,port) address bound.  Looks for something like:

        11-11 11:46:16.301     7fff7a619000 network  NORMAL   server_mai enip_srv server PID [ 7573] running on ('', 44818)

    containing a repr of the (<host>,<port>) tuple.  Recover this address using the safe
    ast.literal_eval.  Use -A to provide this on stdout, or just -v if stderr is redirected to
    stdout (the default, w/o a stderr parameter to nonblocking_command).

    At least one positional parameter containing a Tag=<type>[<size>] must be provided.

    Note that this file's interpreter output is not *unbuffered* (above), so to receive and parse
    the 'running on ...' line, we assume that server/network.py flushes stdout when printing the
    bindings.  We could use #!/usr/bin/env -S python3 -u instead to have all output unbuffered.

    """
    command                     = nonblocking_command( [
        sys.executable, os.path.abspath( __file__ ),
        '-a', ':0', '-A', '-p', '-v', '--no-udp',
    ] + list( options ), stderr=None, bufsize=0, blocking=None )

    # For python 2/3 compatibility (can't mix positional wildcard, keyword parameters in Python 2)
    CMD_WAIT			= kwds.pop( 'CMD_WAIT', 10.0 )
    CMD_LATENCY			= kwds.pop( 'CMD_LATENCY', 0.1 )
    assert not kwds, "Unrecognized keyword parameter: %s" % ( ", ".join( kwds ))

    begun			= timer()
    address			= None
    data			= ''
    while address is None and timer() - begun < CMD_WAIT:
        # On Python2, socket will raise IOError/EAGAIN; on Python3 may return None 'til command started.
        raw			= None
        try:
            raw			= command.stdout.read()
            logging.debug( "Socket received: %r", raw)
            if raw:
                data  	       += raw.decode( 'utf-8', 'backslashreplace' )
        except IOError as exc:
            logging.debug( "Socket blocking...: {exc}".format( exc=exc ))
            assert exc.errno == errno.EAGAIN, "Expected only Non-blocking IOError"
        except Exception as exc:
            logging.warning("Socket read return Exception: %s", exc)
        if not raw: # got nothing; wait a bit
            time.sleep( CMD_LATENCY )
        while data.find( '\n' ) >= 0:
            line,data		= data.split( '\n', 1 )
            logging.info( "%s", line )
            m			= re.search( r"running on (\([^)]*\))", line )
            if m:
                address		= ast.literal_eval( m.group(1).strip() )
                logging.normal( "EtherNet/IP CIP Simulator started after %7.3fs on %s:%d",
                                    timer() - begun, address[0], address[1] )
                break
    return command,address
Example 6
def handle(irc, userlist, operlist, msg):
    """
    Main function to deal with joins
    """
    logging.normal(f"akick.py: {msg}")
    _chan = msg.split(" ")[2]
    _uid = msg.split(",")[-1]
    if _chan == "#bots":
        irc.utx(f"KICK {_chan} {_uid} :Disabled channel")
    return True
Example 7
def loop( via, cycle=None, last_poll=None, **kwds ):
    """Monitor the desired cycle time (default: 1.0 seconds), perform a poll, and return the start of
    the poll cycle, the number of seconds to delay 'til the next poll cycle, and the list of
    parameter,value pairs polled:

        1449583850.949138,4.35,[(<parameter>,<value>),...]

    If the poll fails, an Exception is raised (and the powerflex proxy's gateway is closed in
    preparation for future poll attempts).  It is expected that the caller will re-attempt, after an
    appropriate delay (eg. one or more cycles).

    Call repeatedly (after waiting for the designated delay seconds to pass), passing the returned
    start of poll cycle in the 'last_poll' parameter.

    """
    # Detect where we are in poll cycle, logging early/missed polls, and advance last_poll to the
    # start of the current poll cycle.  We retain cadence by only initializing last_poll to the
    # current timer() if this is the first poll; otherwise, we always advance by cycles.
    if not cycle:
        cycle			= 1.0
    init_poll			= timer()
    dt				= init_poll - (last_poll or 0) # first poll: last_poll may be None/0
    if dt < cycle:
        # An early poll; maybe just an out-of-cycle refresh...  Don't advance poll cycles
        logging.info( "Premature poll at %7.3fs into %7.3fs poll cycle", dt, cycle )
    else:
        # We're into this poll cycle....
        missed			= dt // cycle
        if last_poll:
            if missed > 1:
                logging.normal( "Missed %3d polls, %7.3fs past %7.3fs poll cycle",
                                missed, dt-cycle, cycle )
            last_poll	       += cycle * missed
        else:
            last_poll		= init_poll

    # last_poll has been advanced to indicate the start of the poll cycle we're within
    logging.info( "Polling started   %7.3fs into %7.3fs poll cycle", init_poll - last_poll, cycle )

    # Perform poll.  Whatever code "reifies" the powerflex.read generator must catch exceptions and
    # tell the (failed) powerflex instance to close its gateway.  This prepares the proxy's gateway
    # for subsequent I/O attempts (if any).
    with via: # ensure via.close_gateway invoked on any Exception
        with contextlib.closing( execute( via, **kwds )) as executor:
            # PyPy compatibility; avoid deferred destruction of generators
            results		= list( executor )

    done_poll			= timer()
    duration			= done_poll - init_poll
    logging.info( "Polling finished  %7.3fs into %7.3fs poll cycle, taking %7.3fs (%5.1f TPS)",
                  done_poll - last_poll, cycle, duration, (1.0/duration) if duration else float('inf'))

    # Return this poll cycle time stamp, remaining time 'til next poll cycle (if any), and results
    return last_poll,max( 0, last_poll+cycle-done_poll ),results
Example 8
def start_powerflex_simulator(*options, **kwds):
    """Start a simple EtherNet/IP CIP simulator (execute this file as __main__), optionally with
    Tag=<type>[<size>] (or other) positional arguments appended to the command-line.  Return the
    command-line used, and the detected (host,port) address bound.  Looks for something like:

        11-11 11:46:16.301     7fff7a619000 network  NORMAL   server_mai enip_srv server PID [ 7573] running on ('', 44818)

    containing a repr of the (<host>,<port>) tuple.  Recover this address using the safe ast.literal_eval.

    At least one positional parameter containing a Tag=<type>[<size>] must be provided.

    """
    command = nonblocking_command([
        'python',
        os.path.abspath(__file__),
        '-v',
    ] + list(options))

    # For python 2/3 compatibility (can't mix positional wildcard, keyword parameters in Python 2)
    CMD_WAIT = kwds.pop('CMD_WAIT', 10.0)
    CMD_LATENCY = kwds.pop('CMD_LATENCY', 0.1)
    assert not kwds, "Unrecognized keyword parameter: %s" % (", ".join(kwds))

    begun = timer()
    address = None
    data = ''
    while address is None and timer() - begun < CMD_WAIT:
        # On Python2, socket will raise IOError/EAGAIN; on Python3 may return None 'til command started.
        raw = None
        try:
            raw = command.stdout.read()
            logging.debug("Socket received: %r", raw)
            if raw:
                data += raw.decode('utf-8')
        except IOError as exc:
            logging.debug("Socket blocking...")
            assert exc.errno == errno.EAGAIN, "Expected only Non-blocking IOError"
        except Exception as exc:
            logging.warning("Socket read return Exception: %s", exc)
        if not raw:  # got nothing; wait a bit (avoids spinning on a partial line)
            time.sleep(CMD_LATENCY)
        while data.find('\n') >= 0:
            line, data = data.split('\n', 1)
            logging.info("%s", line)
            m = re.search(r"running on (\([^)]*\))", line)
            if m:
                address = ast.literal_eval(m.group(1).strip())
                logging.normal(
                    "EtherNet/IP CIP Simulator started after %7.3fs on %s:%d",
                    timer() - begun, address[0], address[1])
                break
    return command, address
Example 9
def run( via, process, failure=None, backoff_min=None, backoff_multiplier=None, backoff_max=None,
         latency=None, **kwds ):
    """Perform polling loop 'til process.done (or forever), and process each poll result.

    On Exception, invoke the supplied poll failure method (if any), and apply exponential back-off
    between repeated attempts to run the polling loop.  The default backoff starts at the poll cycle
    (or 1.0) seconds, and defaults to increase up to 10 times that, at a default rate of 1.5x the
    current backoff.

    One or more instances of poll.run may be using the same 'via' EtherNet/IP CIP proxy instance;
    it is assumed that Thread blocking behaviour is performed within the I/O processing code to
    ensure that only one Thread is performing I/O.

    """
    if backoff_min is None:
        backoff_min		= kwds.get( 'cycle' )
        if backoff_min is None:
            backoff_min		= 1.0
    if backoff_max is None:
        backoff_max		= backoff_min * 10
    if backoff_multiplier is None:
        backoff_multiplier	= 1.5
    if latency is None:
        latency			= .5

    backoff			= None
    lst,dly			= 0,0
    beg				= timer()
    while not hasattr( process, 'done' ) or not process.done:
        # Await expiry of 'dly', checking flags at least every 'latency' seconds
        ela			= timer() - beg
        if ela < dly:
            time.sleep( min( latency, dly - ela ))
            continue
        # Perform a poll.loop and/or increase exponential back-off.
        try:
            lst,dly,res		= loop( via, last_poll=lst, **kwds )
            for p,v in res:
                process( p, v )
            backoff		= None # Signal a successfully completed poll!
        except Exception as exc:
            if backoff is None:
                backoff		= backoff_min
                logging.normal( "Polling failure: waiting %7.3fs; %s", backoff, exc )
            else:
                backoff		= min( backoff * backoff_multiplier, backoff_max )
                logging.detail(  "Polling backoff: waiting %7.3fs; %s", backoff, exc )
            dly			= backoff
            if failure is not None:
                failure( exc )
        beg			= timer()
Example 10
def start_modbus_simulator(options):
    """Start bin/modbus_sim.py; assumes it flushes stdout when printing bindings so we can parse it
    here.

    """
    command = nonblocking_command([
        sys.executable,
        os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bin',
                     'modbus_sim.py'),
    ] + list(options),
                                  stderr=None,
                                  bufsize=0)

    begun = misc.timer()
    address = None
    data = ''
    while address is None and misc.timer() - begun < RTU_WAIT:
        # On Python2, socket will raise IOError/EAGAIN; on Python3 may return None 'til command started.
        raw = None
        try:
            raw = command.stdout.read()
            logging.debug("Socket received: %r", raw)
            if raw:
                data += raw.decode('utf-8', 'backslashreplace')
        except IOError as exc:
            logging.debug("Socket blocking...")
            assert exc.errno == errno.EAGAIN, "Expected only Non-blocking IOError"
        except Exception as exc:
            logging.warning("Socket read return Exception: %s", exc)
        if not raw:
            time.sleep(RTU_LATENCY)
        while data.find('\n') >= 0:
            line, data = data.split('\n', 1)
            logging.info("%s", line)
            m = re.search("address = (.*)", line)
            if m:
                try:
                    host, port = m.group(1).split(':')
                    address = host, int(port)
                    logging.normal(
                        "Modbus/TCP Simulator started after %7.3fs on %s:%d",
                        misc.timer() - begun, address[0], address[1])
                except ValueError:  # not host:port; expect a serial device path
                    assert m.group(1).startswith('/')
                    address = m.group(1)
                    logging.normal(
                        "Modbus/RTU Simulator started after %7.3fs on %s",
                        misc.timer() - begun, address)
                break
    return command, address
Example 11
def start_powerflex_simulator( *options, **kwds ):
    """Start a simple EtherNet/IP CIP simulator (execute this file as __main__), optionally with
    Tag=<type>[<size>] (or other) positional arguments appended to the command-line.  Return the
    command-line used, and the detected (host,port) address bound.  Looks for something like:

        11-11 11:46:16.301     7fff7a619000 network  NORMAL   server_mai enip_srv server PID [ 7573] running on ('', 44818)

    containing a repr of the (<host>,<port>) tuple.  Recover this address using the safe ast.literal_eval.

    At least one positional parameter containing a Tag=<type>[<size>] must be provided.

    """
    command                     = nonblocking_command( [
        'python',
        os.path.abspath( __file__ ),
        '-v',
    ] + list( options ))

    # For python 2/3 compatibility (can't mix positional wildcard, keyword parameters in Python 2)
    CMD_WAIT			= kwds.pop( 'CMD_WAIT', 10.0 )
    CMD_LATENCY			= kwds.pop( 'CMD_LATENCY', 0.1 )
    assert not kwds, "Unrecognized keyword parameter: %s" % ( ", ".join( kwds ))

    begun			= timer()
    address			= None
    data			= ''
    while address is None and timer() - begun < CMD_WAIT:
        # On Python2, socket will raise IOError/EAGAIN; on Python3 may return None 'til command started.
        raw			= None
        try:
            raw			= command.stdout.read()
            logging.debug( "Socket received: %r", raw)
            if raw:
                data  	       += raw.decode( 'utf-8' )
        except IOError as exc:
            logging.debug( "Socket blocking...")
            assert exc.errno == errno.EAGAIN, "Expected only Non-blocking IOError"
        except Exception as exc:
            logging.warning("Socket read return Exception: %s", exc)
        if not raw: # got nothing; wait a bit (avoids spinning on a partial line)
            time.sleep( CMD_LATENCY )
        while data.find( '\n' ) >= 0:
            line,data		= data.split( '\n', 1 )
            logging.info( "%s", line )
            m			= re.search( r"running on (\([^)]*\))", line )
            if m:
                address		= ast.literal_eval( m.group(1).strip() )
                logging.normal( "EtherNet/IP CIP Simulator started after %7.3fs on %s:%d",
                                    timer() - begun, address[0], address[1] )
                break
    return command,address
Example 12
def connect(host=config.link_host, port=config.link_port):
    """
    Connects to a server, using config.link_host and config.link_port by default
    """
    logging.normal(f"link.py: Connecting to {host} {port}")
    sock = socket.create_connection((host, port))
    logging.good("link.py: Connection seems to have been established")
    if config.link_ssl:
        ssl_context = ssl.create_default_context()
        conn = ssl_context.wrap_socket(sock, server_hostname=host)
    else:
        conn = sock
    cfile = conn.makefile(errors="replace")
    global connection
    connection = (conn, cfile)
Example 13
    def null_server( conn, addr, server=None ):
        """Fake up an EtherNet/IP server that just sends a canned EtherNet/IP CIP Register and Identity
        string response, to fake the poll client into sending a poll request into a closed socket.
        Immediately does a shutdown of the incoming half of the socket, and then closes the
        connection after sending the fake replies, usually resulting in an excellent EPIPE/SIGPIPE
        on the client.  Use port 44819, to avoid interference by (possibly slow-to-exit) simulators
        running on port 44818.

        """
        logging.normal( "null_server on %s starting" % ( addr, ))
        conn.shutdown( socket.SHUT_RD )
        time.sleep( 0.1 )
        conn.send( b'e\x00\x04\x00\xc9wH\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00' )
        conn.send( b'c\x00;\x00\xd4/\x9dm\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x0c\x005\x00\x01\x00\x00\x02\xaf\x12\n\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x0e\x006\x00\x14\x0b`1\x1a\x06l\x00\x13PowerFlex/20-COMM-E\xff' )
        conn.close()
        while server and not server.control.done:
            time.sleep( .1 )
        logging.normal( "null_server on %s done" % ( addr, ))
Example 15
def run():
    try:
        opers = []
        users = {}
        plugins = []

        logging.normal("ulined.py: Loading plugins")
        for _plugin in config.plugins_enabled:
            exec(f"import {_plugin}")
            plugins.append(eval(_plugin))
        logging.good("ulined.py: Plugins loaded")

        irc.init()
        while True:
            line = irc.rx()
            if line is None:
                logging.error("uline.py: Dead socket")
                raise Exception("Dead socket")
            done = False
            splt = line.split(" ")
            for plugin in plugins:
                if eval(plugin.trigger):
                    done = plugin.handle(irc, users, opers, line)
                    if done:
                        break  # handled; stop trying further plugins
            if done:
                continue
            if len(splt) == 4 and splt[1] == "PING":
                irc.stx("PONG " + splt[3] + " " + splt[2])
            elif len(splt) > 4 and splt[1] == "UID" and splt[4] == config.client_nick:
                irc.stx("KILL " + splt[2] + " :%s" % text_kill_nickresv)
            elif len(splt) >= 10 and splt[1] == "UID":
                users[splt[2]] = splt[4]
            elif len(splt) == 3 and splt[1] == "OPERTYPE" and splt[2] == config.oper_type:
#                irc.utx(f"NOTICE {splt[0][1:]} :{config.text_operup}")
                opers.append(splt[0][1:])
    except Exception as e:
        logging.error(f"ulined.py: Unexpected error: {type(e)} {str(e)}")
        raise  # re-raising makes a trailing exit(255) unreachable
    except KeyboardInterrupt:
        logging.bad("ulined.py: Got SIGINT, quitting")
        exit(0)
Example 16
def init():
    """
    Initializes the server connection, does handshakes and such
    """
    logging.normal("irc.py: Initializing IRC")
    link.connect()
    logging.normal("irc.py: Negotiating capabilities")
    link.tx("CAPAB START 1202")
    link.tx("CAPAB CAPABILITIES :PROTOCOL=1202")
    link.tx("CAPAB END")
    link.tx(f"SERVER {config.irc_server} {config.irc_sendpass} 0 {config.irc_sid} :{config.irc_desc}")
    while True:
        line = link.rx()
        if line == "CAPAB END":
            logging.normal("irc.py: Done with capabilities")
            break
    line = link.rx()
    if line[:6] != "SERVER":
        logging.error("irc.py: Protocol violation: did not get SERVER after CAPAB END")
        exit(1)
    else:
        logging.good("irc.py: Got remote SERVER reply")
        splt = line.split(' ')
        _r_name = splt[1]
        _r_pass = splt[2]
        if _r_pass != config.irc_recvpass:
            logging.error(f"irc.py: {_r_name} is not sending correct password, got {_r_pass}")
            exit(2)
        else:
            del _r_name
            del _r_pass
            del splt
            logging.good("irc.py: Password is good")
            if not status['bursted']:
                ts = str(int(time.time()))
                link.stx("BURST")
                link.stx(f"UID {config.client_uid} {ts} {config.client_nick} {config.client_connaddr} " +
                         f"{config.client_host} {config.client_ident} {config.client_connaddr} {ts} " +
                         f"{config.client_umode} :{config.client_realname}")
                link.stx("ENDBURST")
                status['bursted'] = True
                ts = str(int(time.time()))
                link.utx(f"PRIVMSG NickServ :identify {config.client_nick} {config.client_nspass}")
                link.stx(f"FJOIN {config.log_chan} {ts} + :{config.client_chmode}," +
                         f"{config.client_uid}")
                link.stx(f"FJOIN #chat {ts} + :{config.client_chmode}," +
                         f"{config.client_uid}")
                link.stx(f"FJOIN #test {ts} + :{config.client_chmode}," +
                         f"{config.client_uid}")
                link.stx(f"MODE {config.log_chan} +{config.client_chmode} {config.client_nick}")
            logging.normal("irc.py: IRC initialized")
Example 17
def start_modbus_simulator( options ):
    command                     = nonblocking_command( [
        'python',
        os.path.join( os.path.dirname( os.path.abspath( __file__ )), 'bin', 'modbus_sim.py' ),
    ] + list( options ))

    begun			= misc.timer()
    address			= None
    data			= ''
    while address is None and misc.timer() - begun < RTU_WAIT:
        # On Python2, socket will raise IOError/EAGAIN; on Python3 may return None 'til command started.
        raw			= None
        try:
            raw			= command.stdout.read()
            logging.debug( "Socket received: %r", raw)
            if raw:
                data  	       += raw.decode( 'utf-8' )
        except IOError as exc:
            logging.debug( "Socket blocking...")
            assert exc.errno == errno.EAGAIN, "Expected only Non-blocking IOError"
        except Exception as exc:
            logging.warning("Socket read return Exception: %s", exc)
        if not raw: # got nothing; wait a bit (avoids spinning on a partial line)
            time.sleep( RTU_LATENCY )
        while data.find( '\n' ) >= 0:
            line,data		= data.split( '\n', 1 )
            logging.info( "%s", line )
            m			= re.search( "address = (.*)", line )
            if m:
                try:
                    host,port	= m.group(1).split( ':' )
                    address	= host,int(port)
                    logging.normal( "Modbus/TCP Simulator started after %7.3fs on %s:%d",
                                    misc.timer() - begun, address[0], address[1] )
                except ValueError:  # not host:port; expect a serial device path
                    assert m.group(1).startswith( '/' )
                    address	= m.group(1)
                    logging.normal( "Modbus/RTU Simulator started after %7.3fs on %s",
                                    misc.timer() - begun, address )
                break
    return command,address
Example 18
    def request(self, data, addr=None):
        """Any exception should result in a reply being generated with a non-zero status."""
        if log.isEnabledFor(logging.DETAIL):
            log.detail("%s Request: %s", self, enip.enip_format(data))

        # Pick out our services added at this level.  We only accept ANC-120e DF1.  If unrecognized,
        # return a non-zero STS. Normal or Priority CMD codes are accepted.
        data.DF1.sts = 0x10  # Illegal command or format
        if data.DF1.get('cmd') in (0x06, 0x26) and data.DF1.get('fnc') == 0x03:
            log.warning("DF1: Diagnostic Status: %s", enip.enip_format(data))
            # eg. Status Request/Reply:
            # b'\x00\x00\x01\x00\x00\x00\x00\x00\x06\x00J\n\x03'
            # b'\x00\x00\x00\x00\x00\x00\x01\x00\x46\x00J\n\x00\xee1[#5/04       V\x00\x9e$\x05D \xfc'
            data.DF1.sts = 0
            data.DF1.data = array.array(
                cpppo.type_bytes_array_symbol,
                b'\xee1[#5/04       V\x00\x9e$\x05D \xfc')
        elif data.DF1.get('cmd') in (0x0F,
                                     0x2F) and data.DF1.get('fnc') == 0xA2:
            log.warning(
                "DF1: Protected typed Logical Read w/ 3 Address Fields: %s",
                enip.enip_format(data))
            # eg. Read Request/Reply:
            # b'\x00\x00\x01\x00\x00\x00\x00\x00\x0f\x00K\n\xa2D\x00\x01\x00\x00'
            # b'\x00\x00\x00\x00\x00\x00\x01\x00\x4f\x00K\nFX PLC P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!\x00\x000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05D\x01\x00#\x00\x04\x00\x02\x00e\x00\x03\x00\xa2\x00\xa7\x00V\x01j\x01t\x01m\x03'
            data.DF1.sts = 0
            data.DF1.data = array.array(
                cpppo.type_bytes_array_symbol,
                b'FX PLC P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!\x00\x000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05D\x01\x00#\x00\x04\x00\x02\x00e\x00\x03\x00\xa2\x00\xa7\x00V\x01j\x01t\x01m\x03'
            )
        else:
            logging.normal("DF1: Unrecognized: %s", enip.enip_format(data))

        # Convert DF1 request into a response.  Assume DF1.sts is 0 (for success), !0 for failure,
        # and DF1.data contains the response payload.  Swap src/dst.
        data.DF1.cmd |= 0x40
        data.DF1.src, data.DF1.dst = data.DF1.dst, data.DF1.src
        data.input = bytearray(self.produce(data))
        return True
Example 19
def main():
    parser			= argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog = """\

    Register range(s) and value(s) must be supplied:
    
      <begin>[-<end>]
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """ )
    parser.add_argument( '-v', '--verbose',
                         default=0, action="count", help="Display logging information." )
    parser.add_argument('-l', '--log', 
                        type=str, default=None, help="Direct log output to the specified file" )
    parser.add_argument( '-a', '--address', default="0.0.0.0:502",
                         help="Default [interface][:port] to bind to (default: any, port 502)" )
    parser.add_argument( '-r', '--reach',	default=1,
                         help="Merge polls within <reach> registers of each-other" )
    parser.add_argument( '-R', '--rate',	default=1.0,
                         help="Target poll rate" )
    parser.add_argument( 'registers', nargs="+" )
    args			= parser.parse_args()
    
    # Deduce logging level and target file (if any)
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename'] = args.log
    logging.basicConfig( **cpppo.log_cfg )

    # (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only
    # interface is supplied.  After this block, 'address' is always a tuple
    # like ("interface",502)
    address			= None
    if args.address:
        address			= args.address.split(':')
        assert 1 <= len( address ) <= 2
        address			= (
            str( address[0] ),
            int( address[1] ) if len( address ) > 1 else Defaults.Port )
        log.info( "--address '%s' produces address=%r" % ( args.address, address ))

    # Start the PLC poller

    poller			= poller_modbus(
        "Modbus/TCP", host=address[0], port=address[1], reach=int( args.reach ), rate=float( args.rate ))

    
    for r in args.registers:
        rng			= r.split('-')
        beg,cnt			= int(rng[0]), int(rng[1])-int(rng[0])+1 if len(rng) > 1 else 1
        for reg in range( beg, beg+cnt ):
            poller.poll( reg )
    
    load			= ''
    fail			= ''
    poll			= ''
    regs			= {}
    while True:
        loadcur			= "%.2f" % ( poller.load[0] if poller.load[0] else 0 )
        if loadcur != load:
            load		= loadcur
            logging.detail( "load: %s", loadcur )
        failcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.failing ] )
        pollcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.polling ] )
        if ( failcur != fail or pollcur != poll ):
            fail, poll		= failcur, pollcur
            logging.normal( "failing: %s, polling: %s", fail, poll )
        # log data changes
        for beg,cnt in poller.polling:
            for reg in range( beg, beg+cnt ):
                val		= poller.read( reg )
                old		= regs.get( reg ) # may be None
                if val != old:
                    logging.warning( "%5d == %5d (was: %s)" %( reg, val, old ))
                    regs[reg]	= val

        time.sleep( 1 )
Example 20
    def position( self, actuator=1, timeout=TIMEOUT, home=True, noop=False, svoff=False, **kwds ):
        """Begin position operation on 'actuator' w/in 'timeout'.  

        :param home: Return to home position before any other movement
        :param noop: Do not perform final activation

        Running with specified data

        1   - Set internal flag Y30 (input invalid flag)
        2   - Write 1 to internal flag Y19 (SVON)
        2a  -   and confirm internal flag X49 (SVRE) has become "1"
        3   - Write 1 to internal flag Y1C (SETUP)
        3a  -   and confirm internal flag X4A (SETON) has become "1"
        4   - Write data to D9102-D9110
        5   - Write Operation Start instruction "1" to D9100 (returns to 0 after processed)

        If no positioning kwds are provided, then no new position is configured.  If 'noop' is True,
        everything except the final activation is performed.

        """
        begin			= cpppo.timer()
        if timeout is None:
            timeout		= self.TIMEOUT
        assert self.complete( actuator=actuator, svoff=svoff, timeout=timeout ), \
            "Previous actuator position incomplete within timeout %r" % timeout
        status			= self.status( actuator=actuator )
        if not kwds:
            return status

        # Previous positioning complete, and possibly new position keywords provided.
        logging.detail( "Position: actuator %3d setdata: %r", actuator, kwds )
        unit			= self.unit( uid=actuator )

        # 1: set INPUT_INVALID
        unit.write( data.Y30_INPUT_INVALID.addr, 1 )

        # 2: set SVON, check SVRE
        if timeout:
            assert cpppo.timer() <= begin + timeout, \
                "Failed to complete positioning SVON/SVRE within timeout"
        unit.write( data.Y19_SVON.addr, 1 )
        svre			= self.check(
            predicate=lambda: unit.read( data.Y19_SVON.addr ) and unit.read( data.X49_SVRE.addr ),
            deadline=None if timeout is None else begin + timeout )
        assert svre, \
            "Failed to set SVON True and read SVRE True"

        # 3: Return to home? set SETUP, check SETON.  Otherwise, clear SETUP.  It is very unclear
        #    whether we need to do this, and/or whether we need to clear it afterwards.
        if home:
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning SETUP/SETON within timeout"
            unit.write( data.Y1C_SETUP.addr, 1 )
            seton			= self.check(
                predicate=lambda: unit.read( data.Y1C_SETUP.addr ) and unit.read( data.X4A_SETON.addr ),
                deadline=None if timeout is None else begin + timeout )
            if not seton:
                logging.warning( "Failed to set SETUP True and read SETON True" )
            # assert seton, \
            #    "Failed to set SETUP True and read SETON True"
        else:
            unit.write( data.Y1C_SETUP.addr, 0 )
        
        # 4: Write any changed position data.  The actuator doesn't accept individual register
        # writes, so we use multiple register writes for each value.
        for k,v in kwds.items():
            assert k in data, \
                "Unrecognized positioning keyword: %s == %r" % ( k, v )
            assert STEP_DATA_BEG <= data[k].addr <= STEP_DATA_END, \
                "Invalid positioning keyword: %s == %r; not within position data address range" % ( k, v )
            format		= data[k].get( 'format' )
            if format:
                # Create a big-endian buffer.  This will be some multiple of register size.  Then,
                # unpack it into some number of 16-bit big-endian registers (this will be a tuple).
                buf		= struct.pack( '>'+format, v )
                values		= [ struct.unpack_from( '>H', buf[o:] )[0] for o in range( 0, len( buf ), 2 ) ]
            else:
                values		= [ v ]
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning data update within timeout"
            logging.normal( "Position: actuator %3d updated: %16s: %8s (== %s)", actuator, k, v, values )
            unit.write( data[k].addr, values )

        # 5: set operation_start to 0x0100 (1 in high-order bytes) unless 'noop'
        if not noop:
            unit.write( data.operation_start.addr, 0x0100 )
            started			= self.check(
                predicate=lambda: unit.read( data.operation_start.addr ) == 0x0100,
                deadline=None if timeout is None else begin + timeout )
            assert started, \
                "Failed to detect positioning start within timeout"

        return self.status( actuator=actuator )
Example 21
def run_plc_modbus_polls( plc ):
    # Initial conditions (in case PLC is persistent between tests)
    plc.write(     1, 0 )
    plc.write( 40001, 0 )

    rate			= 1.0
    timeout			= 2 * rate 	# Nyquist
    intervals			= timeout / .05	#  w/ fixed .05s intervals
    wfkw			= dict( timeout=timeout, intervals=intervals )

    plc.poll( 40001, rate=rate )
    
    success,elapsed		= waitfor( lambda: plc.read( 40001 ) is not None, "40001 polled", **wfkw )
    assert success
    assert elapsed < 1.0
    assert plc.read( 40001 ) == 0
    
    assert plc.read(     1 ) is None
    assert plc.read( 40002 ) is None
    success,elapsed		= waitfor( lambda: plc.read( 40002 ) is not None, "40002 polled", **wfkw )
    assert success
    assert elapsed < 1.0
    assert plc.read( 40002 ) == 0
    success,elapsed		= waitfor( lambda: plc.read(     1 ) is not None, "00001 polled", **wfkw )
    assert success
    assert elapsed < 1.0
    assert plc.read(     1 ) == 0

    # Now add a bunch of new stuff to poll, and ensure polling occurs.  As we add registers the
    # number of distinct poll ranges will increase, and then decrease as we in-fill and the
    # inter-register range drops below the merge reach 10, allowing the polling to merge ranges.
    # Thus, keep track of the number of registers added, and allow the average poll time to rise
    # and then fall again as the ranges merge:
    #
    # avg.
    # poll
    # time
    #
    #   |
    #   |
    # 4s|         ..
    # 3s|        .  .
    # 2s|     ...    ...
    # 1s|.....          .......
    #  -+----------------------------------
    #   |  10  20  30  40   regs

    # We'll be overwhelming the poller, so it won't be able to poll w/in the target rate, so we'll
    # need to more than double the Nyquist-rate timeout
    wfkw['timeout']	       *= 2.5
    wfkw['intervals']	       *= 2.5
    
    regs			= {}
    extent			= 100 # how many each of coil/holding registers
    total			= extent*2 # total registers in play
    elapsed			= None
    rolling			= None
    rolling_factor		= 1.0/5	# Rolling exponential moving average over last ~8 samples

    # Keep increasing the number of registers polled, up to 1/2 of all registers
    while len( regs ) < total * 50 // 100:
        # Always select a previously unpolled register; however, it might
        # have already been in a merge range; if so, get its current value
        # so we mutate it (forcing it to be re-polled)
        base			= 40001 if random.randint( 0, 1 ) else 1
        r			= None
        while r is None or r in regs:
            r			= random.randint( base, base + extent )
        v			= plc.read( r )
        if v is not None:
            logging.detail( "New reg %5d was already polled due to reach=%d", r, plc.reach )
            regs[r]		= v
        regs[r]			= ( regs[r] ^ 1 if r in regs
                                else random.randint( 0, 65535 ) if base > 40000
                                else random.randint( 0, 1 ) )

        plc.write( r, regs[r] )
        plc.poll( r )
        if len( regs ) > total * 10 // 100:
            # skip to the good parts...  After 10% of all registers are being polled, start
            # calculating.  See how long it takes, on average, to get the newly written register
            # value polled back.
            success,elapsed	= waitfor( lambda: plc.read( r ) == regs[r], "polled %5d == %5d" % ( r, regs[r] ), **wfkw )
            assert success
            rolling		= misc.exponential_moving_average( rolling, elapsed, rolling_factor )

        logging.normal( "%3d/%3d regs: polled %3d ranges w/in %7.3fs. Polled %5d == %5d w/in %7.3fs: avg. %7.3fs (load %3.2f, %3.2f, %3.2f)",
                         len( regs ), total, len( plc.polling ), plc.duration,
                         r, regs[r], elapsed or 0.0, rolling or 0.0, *[misc.nan if load is None else load for load in plc.load] )

        if len( regs ) > total * 20 // 100:
            # after 20%, start looking for the exit (ranges should merge, poll rate fall )
            if rolling < plc.rate:
                break

    assert rolling < plc.rate, \
        "Rolling average poll cycle %7.3fs should have fallen below target poll rate %7.3fs" % ( rolling, plc.rate )

    for r,v in regs.items():
        assert plc.read( r ) == v
Example 22
def process( p, v ):
    logging.normal( "process: %16s == %s", p, v )
    values[p]		= v
Example 23
File: main.py Project: ekw/cpppo
def main( argv=None, attribute_class=device.Attribute, identity_class=None, idle_service=None,
          **kwds ):
    """Pass the desired argv (excluding the program name in sys.arg[0]; typically pass argv=None, which
    is equivalent to argv=sys.argv[1:], the default for argparse.  Requires at least one tag to be
    defined.

    If a cpppo.apidict() is passed for kwds['server']['control'], we'll use it to transmit server
    control signals via its .done, .disable, .timeout and .latency attributes.

    Uses the provided attribute_class (default: device.Attribute) to process all EtherNet/IP
    attribute I/O (eg. Read/Write Tag [Fragmented]) requests.  By default, device.Attribute stores
    and retrieves the supplied data.  To perform other actions (ie. forward the data to your own
    application), derive from device.Attribute, and override the __getitem__ and __setitem__
    methods.

    If an idle_service function is provided, it will be called after a period of latency between
    incoming requests.

    """
    global address
    global options
    global tags
    global srv_ctl
    global latency
    global timeout

    ap				= argparse.ArgumentParser(
        description = "Provide an EtherNet/IP Server",
        epilog = "" )

    ap.add_argument( '-v', '--verbose',
                     default=0, action="count",
                     help="Display logging information." )
    ap.add_argument( '-a', '--address',
                     default=( "%s:%d" % address ),
                     help="EtherNet/IP interface[:port] to bind to (default: %s:%d)" % (
                         address[0], address[1] ))
    ap.add_argument( '-p', '--print', default=False, action='store_true',
                     help="Print a summary of operations to stdout" )
    ap.add_argument( '-l', '--log',
                     help="Log file, if desired" )
    ap.add_argument( '-w', '--web',
                     default="",
                     help="Web API [interface]:[port] to bind to (default: %s, port 80)" % (
                         address[0] ))
    ap.add_argument( '-d', '--delay',
                     help="Delay response to each request by a certain number of seconds (default: 0.0)",
                     default="0.0" )
    ap.add_argument( '-s', '--size',
                     help="Limit EtherNet/IP encapsulated request size to the specified number of bytes (default: None)",
                     default=None )
    ap.add_argument( '-P', '--profile',
                     help="Output profiling data to a file (default: None)",
                     default=None )
    ap.add_argument( 'tags', nargs="+",
                     help="Any tags, their type (default: INT), and number (default: 1), eg: tag=INT[1000]")

    args			= ap.parse_args( argv )

    # Deduce interface:port address to bind, and correct types (default is address, above)
    bind			= args.address.split(':')
    assert 1 <= len( bind ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    bind			= ( str( bind[0] ) if bind[0] else address[0],
                                    int( bind[1] ) if len( bind ) > 1 and bind[1] else address[1] )

    # Set up logging level (-v...) and --log <file>
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )

    # Chain any provided idle_service function with log rotation; these may (also) consult global
    # signal flags such as logrotate_request, so execute supplied functions before logrotate_perform
    idle_service		= [ idle_service ] if idle_service else []
    if args.log:
        # Output logging to a file, and handle UNIX-y log file rotation via 'logrotate', which sends
        # signals to indicate that a service's log file has been moved/renamed and it should re-open
        cpppo.log_cfg['filename']= args.log
        signal.signal( signal.SIGHUP, logrotate_request )
        idle_service.append( logrotate_perform )

    logging.basicConfig( **cpppo.log_cfg )


    # Pull out a 'server.control...' supplied in the keywords, and make certain it's a
    # cpppo.apidict.  We'll use this to transmit control signals to the server thread.  Set the
    # current values to sane initial defaults/conditions.
    if 'server' in kwds:
        assert 'control' in kwds['server'], "A 'server' keyword provided without a 'control' attribute"
        srv_ctl			= cpppo.dotdict( kwds.pop( 'server' ))
        assert isinstance( srv_ctl['control'], cpppo.apidict ), "The server.control... must be a cpppo.apidict"
    else:
        srv_ctl.control		= cpppo.apidict( timeout=timeout )

    srv_ctl.control['done']	= False
    srv_ctl.control['disable']	= False
    srv_ctl.control.setdefault( 'latency', latency )

    # Global options data.  Copy any remaining keyword args supplied to main().  This could
    # include an alternative enip_process, for example, instead of defaulting to logix.process.
    options.update( kwds )

    # Specify a response delay.  The options.delay is another dotdict() layer, so it's attributes
    # (eg. .value, .range) are available to the web API for manipulation.  Therefore, they can be
    # set to arbitrary values at random times!  However, the type will be retained.
    def delay_range( *args, **kwds ):
        """If a delay.range like ".1-.9" is specified, then change the delay.value every second to something
        in that range."""
        assert 'delay' in kwds and 'range' in kwds['delay'] and '-' in kwds['delay']['range'], \
            "No delay=#-# specified"
        log.normal( "Delaying all responses by %s seconds", kwds['delay']['range'] )
        while True:
            # Once we start, changes to delay.range will be re-evaluated each loop
            time.sleep( 1 )
            try:
                lo,hi		= map( float, kwds['delay']['range'].split( '-' ))
                kwds['delay']['value'] = random.uniform( lo, hi )
                log.info( "Mutated delay == %g", kwds['delay']['value'] )
            except Exception as exc:
                log.warning( "No delay=#[.#]-#[.#] range specified: %s", exc )

    options.delay		= cpppo.dotdict()
    try:
        options.delay.value	= float( args.delay )
        log.normal( "Delaying all responses by %r seconds" , options.delay.value )
    except ValueError:
        assert '-' in args.delay, \
            "Unrecognized --delay=%r option" % args.delay
        # A range #-#; set up a thread to mutate the option.delay.value over the .range
        options.delay.range	= args.delay
        options.delay.value	= 0.0
        mutator			= threading.Thread( target=delay_range, kwargs=options )
        mutator.daemon		= True
        mutator.start()

    # Create all the specified tags/Attributes.  The enip_process function will (somehow) assign the
    # given tag name to reference the specified Attribute.  We'll define an Attribute to print
    # I/O if args.print is specified; reads will only be logged at logging.NORMAL and above.
    class Attribute_print( attribute_class ):
        def __getitem__( self, key ):
            value		= super( Attribute_print, self ).__getitem__( key )
            if log.isEnabledFor( logging.NORMAL ):
                print( "%20s[%5s-%-5s] == %s" % (
                    self.name, 
                    key.indices( len( self ))[0]   if isinstance( key, slice ) else key,
                    key.indices( len( self ))[1]-1 if isinstance( key, slice ) else key,
                    value ))
            return value

        def __setitem__( self, key, value ):
            super( Attribute_print, self ).__setitem__( key, value )
            print( "%20s[%5s-%-5s] <= %s" % (
                self.name, 
                key.indices( len( self ))[0]   if isinstance( key, slice ) else key,
                key.indices( len( self ))[1]-1 if isinstance( key, slice ) else key,
                value ))

    for t in args.tags:
        tag_name, rest		= t, ''
        if '=' in tag_name:
            tag_name, rest	= tag_name.split( '=', 1 )
        tag_type, rest		= rest or 'INT', ''
        tag_size		= 1
        if '[' in tag_type:
            tag_type, rest	= tag_type.split( '[', 1 )
            assert ']' in rest, "Invalid tag; mis-matched [...]"
            tag_size, rest	= rest.split( ']', 1 )
        assert not rest, "Invalid tag specified; expected tag=<type>[<size>]: %r" % t
        tag_type		= str( tag_type ).upper()
        typenames		= {"INT": parser.INT, "DINT": parser.DINT, "SINT": parser.SINT, "REAL": parser.REAL }
        assert tag_type in typenames, "Invalid tag type; must be one of %r" % list( typenames.keys() )
        tag_default		= 0.0 if tag_type == "REAL" else 0
        try:
            tag_size		= int( tag_size )
        except ValueError:
            raise AssertionError( "Invalid tag size: %r" % tag_size )

        # Ready to create the tag and its Attribute (and error code to return, if any).  If tag_size
        # is 1, it will be a scalar Attribute.  Since the tag_name may contain '.', we don't want
        # the normal dotdict.__setitem__ resolution to parse it; use plain dict.__setitem__.
        log.normal( "Creating tag: %s=%s[%d]", tag_name, tag_type, tag_size )
        tag_entry		= cpppo.dotdict()
        tag_entry.attribute	= ( Attribute_print if args.print else attribute_class )(
            tag_name, typenames[tag_type], default=( tag_default if tag_size == 1 else [tag_default] * tag_size ))
        tag_entry.error		= 0x00
        dict.__setitem__( tags, tag_name, tag_entry )

    # Use the Logix simulator by default (unless some other one was supplied as a keyword option to
    # main(), loaded above into 'options').  This key indexes an immutable value (not another
    # dotdict layer), so is not available for the web API to report/manipulate.
    options.setdefault( 'enip_process', logix.process )
    options.setdefault( 'identity_class', identity_class )

    # The Web API

    # Deduce web interface:port address to bind, and correct types (default is address, above).
    # Default to the same interface as we're bound to, port 80.  We'll only start if non-empty --web
    # was provided, though (even if it's just ':', to get all defaults).  Usually you'll want to
    # specify at least --web :[<port>].
    http			= args.web.split(':')
    assert 1 <= len( http ) <= 2, "Invalid --web [<interface>]:[<port>]: %s" % args.web
    http			= ( str( http[0] ) if http[0] else bind[0],
                                    int( http[1] ) if len( http ) > 1 and http[1] else 80 )


    if args.web:
        assert 'web' in sys.modules, "Failed to import web API module; --web option not available.  Run 'pip install web.py'"
        logging.normal( "EtherNet/IP Simulator Web API Server: %r" % ( http, ))
        webserver		= threading.Thread( target=web_api, kwargs={'http': http} )
        webserver.daemon	= True
        webserver.start()

        
    # The EtherNet/IP Simulator.  Pass all the top-level options keys/values as keywords, and pass
    # the entire tags dotdict as a tags=... keyword.  The server_main server.control signals (.done,
    # .disable) are also passed as the server= keyword.  We are using a cpppo.apidict with a long
    # timeout; this will block the web API for several seconds to allow all threads to respond to
    # the signals delivered via the web API.
    logging.normal( "EtherNet/IP Simulator: %r" % ( bind, ))
    kwargs			= dict( options, latency=latency, size=args.size, tags=tags, server=srv_ctl )

    tf				= network.server_thread
    tf_kwds			= dict()
    if args.profile:
        tf			= network.server_thread_profiling
        tf_kwds['filename']	= args.profile

    disabled			= False	# Recognize toggling between en/disabled
    while not srv_ctl.control.done:
        if not srv_ctl.control.disable:
            if disabled:
                logging.detail( "EtherNet/IP Server enabled" )
                disabled = False
            network.server_main( address=bind, target=enip_srv, kwargs=kwargs,
                                 idle_service=lambda: [ f() for f in idle_service ], # list, not lazy map, so each f() runs
                                 thread_factory=tf, **tf_kwds )
        else:
            if not disabled:
                logging.detail( "EtherNet/IP Server disabled" )
                disabled = True
            time.sleep( latency )            # Still disabled; wait a bit

    return 0
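
The tag=<type>[<size>] parsing above is self-contained enough to lift out.  A minimal sketch of the same grammar as a reusable function (parse_tag_spec is a hypothetical name, not part of cpppo's API):

def parse_tag_spec( text ):
    """Parse 'Tag', 'Tag=DINT' or 'Tag=DINT[10]' --> ( 'Tag', 'DINT', 10 )."""
    tag_name, _, rest	= text.partition( '=' )
    tag_type, tag_size	= rest or 'INT', 1
    if '[' in tag_type:
        tag_type, _, size = tag_type.partition( '[' )
        assert size.endswith( ']' ), "Invalid tag; mismatched [...]"
        tag_size	= int( size[:-1] )
    tag_type		= tag_type.upper()
    assert tag_type in ( "INT", "DINT", "SINT", "REAL" ), \
        "Invalid tag type; must be one of INT, DINT, SINT, REAL"
    return tag_name, tag_type, tag_size

assert parse_tag_spec( "Motor=REAL[4]" ) == ( "Motor", "REAL", 4 )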
Esempio n. 24
0
def test_history_performance():
    try:
        tracemalloc.start()
    except Exception:
        pass		# tracemalloc unavailable; proceed without memory profiling

    for _ in range( 3 ):
        path		= "/tmp/test_performance_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        day		= 24*60*60
        dur		= 3*day		# a few days worth of data
        regstps		= 0.0,5.0	# 0-5secs between updates
        numfiles	= dur//day+1	# ~1 file/day, but at least 2
        values		= {}		# Initial register values
        regscount	= 1000		# Number of different registers
        regschanged	= 1,10		# From 1-10 registers per row
        regsbase	= 40001

        start		= timer()

        now = beg	= start - dur
        linecnt		= 0
        for e in reversed( range( numfiles )):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                if values:
                    l.write( values, now=now ); linecnt += 1
                while now < beg + len(files) * dur/numfiles:
                    lst	= now
                    now += random.uniform( *regstps )
                    assert now >= lst
                    assert timestamp( now ) >= timestamp( lst ), "now: %s, timestamp(now): %s" % ( now, timestamp( now ))
                    updates = {}
                    for _ in range( random.randint( *regschanged )):
                        updates[random.randint( regsbase, regsbase + regscount - 1 )] = random.randint( 0, (1<<16) - 1 )
                    values.update( updates )
                    l.write( updates, now=now ); linecnt += 1
                lst 	= now
                now    += random.uniform( *regstps )
                assert now >= lst
                assert timestamp( now ) >= timestamp( lst )
            if e:
                # Compress .1 onward using a random format; randomly delete origin uncompressed file
                # so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    fd.write( open( f, 'rb' ).read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        logging.warning( "Generated data in %.3fs; lines: %d", timer() - start, linecnt )

        # Start somewhere within the first 0-1% of the duration beyond beg, forcing the loader to
        # look back to find the first file.  Try to play it all back in the next 'playback' seconds
        # (just to push it to the max), in 'chunks' pieces.
        historical	= timestamp( random.uniform( beg + dur*0/100, beg + dur*1/100 ))
        basis		= timer()
        playback	= 2.0 * dur/day # Can sustain ~2 seconds / day of history on a single CPU
        chunks		= 1000
        factor		= dur / playback
        lookahead	= 60.0
        duration	= None
        if random.choice( (True,False) ):
            duration	= random.uniform( dur * 98/100, dur * 102/100 )

        begoff		= historical.value - beg
        endoff		= 0 if duration is None else (( historical.value + duration ) - ( beg + dur ))
        logging.warning( "Playback starts at beginning %s %s, duration %s, ends at ending %s %s",
                         timestamp( beg ), format_offset( begoff, ms=False ),
                         None if duration is None else format_offset( duration, ms=False, symbols='-+' ),
                         timestamp( beg + dur ), format_offset( endoff, ms=False ))

        ld		= loader(
            path, historical=historical, basis=basis, factor=factor, lookahead=lookahead, duration=duration )
        eventcnt	= 0
        slept		= 0
        cur		= None
        while ld:
            once	= False
            while ld.state < ld.AWAITING or not once:
                once		= True
                upcoming	= None
                limit		= random.randint( 0, 250 )
                if random.choice( (True,False) ):
                    upcoming	= ld.advance()
                    if random.choice( (True,False) ) and cur:
                        # ~25% of the time, provide an 'upcoming' timestamp that is between the
                        # current advancing historical time and the last load time.
                        upcoming-= random.uniform( 0, upcoming.value - cur.value )
                cur,events	= ld.load( upcoming=upcoming, limit=limit )
                eventcnt       += len( events )
                advance		= ld.advance()
                offset		= advance.value - cur.value
                logging.detail( "%s loaded up to %s (%s w/ upcoming %14s); %4d future, %4d values: %4d events / %4d limit" ,
                                ld, cur, format_offset( offset ),
                                format_offset( upcoming.value - advance.value ) if upcoming is not None else None,
                                len( ld.future ), len( ld.values ), len( events ), limit )

            logging.warning( "%s loaded up to %s; %3d future, %4d values: %6d events total",
                                ld, cur, len( ld.future ), len( ld.values ), eventcnt )
            try:
                snapshot	= tracemalloc.take_snapshot()
                display_top( snapshot, limit=10 )
            except Exception:
                pass

            time.sleep( playback/chunks )
            slept	       += playback/chunks

        elapsed		= timer() - basis
        eventtps	= eventcnt // ( elapsed - slept )
        logging.error( "Playback in %.3fs (slept %.3fs); events: %d ==> %d historical records/sec",
                       elapsed, slept, eventcnt, eventtps )
        if not logging.getLogger().isEnabledFor( logging.NORMAL ):
            # Ludicrously low threshold, to pass tests on very slow machines
            assert eventtps >= 1000, \
                "Historical event processing performance low: %d records/sec" % eventtps
        try:
            display_biggest_traceback()
        except Exception:
            pass

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        '''
        for f in files:
            logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
        '''
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except OSError:
                pass
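
The compressed history copies above are written through an opener dispatched on the file extension.  A plausible stand-in using only the standard library (the real opener in cpppo's history module may differ, eg. in its fallback behaviour):

import bz2
import gzip
import lzma

def opener_sketch( path, mode='rb' ):
    # Dispatch on extension, as the opener used above appears to do (assumed)
    if path.endswith( '.gz' ):
        return gzip.open( path, mode )
    if path.endswith( '.bz2' ):
        return bz2.open( path, mode )
    if path.endswith( '.xz' ):
        return lzma.open( path, mode )
    return open( path, mode )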
Esempio n. 25
0
    def request(self, data, addr=None):
        """Any exception should result in a reply being generated with a non-zero status."""

        # See if this request is for us; if not, route to the correct Object, and return its result.
        # If the resolution/lookup fails (eg. bad symbolic Tag); ignore it (return False on error)
        # and continue processing, so we can return a proper .status error code from the actual
        # request, below.
        target = self.route(data, fail=Message_Router.ROUTE_FALSE)
        if target:
            if log.isEnabledFor(logging.DETAIL):
                log.detail("%s Routing to %s: %s", self, target,
                           enip_format(data))
            return target.request(data, addr=addr)

        if log.isEnabledFor(logging.DETAIL):
            log.detail("%s Request: %s", self, enip_format(data))
        # This request is for this Object.

        # Pick out our services added at this level.  If not recognized, let superclass try; it'll
        # return an appropriate error code if not recognized.
        if (data.get('service') == self.RD_VAR_REQ
                or self.RD_VAR_CTX in data and data.setdefault(
                    'service', self.RD_VAR_REQ) == self.RD_VAR_REQ):
            # Read Dynamic Variable --> Read Dynamic Variable Reply.
            pass
        elif (data.get('service') == self.RD_STS_REQ
              or self.RD_STS_CTX in data and data.setdefault(
                  'service', self.RD_STS_REQ) == self.RD_STS_REQ):
            # Read Additional Status --> Read Additional Status Reply.
            pass
        elif (data.get('service') == self.RD_INF_REQ
              or self.RD_INF_CTX in data and data.setdefault(
                  'service', self.RD_INF_REQ) == self.RD_INF_REQ):
            # Get Device Info --> Get Device Info Reply.
            pass
        elif (data.get('service') == self.PT_INI_REQ
              or self.PT_INI_CTX in data and data.setdefault(
                  'service', self.PT_INI_REQ) == self.PT_INI_REQ):
            # Pass-thru Init --> Pass-thru Init Reply.
            pass
        elif (data.get('service') == self.PT_QRY_REQ
              or self.PT_QRY_CTX in data and data.setdefault(
                  'service', self.PT_QRY_REQ) == self.PT_QRY_REQ):
            # Pass-thru Query --> Pass-thru Query Reply.
            pass
        elif (data.get('service') == self.PT_FLQ_REQ
              or self.PT_FLQ_CTX in data and data.setdefault(
                  'service', self.PT_FLQ_REQ) == self.PT_FLQ_REQ):
            # Pass-thru Flush Queue --> Pass-thru Flush Queue Reply.
            pass
        else:
            # Not recognized; more generic command?
            return super(HART, self).request(data, addr=addr)

        # It is a recognized HART Object request.  Set the data.status to the appropriate error
        # code, should a failure occur at that location during processing.  We will be returning a
        # reply beyond this point; any exceptions generated will be captured, logged and an
        # appropriate reply .status error code returned.

        if not hasattr(self, 'hart_command'):
            self.hart_command = None  # Any HART Pass-thru command in process: None or (<command>,<command_data>)

        def fldnam_attribute(typ, fldnam, dfl):
            insnam = "HART_{channel}_Data".format(channel=self.instance_id - 1)
            tag = '.'.join((insnam, fldnam))
            res = resolve_tag(tag)
            if not res:
                # Not found; create one.  Use Class ID 0xF35D, same Instance ID as self.
                # No one else should be creating Instances of this Class ID...
                clsid = HART_Data.class_id
                insid = self.instance_id
                obj = lookup(clsid, insid)
                if not obj:
                    obj = HART_Data(insnam, instance_id=insid)
                att = Attribute_print(name=tag, type_cls=typ,
                                      default=dfl)  # eg. 'PV', REAL
                attid = 0
                if obj.attribute:
                    attid = int(sorted(obj.attribute, key=misc.natural)[-1])
                attid += 1
                obj.attribute[str(attid)] = att
                log.normal(
                    "%-24s Instance %3d, Attribute %3d added: %s (Tag: %s)",
                    obj, insid, attid, att, tag)
                res = redirect_tag(tag, {
                    'class': clsid,
                    'instance': insid,
                    'attribute': attid
                })
                assert resolve_tag( tag ) == res, \
                    "Failed to create '{tag}' Tag pointing to {res!r}; found: {out!r}".format(
                        tag=tag, res=res, out=resolve_tag( tag ))
            # res is a (clsid,insid,attid) of an Attribute containing this fldnam's data.
            attribute = lookup(*res)
            return attribute

        data.service |= 0x80
        data.status = 0x08  # Service not supported, if not recognized or fail to access
        try:
            if data.service == self.RD_VAR_RPY:
                data.read_var = dotdict()
                for typ, fldnam, dfl in self.RD_VAR_RPY_FLD:
                    attribute = fldnam_attribute(typ, fldnam, dfl)
                    data.read_var[fldnam] = attribute[0]
                    logging.detail("%s <-- %s == %s", fldnam, attribute,
                                   data.read_var[fldnam])
                data.read_var.status = 0x00
                data.status = 0
            elif data.service == self.PT_INI_RPY:
                # Actually store the command, return a proper handle.  The status is actually a HART
                # command result code where 33 means initiated.  Unlike a real HART I/O card, we'll
                # just discard any previous HART pass-thru command (we don't have a stack).
                data.init.handle = 99
                data.init.queue_space = 200
                if self.hart_command:
                    data.init.status = random.choice(
                        (32, 33))  # 32 busy, 33 initiated, 35 device offline
                    if data.init.status == 33:
                        self.hart_command = None
                else:
                    data.init.status = random.choice((33, 35))
                if self.hart_command is None and data.init.status == 33:
                    self.hart_command = data.init.command, data.init.get(
                        'command_data', [])
                logging.normal(
                    "%s: HART Pass-thru Init Command %r: %s", self,
                    self.hart_command, "busy" if data.init.status == 33 else
                    "initiated" if data.init.status == 32 else "unknown: %s" %
                    data.init.status)
                logging.detail("%s HART Pass-thru Init: %r", self, data)
                data.status = 0
            elif data.service == self.PT_QRY_RPY:
                # TODO: just return a single network byte ordered real, for now, as if it's a HART
                # Read Primary Variable request.  We're returning the Input Tag version of the
                # pass-thru command (not the CIP version)
                data.query.reply_status = 0
                data.query.fld_dev_status = 0
                data.query.reply_data = []

                if self.hart_command is not None:
                    data.query.status = random.choice((0, 34, 34, 34))
                    data.query.command = self.hart_command[
                        0]  # ignore command_data
                else:
                    data.query.status = 35  # 0 success, 34 running, 35 dead
                    data.query.command = 0

                if self.hart_command and self.hart_command[
                        0] == 1 and data.query.status == 0:
                    # PV units code (unknown? not in Input Tag type command) + 4-byte PV REAL (network order)
                    attribute = fldnam_attribute(REAL, 'PV', 1.234)
                    val = attribute[0]
                    data.query.reply_data += [
                        b for b in bytearray(REAL_network.produce(val))
                    ]
                elif self.hart_command and self.hart_command[
                        0] == 2 and data.query.status == 0:
                    # current and percent of range.
                    attribute = fldnam_attribute(REAL, 'loop_current',
                                                 random.uniform(4, 20))
                    cur = attribute[0]
                    pct = 0.0 if cur < 4 else 100.0 if cur > 20 else (
                        cur - 4) / (20 - 4) * 100
                    data.query.reply_data += [
                        b for b in bytearray(REAL_network.produce(cur))
                    ]
                    data.query.reply_data += [
                        b for b in bytearray(REAL_network.produce(pct))
                    ]
                elif self.hart_command and self.hart_command[
                        0] == 3 and data.query.status == 0:
                    insnam = "HART_{channel}_Data".format(
                        channel=self.instance_id - 1)
                    for v in ('PV', 'SV', 'TV', 'FV'):
                        attribute = fldnam_attribute(REAL, v,
                                                     random.uniform(0, 1))
                        val = attribute[0]
                        data.query.reply_data += [
                            b for b in bytearray(REAL_network.produce(val))
                        ]
                data.query.reply_size = len(data.query.reply_data)
                logging.normal(
                    "%s: HART Pass-thru Query Command %r: %s", self,
                    self.hart_command, "success" if data.query.status == 0 else
                    "running" if data.query.status == 34 else
                    "dead" if data.query.status == 35 else "unknown: %s" %
                    data.query.status)

                if data.query.status in (0, 35):
                    self.hart_command = None
                logging.detail("%s HART Pass-thru Query: %r", self, data)
                data.status = 0
            else:
                assert False, "Not Implemented: {data!r}".format(data=data)

            # Success (data.status == 0x00), or failure w/ non-zero data.status

        except Exception as exc:
            # On Exception, if we haven't specified a more detailed error code, return General
            # Error.  Remember: 0x06 (Insufficient Packet Space) is a NORMAL response to a successful
            # Read Tag Fragmented that returns a subset of the requested data.
            log.normal(
                "%r Service 0x%02x %s failed with Exception: %s\nRequest: %s\n%s",
                self, data.service if 'service' in data else 0,
                (self.service[data.service] if 'service' in data
                 and data.service in self.service else "(Unknown)"), exc,
                enip_format(data),
                ('' if log.getEffectiveLevel() >= logging.NORMAL else ''.join(
                    traceback.format_exception(*sys.exc_info()))))
            assert data.status, \
                "Implementation error: must specify .status before raising Exception!"
            pass

        # Always produce a response payload; if a failure occurred, will contain an error status
        if log.isEnabledFor(logging.DETAIL):
            log.detail("%s Response: Service 0x%02x %s %s", self,
                       data.service if 'service' in data else 0,
                       (self.service[data.service] if 'service' in data
                        and data.service in self.service else "(Unknown)"),
                       enip_format(data))
        data.input = bytearray(self.produce(data))
        return True
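
The reply_data bytes above come from REAL_network.produce which, from its use here, must emit a 4-byte network-order (big-endian) IEEE-754 float.  Under that assumption, the struct equivalent is:

import struct

def real_network_produce( value ):
    # '>f' == big-endian ("network order") 32-bit float; assumed to match
    # the REAL_network.produce output consumed above
    return struct.pack( '>f', value )

assert list( bytearray( real_network_produce( 0.0 ))) == [0, 0, 0, 0]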
Esempio n. 26
0
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\
    Begin polling the designated register range(s), optionally writing initial values to them.

    Register range(s) and value(s) must be supplied:
    
      <begin>[-<end>]
      <begin>[-<end>]=<val>,...
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """,
    )
    parser.add_argument("-v", "--verbose", default=0, action="count", help="Display logging information.")
    parser.add_argument("-l", "--log", type=str, default=None, help="Direct log output to the specified file")
    parser.add_argument(
        "-a", "--address", default="0.0.0.0:502", help="Default [interface][:port] to bind to (default: any, port 502)"
    )
    parser.add_argument("-r", "--reach", default=1, help="Merge polls within <reach> registers of each-other")
    parser.add_argument("-R", "--rate", default=1.0, help="Target poll rate")
    parser.add_argument(
        "-t", "--timeout", default=Defaults.Timeout, help="I/O Timeout (default: %s)" % (Defaults.Timeout)
    )
    parser.add_argument("registers", nargs="+")
    args = parser.parse_args()

    # Deduce logging level and target file (if any)
    levelmap = {0: logging.WARNING, 1: logging.NORMAL, 2: logging.DETAIL, 3: logging.INFO, 4: logging.DEBUG}
    cpppo.log_cfg["level"] = levelmap[args.verbose] if args.verbose in levelmap else logging.DEBUG
    if args.log:
        cpppo.log_cfg["filename"] = args.log
    logging.basicConfig(**cpppo.log_cfg)

    # (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only
    # interface is supplied.  After this block, 'address' is always a tuple
    # like ("interface",502)
    address = None
    if args.address:
        address = args.address.split(":")
        assert 1 <= len(address) <= 2
        address = (str(address[0]), int(address[1]) if len(address) > 1 else Defaults.Port)
        log.info("--address '%s' produces address=%r" % (args.address, address))

    # Set up the Modbus/TCP I/O timeout to use, for all connect and read/write transactions
    Defaults.Timeout = float(args.timeout)

    # Start the PLC poller (and perform any initial writes indicated)
    poller = poller_modbus("Modbus/TCP", host=address[0], port=address[1], reach=int(args.reach), rate=float(args.rate))

    for txt in args.registers:
        beg, end, val = register_decode(txt)  # beg-end is inclusive
        for reg in range(beg, end + 1):
            poller.poll(reg)
        if val:
            # Value(s) were supplied for the register(s) range; write 'em.  This results in a
            # WriteMultipleRegistersRequest if val is an iterable, or a WriteSingle...  if not.
            # We'll need to shatter/merge the register range into appropriate sized chunks for a
            # valid Modbus/TCP request, and then take the appropriate number of values for each.
            for base, length in merge([(beg, end - beg + 1)]):
                poller.write(base, val[0] if length == 1 else val[:length])
                val = val[length:]

    load = ""
    fail = ""
    poll = ""
    regs = {}
    while True:
        loadcur = "%.2f" % (poller.load[0] if poller.load[0] else 0)
        if loadcur != load:
            load = loadcur
            logging.detail("load: %s", loadcur)
        failcur = ", ".join([("%d-%d" % (b, b + c - 1)) for b, c in poller.failing])
        pollcur = ", ".join([("%d-%d" % (b, b + c - 1)) for b, c in poller.polling])
        if failcur != fail or pollcur != poll:
            fail, poll = failcur, pollcur
            logging.normal("failing: %s, polling: %s", fail, poll)
        # log data changes
        for beg, cnt in poller.polling:
            for reg in range(beg, beg + cnt):
                val = poller.read(reg)
                old = regs.get(reg)  # may be None
                if val != old:
                    logging.warning("%5d == %5d (was: %s)" % (reg, val, old))
                    regs[reg] = val

        time.sleep(1)
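
register_decode isn't shown here; from its use above it must turn '<begin>[-<end>][=<val>,...]' into an inclusive (beg, end, values) triple.  A hedged sketch of that contract (register_decode_sketch is illustrative, not the actual implementation):

def register_decode_sketch( txt ):
    rng, _, val = txt.partition( '=' )
    beg, _, end = rng.partition( '-' )
    beg = int( beg )
    end = int( end ) if end else beg
    values = [ int( v ) for v in val.split( ',' ) ] if val else []
    return beg, end, values

assert register_decode_sketch( "40001-40003=1,2,3" ) == ( 40001, 40003, [1, 2, 3] )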
Esempio n. 27
0
def test_history_unparsable():
    """Test history files rendered unparsable due to dropouts.  This should be handled with no problem
    except if the initial frame of register data on the first file is missing.

    """
    for _ in range( 3 ):
        path		= "/tmp/test_unparsable_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases, containing records that are invalid.
        now		= timer()
        v		= 10000
        secs		= 10
        secs_ext	=  1.0  # adjust range of history to target out by this +/-
        basisext	=   .5  # adjust start basis time from now by this +/-
        minfactor	=   .25
        maxfactor	=  2.0
        maxlatency	=   .25
        # 1/N file lines corrupted (kills 2 records; the current and following).  None --> no errors
        maxerror	= random.choice( [ None, 3, 10, 100 ] )
        oldest		= None
        newest		= None
        logging.normal( "Corrupting %s of all history lines", None if not maxerror else "1/%d" % maxerror )
        for e in range( secs ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                ssend	= 100
                for ss in range( 0, ssend ): # subseconds up to but not including ssend...
                    js	= json.dumps( { 40001: v + e * 1000 + (ss * 1000 // ssend) } ) + '\n'
                    if maxerror and not random.randint( 0, maxerror ):
                        # Truncate some of the records (as would occur in a filesystem full or halt)
                        js = js[:random.randint( 0, len( js ) - 1)]
                    ts	= timestamp( now - e + ss/ssend )
                    if oldest is None or ts < oldest:
                        oldest = ts
                    if newest is None or ts > newest:
                        newest = ts
                    l._append( '\t'.join( (str( ts ),json.dumps( None ),js) ) )

        # Load the historical records.  This will be robust against all errors except if the first
        # line of the first history file opened is corrupt, and we therefore cannot get the initial
        # frame of register data.
        historical	= timestamp( now - random.uniform( -secs_ext, secs + secs_ext ))
        basisdelay	= random.uniform( -basisext, +basisext )
        basis		= now + basisdelay
        factor		= random.uniform( minfactor, maxfactor )
        lookahead	= 1.0
        on_bad_iframe	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        on_bad_data	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        logging.normal( "Playback starts %s (%.1f%%) of history %s-%s, in %.3fs, at x %.2f rate w/%.1fs lookahead, on_bad_iframe=%s, on_bad_data=%s",
                        historical, ( historical.value - oldest.value ) * 100 / ( newest.value - oldest.value ),
                        oldest, newest, basisdelay, factor, lookahead,
                        "SUPPRESS" if on_bad_iframe == loader.SUPPRESS else "FAIL" if on_bad_iframe  == loader.FAIL else "RAISE",
                        "SUPPRESS" if on_bad_data   == loader.SUPPRESS else "FAIL" if on_bad_data    == loader.FAIL else "RAISE" )

        ld		= loader( path,
                                historical=historical, basis=basis, factor=factor, lookahead=lookahead )
        dur		= basisext + ( secs_ext + secs + secs_ext ) / factor + basisext + 2*maxlatency # Don't be tooo strict
        beg		= timer()
        count		= 0

        while ld:
            assert timer() - beg < dur, "The loader should have ended"
            cur,events	= ld.load( on_bad_iframe=on_bad_iframe, on_bad_data=on_bad_data )
            count      += len( events )
            logging.normal( "%s loaded up to %s; %d future, %d values: %d events: %s",
                            ld, cur, len( ld.future ), len( ld.values ), len( events ), 
                            repr( events ) if logging.root.isEnabledFor( logging.DEBUG ) else reprlib.repr( events ))
            time.sleep( random.uniform( 0.0, maxlatency ))

        if on_bad_data == ld.FAIL or on_bad_iframe == ld.FAIL:
            assert ld.state in (ld.COMPLETE, ld.FAILED)
        else:
            assert ld.state == ld.COMPLETE

    except IframeError as exc:
        logging.warning( "Detected error on initial frame of registers in first history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED and count == 0, "Shouldn't have loaded any events -- only iframe failures expected"

    except DataError as exc:
        logging.warning( "Detected error on registers data in a history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        for f in files:
            if os.path.exists( f ):
                logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
            else:
                logging.warning( "%s: Couldn't find file", f )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except OSError:
                pass
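
Each history line written above is '<timestamp>\t<serial>\t<JSON payload>', and the simulated dropouts truncate the JSON tail.  A minimal sketch of the per-line tolerance the loader needs (parse_history_line is hypothetical; cpppo's actual parsing differs):

import json

def parse_history_line( line ):
    # Return ( ts_text, serial, data ), or None for a corrupt/truncated line.
    # The timestamp field is kept as text here; cpppo parses it further.
    try:
        ts_text, serial, js = line.rstrip( '\n' ).split( '\t', 2 )
        return ts_text, json.loads( serial ), json.loads( js )
    except ValueError:
        return None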
Esempio n. 28
0
def main( argv=None ):
    """Get Attribute(s) Single/All the specified Instance or Attribute level address(es)

    """
    ap				= argparse.ArgumentParser(
        description = "An EtherNet/IP Get Attribute Single/All and Set Attribute Single client",
        formatter_class = argparse.RawDescriptionHelpFormatter,
        epilog = """\

One or more EtherNet/IP CIP Object/Instance Attributes may be read or
written.  The full format for specifying a tag and an operation is:

    @<Object>/<Instance>/<Attribute>[=<value>,<value>...]

The default Send Path is '@6/1', and the default Route Path is [{"link": 0,
"port":1}].  This should work with a device that can route requests to links
(eg. a *Logix Controller), with the Processor in slot 1 of the chassis.  If you
have a simpler device (ie. something that does not route requests, such as an AB
PowerFlex), then you may want to specify:

    --send-path='' --route-path=false

to eliminate the *Logix-style Unconnected Send (service 0x52) encapsulation
which is required to carry this Send/Route Path data. """ )

    ap.add_argument( '-a', '--address',
                     default=( "%s:%d" % enip.address ),
                     help="EtherNet/IP interface[:port] to connect to (default: %s:%d)" % (
                         enip.address[0], enip.address[1] ))
    ap.add_argument( '-m', '--multiple', action='store_true',
                     help="Use Multiple Service Packet request targeting ~500 bytes (default: False)" )
    ap.add_argument( '-d', '--depth',
                     default=0,
                     help="Pipelining depth" )
    ap.add_argument( '-t', '--timeout',
                     default=5.0,
                     help="EtherNet/IP timeout (default: 5s)" )
    ap.add_argument( '-v', '--verbose', action="count",
                     default=0, 
                     help="Display logging information." )
    ap.add_argument( '-l', '--log',
                     help="Log file, if desired" )
    ap.add_argument( '--route-path',
                     default=None,
                     help="Route Path, in JSON (default: %r); 0/false to specify no/empty route_path" % (
                         str( json.dumps( client.connector.route_path_default ))))
    ap.add_argument( '--send-path',
                     default=None,
                     help="Send Path to UCMM (default: @6/1); Specify an empty string '' for no Send Path" )
    ap.add_argument( '-P', '--profile', action='store_true',
                     help="Activate profiling (default: False)" )
    ap.add_argument( 'tags', nargs="+",
                     help="Class/Instance[/Attribute] to get (- to read from stdin), eg: @2/1 @2/1/1" )

    args			= ap.parse_args( argv )

    # Set up logging level (-v...) and --log <file>
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename'] = args.log

    logging.basicConfig( **cpppo.log_cfg )

    addr			= args.address.split(':')
    assert 1 <= len( addr ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    addr			= ( str( addr[0] ) if addr[0] else enip.address[0],
                                    int( addr[1] ) if len( addr ) > 1 and addr[1] else enip.address[1] )
    timeout			= float( args.timeout )
    depth			= int( args.depth )
    multiple			= 500 if args.multiple else 0
    route_path			= json.loads( args.route_path ) if args.route_path else None # may be None/0/False
    send_path			= args.send_path

    if '-' in args.tags:
        # Collect tags from sys.stdin 'til EOF, at position of '-' in argument list
        minus			= args.tags.index( '-' )
        tags			= itertools.chain( args.tags[:minus], sys.stdin, args.tags[minus+1:] )
    else:
        tags			= args.tags

    profiler			= None
    if args.profile:
        import cProfile as profile
        import pstats
        try:
            import StringIO		# Python 2
        except ImportError:
            import io as StringIO	# Python 3; io.StringIO is the equivalent here
        profiler		= profile.Profile()

    failures			= 0
    with client.connector( host=addr[0], port=addr[1], timeout=timeout, profiler=profiler ) as connection:
        idx			= -1
        start			= cpppo.timer()
        operations		= attribute_operations( tags, route_path=route_path, send_path=send_path )
        for idx,dsc,op,rpy,sts,val in connection.pipeline(
                operations=operations, depth=depth, multiple=multiple, timeout=timeout ):
            print( "%s: %3d: %s == %s" % ( time.ctime(), idx, dsc, val ))
            failures	       += 1 if sts else 0
        elapsed			= cpppo.timer() - start
        logging.normal( "%3d requests in %7.3fs at pipeline depth %2s; %7.3f TPS" % (
            idx+1, elapsed, args.depth, (idx+1) / elapsed ))

    if profiler:
        s			= StringIO.StringIO()
        ps			= pstats.Stats( profiler, stream=s )
        for sortby in [ 'cumulative', 'time' ]:
            ps.sort_stats( sortby )
            ps.print_stats( 25 )
        print( s.getvalue() )

    return 1 if failures else 0
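
The -v count to log level mapping here recurs in several of these mains; cpppo inserts its NORMAL and DETAIL levels between WARNING and INFO.  It could be factored once, eg. (log_level is a hypothetical helper):

def log_level( verbose ):
    # Saturates at DEBUG for -vvvv and beyond; assumes cpppo has installed
    # the logging.NORMAL/DETAIL levels, as the code above does
    levelmap = { 0: logging.WARNING, 1: logging.NORMAL, 2: logging.DETAIL,
                 3: logging.INFO,    4: logging.DEBUG }
    return levelmap.get( verbose, logging.DEBUG )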
Esempio n. 29
0
def test_client_api_simple():
    taglen = 100  # able to fit request for Attribute into 1 packet
    server_addr = ('localhost', 12398)
    server_kwds = dotdict({
        'argv': [
            '-v',
            '--address',
            '%s:%d' % server_addr,
            'Int@0x99/1/1=INT[%d]' % (taglen),
            'Real@0x99/1/2=REAL[%d]' % (taglen),
            'DInt@0x99/1/3=DINT[%d]' % (taglen),
        ],
        'server': {
            'control': apidict(enip.timeout, {'done': False}),
        },
    })
    server_func = enip_main

    Process = threading.Thread  # multiprocessing.Process
    server = Process(target=server_func, kwargs=server_kwds)
    server.daemon = True
    server.start()

    client_timeout = 15.0

    try:
        connection = None
        while not connection:
            time.sleep(.1)
            try:
                connection = enip.client.implicit(*server_addr,
                                                  timeout=client_timeout,
                                                  connection_path=None)
            except socket.error as exc:
                logging.warning("enip.client.connector socket.error: %r", exc)
                if exc.errno != errno.ECONNREFUSED:
                    raise
            except Exception as exc:
                logging.warning("enip.client.connector Exception: %r", exc)
                raise

        with connection:
            # Get Attribute Single's payload is an EPATH
            req = connection.service_code(code=enip.Object.GA_SNG_REQ,
                                          path='@0x99/1/2')
            assert 'service_code' in req and req.service_code is True  # no payload
            assert connection.readable(timeout=10.0)  # receive reply
            rpy = next(connection)
            assert rpy and 'enip.CIP' in rpy and 'send_data.CPF.item[1].connection_data.request.get_attribute_single' in rpy.enip.CIP

            # Set Attribute Single's payload is an EPATH + USINT data
            req = connection.service_code(
                code=enip.Object.SA_SNG_REQ,
                path='@0x99/1/2',
                data=list(
                    bytearray(
                        #enip.EPATH.produce( enip.parse_path( '@0x99/1/2' )) +
                        enip.typed_data.produce(
                            {'data': list(map(float, range(taglen)))},
                            tag_type=enip.REAL.tag_type))))
            assert 'service_code' in req and isinstance(
                req.service_code, dict) and 'data' in req.service_code
            assert connection.readable(timeout=10.0)  # receive reply
            rpy = next(connection)
            assert rpy and 'enip.CIP' in rpy and 'send_data.CPF.item[1].connection_data.request.set_attribute_single' in rpy.enip.CIP
            '''
            # Try to send some PCCC I/O
            req		= connection.connected_send( b'\x00\x00\x01\x00\x00\x00\x00\x00\x06\x00\x4a\x0a\x03',
                                                     connection=0x8dee0016, sequence=1 )
            logging.normal("PCCC Request: %s", enip.enip_format( req ))
            #assert 'service_code' in req and req.service_code is True # no payload
            assert connection.readable( timeout=10.0 ) # receive reply
            rpy			= next( connection )
            logging.normal("PCCC Response: %s", enip.enip_format( rpy )) # will be EtherNet/IP status 8; nothing implemented
            '''

        if not random.randint(0, 9):  # 10% of the time...
            # Try a clean shutdown, closing the outgoing half of the socket, leading to an EOF on
            # the server.  This will cause the subsequent Forward Close to fail w/ an EPIPE
            logging.normal("Skip Forward Close; send EOF")
            connection.shutdown()
            assert connection.readable(timeout=1.0)  # receive EOF
            try:
                connection.close()
            except socket.error as exc:
                if exc.errno != errno.EPIPE:
                    raise
        else:
            # Normal close procedure; send Forward Close + EOF, receive Forward Close + EOF.
            logging.normal("Send Forward Close; then EOF")
            del connection
    finally:
        control = server_kwds.get('server', {}).get('control',
                                                    {}) if server_kwds else {}
        if 'done' in control:
            log.normal("Server %r done signalled",
                       misc.function_name(server_func))
            control['done'] = True  # only useful for threading.Thread; Process cannot see this
        if hasattr(server, 'terminate'):
            log.normal("Server %r done via .terminate()",
                       misc.function_name(server_func))
            server.terminate()  # only if using multiprocessing.Process(); Thread doesn't have
        server.join(timeout=1.0)
Esempio n. 30
0
def failure( exc ):
    logging.normal( "failed: %s", exc )
    elapsed		= int(( timer() - failure.start ) * 1000 ) # ms.
    failed[elapsed]	= str( exc )
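
This callback indexes each failure by elapsed milliseconds, so the caller must bind the 'failed' dict and attach the .start attribute before polling begins; a minimal usage sketch (assuming the same timer() used above):

failed			= {}		# elapsed-ms --> failure description
failure.start		= timer()	# attach before the first failure can fire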
Esempio n. 31
0
def test_rs485_poll(simulated_modbus_rtu_ttyS0):
    """Multiple poller_modbus instances may be polling different slave RTUs at different unit IDs.

    """
    command, address = simulated_modbus_rtu_ttyS0
    Defaults.Timeout = PORT_TIMEOUT
    client = modbus_client_rtu(framer=modbus_rtu_framer_collecting,
                               port=PORT_MASTER,
                               stopbits=PORT_STOPBITS,
                               bytesize=PORT_BYTESIZE,
                               parity=PORT_PARITY,
                               baudrate=PORT_BAUDRATE)

    unit = 2
    plc = poller_modbus("RS485 unit %s" % (unit),
                        client=client,
                        unit=unit,
                        rate=.25)

    wfkw = dict(timeout=1.0, intervals=10)

    try:
        plc.write(1, 0)
        plc.write(40001, 0)

        plc.poll(40001)

        success, elapsed = waitfor(lambda: plc.read(40001) is not None,
                                   "40001 polled", **wfkw)
        assert success
        assert elapsed < 1.0
        assert plc.read(40001) == 0

        assert plc.read(1) is None
        assert plc.read(40002) is None
        success, elapsed = waitfor(lambda: plc.read(40002) is not None,
                                   "40002 polled", **wfkw)
        assert success
        assert elapsed < 1.0
        assert plc.read(40002) == 0
        success, elapsed = waitfor(lambda: plc.read(1) is not None,
                                   "00001 polled", **wfkw)
        assert success
        assert elapsed < 1.0
        assert plc.read(1) == 0

        plc.write(40001, 99)
        success, elapsed = waitfor(lambda: plc.read(40001) == 99,
                                   "40001 polled", **wfkw)
        assert success
        assert elapsed < 1.0

        # See if we converge on our target poll time
        count = plc.counter
        while plc.counter < count + 20:
            logging.normal("%s at poll %d: Load: %s ", plc.description,
                           plc.counter, plc.load)
            time.sleep(.5)
        logging.normal("%s at poll %d: Load: %s ", plc.description,
                       plc.counter, plc.load)

    except Exception:
        logging.warning("%s poller failed: %s", plc.description,
                        traceback.format_exc())
        raise
    finally:
        logging.info("Stopping plc polling")
        plc.done = True
        waitfor(lambda: not plc.is_alive(),
                "%s poller done" % (plc.description),
                timeout=1.0)
Esempio n. 32
0
def test_history_sequential():
    for _ in range( 3 ):
        path		= "/tmp/test_sequential_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases.  Note: times are truncated to milliseconds, so timestamps saved out will
        # probably evaluate as < the original value when read back in!  Since each file contains
        # only one record, we must be careful to use 'strict', to ensure we open the next file
        # strictly greater than the last timestamp (or we'll open the same file again!)
        now		= timer()
        count		= 10
        for e in range( count ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                l.write( { 40001: count - e }, now=now - e )
            if e:
                # Compress .1 onward using a random format; randomly delete origin uncompressed file
                # so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    fd.write( open( f, 'rb' ).read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        # Attempt to begin loading history around the middle of the recording
        rdr		= reader( path,
                                          historical=now - random.uniform( 3.0, 9.0 ),
                                          basis=now + random.uniform( -.5, +.5 ),
                                          factor=3 )

        # Begin with the first historical file before our computed advancing historical time (we
        # could provide a specific timestamp here, if we wanted).  No lookahead.
        ts_l		= None
        f_l		= None
        after		= False # only first open is "before"; rest are "after"
        strict		= False # only goes false when timestamp increases in the same file
        deadline	= now + count
        while timer() <= deadline:
            # open next file beginning after the last ts
            o		= rdr.open( target=ts_l, after=after, strict=strict ) # Generator; doesn't do much here...
            after	= True
            strict	= True
            for (f,l,cur),(ts,js) in o: # raises HistoryExhausted on open() generator failure
                assert ts_l is None or ts >= ts_l, \
                    "Historical record out of sequence; %s isn't >= %s" % ( ts, ts_l )
                # Defer updating ts_l 'til below, so the strict test can detect an increase
                if js is None:
                    logging.info( "@%s: not yet available", ts )
                    assert ts > cur, "Next record should have been returned; not in future"
                    time.sleep( .1 )
                else:
                    logging.normal( "@%s: %r", ts, js )
                    assert ts <= cur, "Next record shouldn't have been returned; yet future"
                    if f == f_l and ts > ts_l:
                        strict = False
                f_l,ts_l= f,ts
        assert False, "Should have raised HistoryExhausted by now"
    except HistoryExhausted as exc:
        logging.normal( "History exhausted: %s", exc )

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except OSError:
                pass
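
The truncation to milliseconds mentioned in the comment above is why 'strict' matters: a timestamp serialized at millisecond precision can read back less than the original float.  A standalone illustration:

now	= 1459980000.123456789		# arbitrary example time
ms	= int( now * 1000 ) / 1000.0	# millisecond truncation on write
assert ms <= now			# the reread timestamp may compare < original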
Esempio n. 33
0
def test_rs485_multi(simulated_modbus_rtu_ttyS0, simulated_modbus_rtu_ttyS2):

    command, address = simulated_modbus_rtu_ttyS0
    command, address = simulated_modbus_rtu_ttyS2
    Defaults.Timeout = PORT_TIMEOUT
    client = modbus_client_rtu(framer=modbus_rtu_framer_collecting,
                               port=PORT_MASTER,
                               stopbits=PORT_STOPBITS,
                               bytesize=PORT_BYTESIZE,
                               parity=PORT_PARITY,
                               baudrate=PORT_BAUDRATE)

    # 4 poller_modbus instances sharing the same RTU Master 'client'.  They will all block on I/O
    # access via the same RS485 media interface.
    slaves = [1, 2, 3, 4]
    plc = {}
    for unit in slaves:
        plc[unit] = poller_modbus("RS485 unit %s" % (unit),
                                  client=client,
                                  unit=unit,
                                  rate=.25)

    wfkw = dict(timeout=1.0, intervals=10)

    try:
        for unit in slaves:
            plc[unit].write(1, 0)
            plc[unit].write(40001, 0)
            plc[unit].poll(40001)

        # See if we converge on our target poll time
        count = plc[slaves[0]].counter
        while any(plc[unit].counter < count + 20 for unit in slaves):
            for unit in slaves:
                logging.normal("%s at poll %d: Load: %s ",
                               plc[unit].description, plc[unit].counter,
                               plc[unit].load)
            time.sleep(.5)
        for unit in slaves:
            logging.normal("%s at poll %d: Load: %s ", plc[unit].description,
                           plc[unit].counter, plc[unit].load)

        for unit in slaves:
            success, elapsed = waitfor(
                lambda: plc[unit].read(40001) is not None,
                "%d/40001 polled" % (unit), **wfkw)
            assert success
            assert elapsed < 1.0
            assert plc[unit].read(40001) == 0

        # Haven't polled 1 or 40002 yet
        for unit in slaves:
            assert plc[unit].read(1) is None
            assert plc[unit].read(40002) is None
        for unit in slaves:
            success, elapsed = waitfor(
                lambda: plc[unit].read(40002) is not None,
                "%d/40002 polled" % (unit), **wfkw)
            assert success
            assert elapsed < 1.0
            assert plc[unit].read(40002) == 0

            success, elapsed = waitfor(lambda: plc[unit].read(1) is not None,
                                       "%d/00001 polled" % (unit), **wfkw)
            assert success
            assert elapsed < 1.0
            assert plc[unit].read(1) == 0

        for unit in slaves:
            plc[unit].write(40001, 99)
            success, elapsed = waitfor(lambda: plc[unit].read(40001) == 99,
                                       "%d/40001 polled" % (unit), **wfkw)
            assert success
            assert elapsed < 1.0

    except Exception:
        logging.warning("poller failed: %s", traceback.format_exc())
        raise
    finally:
        logging.info("Stopping plc polling")
        for unit in slaves:
            plc[unit].done = True
        for unit in slaves:
            waitfor(lambda: not plc[unit].is_alive(),
                    "%s poller done" % (plc[unit].description),
                    timeout=1.0)
Esempio n. 34
0
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\

    Register range(s) and value(s) must be supplied:
    
      <begin>[-<end>]
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """)
    parser.add_argument('-v',
                        '--verbose',
                        default=0,
                        action="count",
                        help="Display logging information.")
    parser.add_argument('-l',
                        '--log',
                        type=str,
                        default=None,
                        help="Direct log output to the specified file")
    parser.add_argument(
        '-a',
        '--address',
        default="0.0.0.0:502",
        help="Default [interface][:port] to bind to (default: any, port 502)")
    parser.add_argument(
        '-r',
        '--reach',
        default=1,
        help="Merge polls within <reach> registers of each-other")
    parser.add_argument('-R', '--rate', default=1.0, help="Target poll rate")
    parser.add_argument('registers', nargs="+")
    args = parser.parse_args()

    # Deduce logging level and target file (if any)
    levelmap = {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    cpppo.log_cfg['level'] = (levelmap[args.verbose]
                              if args.verbose in levelmap else logging.DEBUG)
    if args.log:
        cpppo.log_cfg['filename'] = args.log
    logging.basicConfig(**cpppo.log_cfg)

    # (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only
    # interface is supplied.  After this block, 'address' is always a tuple
    # like ("interface",502)
    address = None
    if args.address:
        address = args.address.split(':')
        assert 1 <= len(address) <= 2
        address = (str(address[0]),
                   int(address[1]) if len(address) > 1 else Defaults.Port)
        log.info("--address '%s' produces address=%r" %
                 (args.address, address))

    # Start the PLC poller

    poller = poller_modbus("Modbus/TCP",
                           host=address[0],
                           port=address[1],
                           reach=int(args.reach),
                           rate=float(args.rate))

    for r in args.registers:
        rng = r.split('-')
        beg = int(rng[0])
        cnt = int(rng[1]) - int(rng[0]) + 1 if len(rng) > 1 else 1
        for reg in range(beg, beg + cnt):
            poller.poll(reg)

    load = ''
    fail = ''
    poll = ''
    regs = {}
    while True:
        loadcur = "%.2f" % (poller.load[0] if poller.load[0] else 0)
        if loadcur != load:
            load = loadcur
            logging.detail("load: %s", loadcur)
        failcur = ", ".join([("%d-%d" % (b, b + c - 1))
                             for b, c in poller.failing])
        pollcur = ", ".join([("%d-%d" % (b, b + c - 1))
                             for b, c in poller.polling])
        if (failcur != fail or pollcur != poll):
            fail, poll = failcur, pollcur
            logging.normal("failing: %s, polling: %s", fail, poll)
        # log data changes
        for beg, cnt in poller.polling:
            for reg in range(beg, beg + cnt):
                val = poller.read(reg)
                old = regs.get(reg)  # may be None
                if val != old:
                    logging.warning("%5d == %5d (was: %s)" % (reg, val, old))
                    regs[reg] = val

        time.sleep(1)
Esempio n. 35
0
def main():
    parser			= argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog = """\
    Begin polling the designated register range(s), optionally writing initial values to them.

    Register range(s) and value(s) must be supplied:
    
      <begin>[-<end>]
      <begin>[-<end>]=<val>,...
    
    EXAMPLE
    
      modbus_poll --address localhost:7502 40001-40100
    
    """ )
    parser.add_argument( '-v', '--verbose',
                         default=0, action="count", help="Display logging information." )
    parser.add_argument('-l', '--log', 
                        type=str, default=None, help="Direct log output to the specified file" )
    parser.add_argument( '-a', '--address', default="0.0.0.0:502",
                         help="Default [interface][:port] to bind to (default: any, port 502)" )
    parser.add_argument( '-r', '--reach',	default=1,
                         help="Merge polls within <reach> registers of each-other" )
    parser.add_argument( '-R', '--rate',	default=1.0,
                         help="Target poll rate" )
    parser.add_argument( '-t', '--timeout',	default=Defaults.Timeout,
                         help="I/O Timeout (default: %s)" % ( Defaults.Timeout ))
    parser.add_argument( 'registers', nargs="+" )
    args			= parser.parse_args()
    
    # Deduce logging level and target file (if any)
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename'] = args.log
    logging.basicConfig( **cpppo.log_cfg )

    # Deduce the (interface, port) address to bind; the interface defaults to ''
    # (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only the
    # interface is supplied.  After this block, 'address' is always a tuple
    # like ("interface",502)
    address			= None
    if args.address:
        address			= args.address.split(':')
        assert 1 <= len( address ) <= 2
        address			= (
            str( address[0] ),
            int( address[1] ) if len( address ) > 1 else Defaults.Port )
        log.info( "--address '%s' produces address=%r" % ( args.address, address ))

    # Set up the Modbus/TCP I/O timeout to use, for all connect and read/write transactions
    Defaults.Timeout		= float( args.timeout )

    # Start the PLC poller (and perform any initial writes indicated)
    poller			= poller_modbus(
        "Modbus/TCP", host=address[0], port=address[1], reach=int( args.reach ), rate=float( args.rate ))

    for txt in args.registers:
        beg,end,val		= register_decode( txt ) # beg-end is inclusive
        for reg in range( beg, end+1 ):
            poller.poll( reg )
        if val:
            # Value(s) were supplied for the register(s) range; write 'em.  This results in a
            # WriteMultipleRegistersRequest if val is an iterable, or a WriteSingle...  if not.
            # We'll need to shatter/merge the register range into appropriate sized chunks for a
            # valid Modbus/TCP request, and then take the appropriate number of values for each.
            for base,length in merge( [ (beg,end-beg+1) ] ):
                poller.write( base, val[0] if length == 1 else val[:length] )
                val		= val[length:]
    
    load			= ''
    fail			= ''
    poll			= ''
    regs			= {}
    while True:
        loadcur			= "%.2f" % ( poller.load[0] if poller.load[0] else 0 )
        if loadcur != load:
            load		= loadcur
            logging.detail( "load: %s", loadcur )
        failcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.failing ] )
        pollcur			= ", ".join( [ ("%d-%d" % (b,b+c-1)) for b,c in poller.polling ] )
        if ( failcur != fail or pollcur != poll ):
            fail, poll		= failcur, pollcur
            logging.normal( "failing: %s, polling: %s", fail, poll )
        # log data changes
        for beg,cnt in poller.polling:
            for reg in range( beg, beg+cnt ):
                val		= poller.read( reg )
                old		= regs.get( reg ) # may be None
                if val != old:
                    logging.warning( "%5d == %5d (was: %s)" %( reg, val, old ))
                    regs[reg]	= val

        time.sleep( 1 )
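
The register_decode helper used above is not shown in this listing.  Here is a
minimal sketch of such a parser, assuming the <begin>[-<end>][=<val>,...] syntax
described in the epilog; the function name and its (beg, end, values-or-None)
return convention are inferred from the call site, not quoted from the library:

def register_decode( txt ):
    """Parse '<begin>[-<end>][=<val>,...]' --> (beg, end, [<val>, ...] or None)."""
    val			= None
    if '=' in txt:
        txt, _, vals	= txt.partition( '=' )
        val		= [ int( v ) for v in vals.split( ',' ) ]
    rng			= txt.split( '-' )
    beg			= int( rng[0] )
    end			= int( rng[1] ) if len( rng ) > 1 else beg
    return beg, end, val

For example, register_decode( "40001-40002=1,2" ) yields (40001, 40002, [1, 2]).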
Example n. 36
    def position(self,
                 actuator=1,
                 timeout=TIMEOUT,
                 home=True,
                 noop=False,
                 svoff=False,
                 **kwds):
        """Begin position operation on 'actuator' w/in 'timeout'.  

        :param home: Return to home position before any other movement
        :param noop: Do not perform final activation

        Running with specified data

        1   - Set internal flag Y30 (input invalid flag)
        2   - Write 1 to internal flag Y19 (SVON)
        2a  -   and confirm internal flag X49 (SVRE) has become "1"
        3   - Write 1 to internal flag Y1C (SETUP)
        3a  -   and confirm internal flag X4A (SETON) has become "1"
        4   - Write data to D9102-D9110
        5   - Write Operation Start instruction "1" to D9100 (returns to 0 after processed)

        If no positioning kwds are provided, then no new position is configured.  If 'noop' is True,
        everything except the final activation is performed.

        """
        begin = cpppo.timer()
        if timeout is None:
            timeout = self.TIMEOUT
        assert self.complete( actuator=actuator, svoff=svoff, timeout=timeout ), \
            "Previous actuator position incomplete within timeout %r" % timeout
        status = self.status(actuator=actuator)
        if not kwds:
            return status

        # Previous positioning complete, and possibly new position keywords provided.
        logging.detail("Position: actuator %3d setdata: %r", actuator, kwds)
        unit = self.unit(uid=actuator)

        # 1: set INPUT_INVALID
        unit.write(data.Y30_INPUT_INVALID.addr, 1)

        # 2: set SVON, check SVRE
        if timeout:
            assert cpppo.timer() <= begin + timeout, \
                "Failed to complete positioning SVON/SVRE within timeout"
        unit.write(data.Y19_SVON.addr, 1)
        svre = self.check(predicate=lambda: unit.read(
            data.Y19_SVON.addr) and unit.read(data.X49_SVRE.addr),
                          deadline=None if timeout is None else begin +
                          timeout)
        assert svre, \
            "Failed to set SVON True and read SVRE True"

        # 3: Return to home? set SETUP, check SETON.  Otherwise, clear SETUP.  It is very unclear
        #    whether we need to do this, and/or whether we need to clear it afterwards.
        if home:
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning SETUP/SETON within timeout"
            unit.write(data.Y1C_SETUP.addr, 1)
            seton = self.check(predicate=lambda: unit.read(
                data.Y1C_SETUP.addr) and unit.read(data.X4A_SETON.addr),
                               deadline=None if timeout is None else begin +
                               timeout)
            if not seton:
                logging.warning("Failed to set SETUP True and read SETON True")
            # assert seton, \
            #    "Failed to set SETUP True and read SETON True"
        else:
            unit.write(data.Y1C_SETUP.addr, 0)

        # 4: Write any changed position data.  The actuator doesn't accept individual register
        # writes, so we use multiple register writes for each value.
        for k, v in kwds.items():
            assert k in data, \
                "Unrecognized positioning keyword: %s == %r" % ( k, v )
            assert STEP_DATA_BEG <= data[k].addr <= STEP_DATA_END, \
                "Invalid positioning keyword: %s == %r; not within position data address range" % ( k, v )
            format = data[k].get('format')
            if format:
                # Create a big-endian buffer.  This will be some multiple of register size.  Then,
                # unpack it into some number of 16-bit big-endian registers (this will be a tuple).
                buf = struct.pack('>' + format, v)
                values = [
                    struct.unpack_from('>H', buf[o:])[0]
                    for o in range(0, len(buf), 2)
                ]
            else:
                values = [v]
            if timeout:
                assert cpppo.timer() <= begin + timeout, \
                    "Failed to complete positioning data update within timeout"
            logging.normal("Position: actuator %3d updated: %16s: %8s (== %s)",
                           actuator, k, v, values)
            unit.write(data[k].addr, values)

        # 5: set operation_start to 0x0100 (1 in high-order bytes) unless 'noop'
        if not noop:
            unit.write(data.operation_start.addr, 0x0100)
            started = self.check(predicate=lambda: unit.read(
                data.operation_start.addr) == 0x0100,
                                 deadline=None if timeout is None else begin +
                                 timeout)
            assert started, \
                "Failed to detect positioning start within timeout"

        return self.status(actuator=actuator)
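
As a standalone illustration of step 4's encoding (a sketch, not part of the
class above): a value whose 'format' is a big-endian 32-bit integer packs into
4 bytes, which then unpack into two 16-bit registers for the multiple-register
write:

import struct

buf			= struct.pack( '>i', 40000 )	# --> b'\x00\x00\x9c\x40'
values			= [ struct.unpack_from( '>H', buf[o:] )[0]
                            for o in range( 0, len( buf ), 2 ) ]
assert values == [0x0000, 0x9c40]			# == [0, 40000]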
Example n. 37
def main(argv=None,
         attribute_class=device.Attribute,
         idle_service=None,
         identity_class=None,
         UCMM_class=None,
         message_router_class=None,
         connection_manager_class=None,
         **kwds):
    """Pass the desired argv (excluding the program name in sys.argv[0]; typically pass argv=None, which
    is equivalent to argv=sys.argv[1:], the default for argparse.  Requires at least one tag to be
    defined.

    If a cpppo.apidict() is passed for kwds['server']['control'], we'll use it to transmit server
    control signals via its .done, .disable, .timeout and .latency attributes.

    Uses the provided attribute_class (default: device.Attribute) to process all EtherNet/IP
    attribute I/O (eg. Read/Write Tag [Fragmented]) requests.  By default, device.Attribute stores
    and retrieves the supplied data.  To perform other actions (ie. forward the data to your own
    application), derive from device.Attribute, and override the __getitem__ and __setitem__
    methods.

    If an idle_service function is provided, it will be called after a period of latency between
    incoming requests.

    """
    global address
    global options
    global tags
    global srv_ctl
    global latency
    global timeout

    ap = argparse.ArgumentParser(description="Provide an EtherNet/IP Server",
                                 epilog="")

    ap.add_argument('-v',
                    '--verbose',
                    default=0,
                    action="count",
                    help="Display logging information.")
    ap.add_argument(
        '-a',
        '--address',
        default=("%s:%d" % address),
        help="EtherNet/IP interface[:port] to bind to (default: %s:%d)" %
        (address[0], address[1]))
    ap.add_argument('-p',
                    '--print',
                    default=False,
                    action='store_true',
                    help="Print a summary of operations to stdout")
    ap.add_argument('-l', '--log', help="Log file, if desired")
    ap.add_argument(
        '-w',
        '--web',
        default="",
        help="Web API [interface]:[port] to bind to (default: %s, port 80)" %
        (address[0]))
    ap.add_argument(
        '-d',
        '--delay',
        default="0.0",
        help=
        "Delay response to each request by a certain number of seconds (default: 0.0)"
    )
    ap.add_argument(
        '-s',
        '--size',
        help=
        "Limit EtherNet/IP encapsulated request size to the specified number of bytes (default: None)",
        default=None)
    ap.add_argument(
        '--route-path',
        default=None,
        help=
        "Route Path, in JSON, eg. %r (default: None); 0/false to accept only empty route_path"
        % (str(json.dumps(route_path_default))))
    ap.add_argument('-P',
                    '--profile',
                    default=None,
                    help="Output profiling data to a file (default: None)")
    ap.add_argument(
        'tags',
        nargs="+",
        help=
        "Any tags, their type (default: INT), and number (default: 1), eg: tag=INT[1000]"
    )

    args = ap.parse_args(argv)

    # Deduce interface:port address to bind, and correct types (default is address, above)
    bind = args.address.split(':')
    assert 1 <= len(
        bind
    ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    bind = (str(bind[0]) if bind[0] else address[0],
            int(bind[1]) if len(bind) > 1 and bind[1] else address[1])

    # Set up logging level (-v...) and --log <file>
    levelmap = {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    cpppo.log_cfg['level'] = (levelmap[args.verbose]
                              if args.verbose in levelmap else logging.DEBUG)

    # Chain any provided idle_service function with log rotation; these may (also) consult global
    # signal flags such as logrotate_request, so execute supplied functions before logrotate_perform
    idle_service = [idle_service] if idle_service else []
    if args.log:
        # Output logging to a file, and handle UNIX-y log file rotation via 'logrotate', which sends
        # signals to indicate that a service's log file has been moved/renamed and it should re-open
        cpppo.log_cfg['filename'] = args.log
        signal.signal(signal.SIGHUP, logrotate_request)
        idle_service.append(logrotate_perform)

    logging.basicConfig(**cpppo.log_cfg)

    # Pull out a 'server.control...' supplied in the keywords, and make certain it's a
    # cpppo.apidict.  We'll use this to transmit control signals to the server thread.  Set the
    # current values to sane initial defaults/conditions.
    if 'server' in kwds:
        assert 'control' in kwds[
            'server'], "A 'server' keyword provided without a 'control' attribute"
        srv_ctl = cpppo.dotdict(kwds.pop('server'))
        assert isinstance(
            srv_ctl['control'],
            cpppo.apidict), "The server.control... must be a cpppo.apidict"
    else:
        srv_ctl.control = cpppo.apidict(timeout=timeout)

    srv_ctl.control['done'] = False
    srv_ctl.control['disable'] = False
    srv_ctl.control.setdefault('latency', latency)

    # Global options data.  Copy any remaining keyword args supplied to main().  This could
    # include an alternative enip_process, for example, instead of defaulting to logix.process.
    options.update(kwds)

    # Specify a response delay.  The options.delay is another dotdict() layer, so its attributes
    # (eg. .value, .range) are available to the web API for manipulation.  Therefore, they can be
    # set to arbitrary values at random times!  However, the type will be retained.
    def delay_range(*args, **kwds):
        """If a delay.range like ".1-.9" is specified, then change the delay.value every second to something
        in that range."""
        assert 'delay' in kwds and 'range' in kwds['delay'] and '-' in kwds['delay']['range'], \
            "No delay=#-# specified"
        log.normal("Delaying all responses by %s seconds",
                   kwds['delay']['range'])
        while True:
            # Once we start, changes to delay.range will be re-evaluated each loop
            time.sleep(1)
            try:
                lo, hi = map(float, kwds['delay']['range'].split('-'))
                kwds['delay']['value'] = random.uniform(lo, hi)
                log.info("Mutated delay == %g", kwds['delay']['value'])
            except Exception as exc:
                log.warning("No delay=#[.#]-#[.#] range specified: %s", exc)

    options.delay = cpppo.dotdict()
    try:
        options.delay.value = float(args.delay)
        log.normal("Delaying all responses by %r seconds", options.delay.value)
    except:
        assert '-' in args.delay, \
            "Unrecognized --delay=%r option" % args.delay
        # A range #-#; set up a thread to mutate the option.delay.value over the .range
        options.delay.range = args.delay
        options.delay.value = 0.0
        mutator = threading.Thread(target=delay_range, kwargs=options)
        mutator.daemon = True
        mutator.start()

    # Create all the specified tags/Attributes.  The enip_process function will (somehow) assign the
    # given tag name to reference the specified Attribute.  We'll define an Attribute to print
    # I/O if args.print is specified; reads will only be logged at logging.NORMAL and above.
    class Attribute_print(attribute_class):
        def __getitem__(self, key):
            value = super(Attribute_print, self).__getitem__(key)
            if log.isEnabledFor(logging.NORMAL):
                print("%20s[%5s-%-5s] == %s" %
                      (self.name, key.indices(len(self))[0] if isinstance(
                          key, slice) else key, key.indices(len(self))[1] -
                       1 if isinstance(key, slice) else key, value))
            return value

        def __setitem__(self, key, value):
            super(Attribute_print, self).__setitem__(key, value)
            print("%20s[%5s-%-5s] <= %s" %
                  (self.name, key.indices(len(self))[0] if isinstance(
                      key, slice) else key, key.indices(len(self))[1] -
                   1 if isinstance(key, slice) else key, value))

    for t in args.tags:
        tag_name, rest = t, ''
        if '=' in tag_name:
            tag_name, rest = tag_name.split('=', 1)
        tag_type, rest = rest or 'INT', ''
        tag_size = 1
        if '[' in tag_type:
            tag_type, rest = tag_type.split('[', 1)
            assert ']' in rest, "Invalid tag; mis-matched [...]"
            tag_size, rest = rest.split(']', 1)
        assert not rest, "Invalid tag specified; expected tag=<type>[<size>]: %r" % t
        tag_type = str(tag_type).upper()
        typenames = {
            "BOOL": (parser.BOOL, 0),
            "INT": (parser.INT, 0),
            "DINT": (parser.DINT, 0),
            "SINT": (parser.SINT, 0),
            "REAL": (parser.REAL, 0.0),
            "SSTRING": (parser.SSTRING, ''),
        }
        assert tag_type in typenames, "Invalid tag type; must be one of %r" % list(
            typenames)
        tag_class, tag_default = typenames[tag_type]
        try:
            tag_size = int(tag_size)
        except:
            raise AssertionError("Invalid tag size: %r" % tag_size)

        # Ready to create the tag and its Attribute (and error code to return, if any).  If tag_size
        # is 1, it will be a scalar Attribute.  Since the tag_name may contain '.', we don't want
        # the normal dotdict.__setitem__ resolution to parse it; use plain dict.__setitem__.
        log.normal("Creating tag: %s=%s[%d]", tag_name, tag_type, tag_size)
        tag_entry = cpppo.dotdict()
        tag_entry.attribute = (
            Attribute_print if args.print else attribute_class)(
                tag_name,
                tag_class,
                default=(tag_default if tag_size == 1 else [tag_default] *
                         tag_size))
        tag_entry.error = 0x00
        dict.__setitem__(tags, tag_name, tag_entry)

    # Use the Logix simulator and all the basic required default CIP message processing classes by
    # default (unless some other one was supplied as a keyword options to main(), loaded above into
    # 'options').  This key indexes an immutable value (not another dotdict layer), so is not
    # available for the web API to report/manipulate.  By default, we'll specify no route_path, so
    # any request route_path will be accepted.  Otherwise, we'll create a UCMM-derived class with
    # the specified route_path, which will filter only requests w/ the correct route_path.
    options.setdefault('enip_process', logix.process)
    if identity_class:
        options.setdefault('identity_class', identity_class)
    assert not UCMM_class or not args.route_path, \
        "Specify either a route-path, or a custom UCMM_class; not both"
    if args.route_path is not None:
        # Must be JSON, eg. '[{"link"...}]', or '0'/'false' to explicitly specify no route_path
        # accepted (must be empty in request)
        class UCMM_class_with_route(device.UCMM):
            route_path = json.loads(args.route_path)

        UCMM_class = UCMM_class_with_route
    if UCMM_class:
        options.setdefault('UCMM_class', UCMM_class)
    if message_router_class:
        options.setdefault('message_router_class', message_router_class)
    if connection_manager_class:
        options.setdefault('connection_manager_class',
                           connection_manager_class)

    # The Web API

    # Deduce web interface:port address to bind, and correct types (default is address, above).
    # Default to the same interface as we're bound to, port 80.  We'll only start if non-empty --web
    # was provided, though (even if it's just ':', to get all defaults).  Usually you'll want to
    # specify at least --web :[<port>].
    http = args.web.split(':')
    assert 1 <= len(
        http) <= 2, "Invalid --web [<interface>]:[<port>]: %s" % args.web
    http = (str(http[0]) if http[0] else bind[0],
            int(http[1]) if len(http) > 1 and http[1] else 80)

    if args.web:
        assert 'web' in sys.modules, "Failed to import web API module; --web option not available.  Run 'pip install web.py'"
        logging.normal("EtherNet/IP Simulator Web API Server: %r" % (http, ))
        webserver = threading.Thread(target=web_api, kwargs={'http': http})
        webserver.daemon = True
        webserver.start()

    # The EtherNet/IP Simulator.  Pass all the top-level options keys/values as keywords, and pass
    # the entire tags dotdict as a tags=... keyword.  The server_main server.control signals (.done,
    # .disable) are also passed as the server= keyword.  We are using a cpppo.apidict with a long
    # timeout; this will block the web API for several seconds to allow all threads to respond to
    # the signals delivered via the web API.
    logging.normal("EtherNet/IP Simulator: %r" % (bind, ))
    kwargs = dict(options,
                  latency=latency,
                  size=args.size,
                  tags=tags,
                  server=srv_ctl)

    tf = network.server_thread
    tf_kwds = dict()
    if args.profile:
        tf = network.server_thread_profiling
        tf_kwds['filename'] = args.profile

    disabled = False  # Recognize toggling between en/disabled
    while not srv_ctl.control.done:
        if not srv_ctl.control.disable:
            if disabled:
                logging.detail("EtherNet/IP Server enabled")
                disabled = False
            network.server_main(
                address=bind,
                target=enip_srv,
                kwargs=kwargs,
                # Use a list comprehension, not map(): under Python 3 map is
                # lazy, so the idle functions would never actually be invoked
                idle_service=lambda: [f() for f in idle_service],
                thread_factory=tf,
                **tf_kwds)
        else:
            if not disabled:
                logging.detail("EtherNet/IP Server disabled")
                disabled = True
            time.sleep(latency)  # Still disabled; wait a bit

    return 0
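
The tag=<type>[<size>] parsing loop above can be exercised standalone; a
minimal sketch (the helper name parse_tag is hypothetical, but the grammar is
the one the loop accepts):

def parse_tag( t ):
    """Parse 'tag[=<type>[<size>]]' --> (name, type, size); defaults: INT, 1."""
    name, _, rest	= t.partition( '=' )
    typ			= rest or 'INT'
    size		= 1
    if '[' in typ:
        typ, _, sz	= typ.partition( '[' )
        assert sz.endswith( ']' ), "Invalid tag; mis-matched [...]"
        size		= int( sz[:-1] )
    return name, str( typ ).upper(), size

assert parse_tag( "Scada=DINT[100]" ) == ( "Scada", "DINT", 100 )
assert parse_tag( "Tag" ) == ( "Tag", "INT", 1 )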
Example n. 38
    # Deduce logging level (-v...) and --log <file>
    levelmap 			= {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose] 
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename'] = args.log
    logging.basicConfig( **cpppo.log_cfg )

    def attribute_operations( paths ):
        for op in client.parse_operations( paths ):
            if 'attribute' in op['path'][-1]:
                op['method'] = 'get_attribute_single'
            else:
                op['method'] = 'get_attributes_all'
            yield op

    with client.connector( host=args.address, timeout=timeout ) as conn:
        idx			= -1
        start			= cpppo.timer()
        for idx,dsc,op,rpy,sts,val in conn.pipeline(
                operations=attribute_operations( tags ), depth=depth,
                multiple=False, timeout=timeout ):
            print( "%s: %3d: %s == %s" % ( timestamp(), idx, dsc, val ))
        elapsed			= cpppo.timer() - start
        logging.normal( "%3d requests in %7.2fs at pipeline depth %2s; %5.1f TPS" % (
            idx+1, elapsed, args.depth, ( idx + 1 ) / elapsed ))
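
For reference, the 'attribute' in op['path'][-1] test above works because each
parsed operation carries its CIP address as a list of path-segment dicts; a
sketch of the two cases (the segment structure is inferred from that test, not
quoted from the library):

op_attr			= { 'path': [ {'class': 0x93}, {'instance': 3}, {'attribute': 10} ] }
op_inst			= { 'path': [ {'class': 0x93}, {'instance': 3} ] }

assert 'attribute' in op_attr['path'][-1]	# --> get_attribute_single
assert 'attribute' not in op_inst['path'][-1]	# --> get_attributes_all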
Example n. 39
def test_history_unparsable():
    """Test history files rendered unparsable due to dropouts.  This should be handled with no problem
    except if the initial frame of register data on the first file is missing.

    """
    for _ in range( 3 ):
        path		= "/tmp/test_unparsable_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break # found an unused name; otherwise retry, up to 3 times
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path 

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases, containing records that are invalid.
        now		= timer()
        v		= 10000
        secs		= 10
        secs_ext	=  1.0  # allow the playback target to fall outside the history by this +/-
        basisext	=   .5  # adjust start basis time from now by this +/-
        minfactor	=   .25
        maxfactor	=  2.0
        maxlatency	=   .25
        # 1/N file lines corrupted (kills 2 records; the current and following).  None --> no errors
        maxerror	= random.choice( [ None, 3, 10, 100 ] )
        oldest		= None
        newest		= None
        logging.normal( "Corrupting %s of all history lines", None if not maxerror else "1/%d" % maxerror )
        for e in range( secs ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                ssend	= 100
                for ss in range( 0, ssend ): # subseconds up to but not including ssend...
                    js	= json.dumps( { 40001: v + e * 1000 + (ss * 1000 // ssend) } ) + '\n'
                    if maxerror and not random.randint( 0, maxerror ):
                        # Truncate some of the records (as would occur in a filesystem full or halt)
                        js = js[:random.randint( 0, len( js ) - 1)]
                    ts	= timestamp( now - e + ss/ssend )
                    if oldest is None or ts < oldest:
                        oldest = ts
                    if newest is None or ts > newest:
                        newest = ts
                    l._append( '\t'.join( (str( ts ),json.dumps( None ),js) ) )

        # Load the historical records.  This will be robust against all errors except if the first
        # line of the first history file opened is corrupt, and we therefore cannot get the initial
        # frame of register data.
        historical	= timestamp( now - random.uniform( -secs_ext, secs + secs_ext ))
        basisdelay	= random.uniform( -basisext, +basisext )
        basis		= now + basisdelay
        factor		= random.uniform( minfactor, maxfactor )
        lookahead	= 1.0
        on_bad_iframe	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        on_bad_data	= random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
        logging.normal( "Playback starts %s (%.1f%%) of history %s-%s, in %.3fs, at x %.2f rate w/%.1fs lookahead, on_bad_iframe=%s, on_bad_data=%s",
                        historical, ( historical.value - oldest.value ) * 100 / ( newest.value - oldest.value ),
                        oldest, newest, basisdelay, factor, lookahead,
                        "SUPPRESS" if on_bad_iframe == loader.SUPPRESS else "FAIL" if on_bad_iframe  == loader.FAIL else "RAISE",
                        "SUPPRESS" if on_bad_data   == loader.SUPPRESS else "FAIL" if on_bad_data    == loader.FAIL else "RAISE" )

        ld		= loader( path,
                                historical=historical, basis=basis, factor=factor, lookahead=lookahead )
        dur		= basisext + ( secs_ext + secs + secs_ext ) / factor + basisext + 2*maxlatency # Don't be too strict
        beg		= timer()
        count		= 0

        while ld:
            assert timer() - beg < dur, "The loader should have ended"
            cur,events	= ld.load( on_bad_iframe=on_bad_iframe, on_bad_data=on_bad_data )
            count      += len( events )
            logging.normal( "%s loaded up to %s; %d future, %d values: %d events: %s",
                            ld, cur, len( ld.future ), len( ld.values ), len( events ), 
                            repr( events ) if logging.root.isEnabledFor( logging.DEBUG ) else reprlib.repr( events ))
            time.sleep( random.uniform( 0.0, maxlatency ))

        if on_bad_data == ld.FAIL or on_bad_iframe == ld.FAIL:
            assert ld.state in (ld.COMPLETE, ld.FAILED)
        else:
            assert ld.state == ld.COMPLETE

    except IframeError as exc:
        logging.warning( "Detected error on initial frame of registers in first history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED and count == 0, "Shouldn't have loaded any events -- only iframe failures expected"

    except DataError as exc:
        logging.warning( "Detected error on registers data in a history file; failure expected: %s", exc )
        assert ld.state == ld.FAILED

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        for f in files:
            if os.path.exists( f ):
                logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
            else:
                logging.warning( "%s: Couldn't find file", f )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
Example n. 40
def process( p, v ):
    logging.normal( "process: %16s == %s", p, v )
    values[p]		= v
Example n. 41
def main(argv=None):
    """Get Attribute(s) Single/All the specified Instance or Attribute level address(es)

    """
    ap = argparse.ArgumentParser(
        description=
        "An EtherNet/IP Get Attribute Single/All and Set Attribute Single client",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\

One or more EtherNet/IP CIP Object/Instance Attributes may be read or
written.  The full format for specifying a tag and an operation is:

    @<Object>/<Instance>/<Attribute>[=<value>,<value>...]

The default Send Path is '@6/1', and the default Route Path is [{"link": 0,
"port":1}].  This should work with a device that can route requests to links
(eg. a *Logix Controller), with the Processor in slot 1 of the chassis.  If you
have a simpler device (ie. something that does not route requests, such as an AB
PowerFlex for example), then you may want to specify:

    --send-path='' --route-path=false

to eliminate the *Logix-style Unconnected Send (service 0x52) encapsulation
which is required to carry this Send/Route Path data. """)

    ap.add_argument(
        '-a',
        '--address',
        default=("%s:%d" % enip.address),
        help="EtherNet/IP interface[:port] to connect to (default: %s:%d)" %
        (enip.address[0], enip.address[1]))
    ap.add_argument(
        '-m',
        '--multiple',
        action='store_true',
        help=
        "Use Multiple Service Packet request targeting ~500 bytes (default: False)"
    )
    ap.add_argument('-d', '--depth', default=0, help="Pipelining depth")
    ap.add_argument('-t',
                    '--timeout',
                    default=5.0,
                    help="EtherNet/IP timeout (default: 5s)")
    ap.add_argument('-v',
                    '--verbose',
                    action="count",
                    default=0,
                    help="Display logging information.")
    ap.add_argument('-l', '--log', help="Log file, if desired")
    ap.add_argument(
        '--route-path',
        default=None,
        help=
        "Route Path, in JSON (default: %r); 0/false to specify no/empty route_path"
        % (str(json.dumps(client.connector.route_path_default))))
    ap.add_argument(
        '--send-path',
        default=None,
        help=
        "Send Path to UCMM (default: @6/1); Specify an empty string '' for no Send Path"
    )
    ap.add_argument('-P',
                    '--profile',
                    action='store_true',
                    help="Activate profiling (default: False)")
    ap.add_argument(
        'tags',
        nargs="+",
        help=
        "Class/Instance[/Attribute] to get (- to read from stdin), eg: @2/1 @2/1/1"
    )

    args = ap.parse_args(argv)

    # Set up logging level (-v...) and --log <file>
    levelmap = {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    cpppo.log_cfg['level'] = (levelmap[args.verbose]
                              if args.verbose in levelmap else logging.DEBUG)
    if args.log:
        cpppo.log_cfg['filename'] = args.log

    logging.basicConfig(**cpppo.log_cfg)

    addr = args.address.split(':')
    assert 1 <= len(
        addr
    ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    addr = (str(addr[0]) if addr[0] else enip.address[0],
            int(addr[1]) if len(addr) > 1 and addr[1] else enip.address[1])
    timeout = float(args.timeout)
    depth = int(args.depth)
    multiple = 500 if args.multiple else 0
    route_path = json.loads(
        args.route_path) if args.route_path else None  # may be None/0/False
    send_path = args.send_path

    if '-' in args.tags:
        # Collect tags from sys.stdin 'til EOF, at position of '-' in argument list
        minus = args.tags.index('-')
        tags = itertools.chain(args.tags[:minus], sys.stdin,
                               args.tags[minus + 1:])
    else:
        tags = args.tags

    profiler = None
    if args.profile:
        import cProfile as profile
        import pstats
        try:
            import StringIO		# Python 2
        except ImportError:
            import io as StringIO	# Python 3; io.StringIO has the same interface
        profiler = profile.Profile()

    failures = 0
    with client.connector(host=addr[0],
                          port=addr[1],
                          timeout=timeout,
                          profiler=profiler) as connection:
        idx = -1
        start = cpppo.timer()
        operations = attribute_operations(tags,
                                          route_path=route_path,
                                          send_path=send_path)
        for idx, dsc, op, rpy, sts, val in connection.pipeline(
                operations=operations,
                depth=depth,
                multiple=multiple,
                timeout=timeout):
            print("%s: %3d: %s == %s" % (time.ctime(), idx, dsc, val))
            failures += 1 if sts else 0
        elapsed = cpppo.timer() - start
        logging.normal(
            "%3d requests in %7.3fs at pipeline depth %2s; %7.3f TPS" %
            (idx + 1, elapsed, args.depth, (idx + 1) / elapsed))

    if profiler:
        s = StringIO.StringIO()
        ps = pstats.Stats(profiler, stream=s)
        for sortby in ['cumulative', 'time']:
            ps.sort_stats(sortby)
            ps.print_stats(25)
        print(s.getvalue())

    return 1 if failures else 0
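
A representative invocation (the program name is whatever entry point wraps
this main(); the address and tag operands are arbitrary examples, combining the
epilog's simple-device advice with the options defined above):

    <prog> --address localhost:44818 --route-path=false --send-path='' @2/1 @2/1/1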
Example n. 42
def failure( exc ):
    logging.normal( "failed: %s", exc )
    elapsed		= int(( timer() - failure.start ) * 1000 ) # ms.
    failed[elapsed]	= str( exc )
Example n. 43
def test_history_sequential():
    for _ in range( 3 ):
        path		= "/tmp/test_sequential_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break # found an unused name; otherwise retry, up to 3 times
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path 

    files		= []
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases.  Note: times are truncated to milliseconds, so timestamps saved out will
        # probably evaluate as < the original value when read back in!  Since each file contains
        # only one record, we must be careful to use 'strict', to ensure we open the next file
        # strictly greater than the last timestamp (or we'll open the same file again!)
        now		= timer()
        count		= 10
        for e in range( count ):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                l.write( { 40001: count - e }, now=now - e )
            if e:
                # Compress .1 onward using a random format; randomly delete origin uncompressed file
                # so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of  some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        # Attempt to begin loading history around the middle of the recording
        rdr		= reader( path,
                                  historical=now - random.uniform( 3.0, 9.0 ),
                                  basis=now + random.uniform( -.5, +.5 ),
                                  factor=3 )

        # Begin with the first historical file before our computed advancing historical time (we
        # could provide a specific timestamp here, if we wanted).  No lookahead.
        ts_l		= None
        f_l		= None
        after		= False # only first open is "before"; rest are "after"
        strict		= False # only goes false when timestamp increases in the same file
        deadline	= now + count
        while timer() <= deadline:
            # open next file beginning after the last ts
            o		= rdr.open( target=ts_l, after=after, strict=strict ) # Generator; doesn't do much here...
            after	= True
            strict	= True
            for (f,l,cur),(ts,js) in o: # raises HistoryExhausted on open() generator failure
                assert ts_l is None or ts >= ts_l, \
                    "Historical record out of sequence; %s isn't >= %s" % ( ts, ts_l )
                if js is None:
                    logging.info( "@%s: not yet available", ts )
                    assert ts > cur, "Next record should have been returned; not in future"
                    time.sleep( .1 )
                else:
                    logging.normal( "@%s: %r", ts, js )
                    assert ts <= cur, "Next record shouldn't have been returned; yet future"
                    # Compare against the *previous* record's timestamp; ts_l must not be
                    # updated before this test, or ts > ts_l would always be False
                    if f == f_l and ts > ts_l:
                        strict = False
                f_l, ts_l	= f, ts
        assert False, "Should have raised HistoryExhausted by now"
    except HistoryExhausted as exc:
        logging.normal( "History exhausted: %s", exc )

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
Example n. 44
def test_powerflex_poll_failure():
    """No PowerFlex simulator alive; should see exponential back-off.  Test that the poll.poll API can
    withstand gateway failures, and robustly continue polling.

    """
    #logging.getLogger().setLevel( logging.NORMAL )

    def null_server( conn, addr, server=None ):
        """Fake up an EtherNet/IP server that just sends a canned EtherNet/IP CIP Register and Identity
        string response, to fake the poll client into sending a poll request into a closed socket.
        Immediately does a shutdown of the incoming half of the socket, and then closes the
        connection after sending the fake replies, usually resulting in an excellent EPIPE/SIGPIPE
        on the client.  Use port 44819, to avoid interference by (possibly slow-to-exit) simulators
        running on port 44818.

        """
        logging.normal( "null_server on %s starting" % ( addr, ))
        conn.shutdown( socket.SHUT_RD )
        time.sleep( 0.1 )
        conn.send( b'e\x00\x04\x00\xc9wH\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00' )
        conn.send( b'c\x00;\x00\xd4/\x9dm\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x0c\x005\x00\x01\x00\x00\x02\xaf\x12\n\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x0e\x006\x00\x14\x0b`1\x1a\x06l\x00\x13PowerFlex/20-COMM-E\xff' )
        conn.close()
        while server and not server.control.done:
            time.sleep( .1 )
        logging.normal( "null_server on %s done" % ( addr, ))

    try:
        values			= {} # { <parameter>: <value> }
        failed			= {} # { <time>: <exc> }

        control			= dotdict()
        control.done		= False

        for _ in range( 3 ):
            server		= threading.Thread(
                target=network.server_main, kwargs={
                    'address': 	('',44819),
                    'target':	null_server,
                    'kwargs': {
                        'server': dotdict({
                            'control': control
                        })
                    },
                    'udp':	False, # no UDP server in this test
                })
            server.daemon		= True
            server.start()
            time.sleep(.5)
            if server.is_alive():	# is_alive is a method; the bare attribute is always truthy
                break
        assert server.is_alive(), "Unable to start null_server on INADDR_ANY"

        def process( p, v ):
            logging.normal( "process: %16s == %s", p, v )
            values[p]		= v
        process.done		= False

        def failure( exc ):
            logging.normal( "failed: %s", exc )
            elapsed		= int(( timer() - failure.start ) * 1000 ) # ms.
            failed[elapsed]	= str( exc )
        failure.start		= timer()
    
        backoff_min		= 0.5
        backoff_max		= 4.0
        backoff_multiplier	= 2.0 # --> backoff == .5, 1.0, 2.0, 4.0
        poller			= threading.Thread(
            target=poll.poll, kwargs={ 
                'gateway_class':powerflex_750_series, # deprecated; use proxy_class instead
                'address': 	('localhost',44819),
                'cycle':	1.0,
                'timeout':	0.5,
                'backoff_min':	backoff_min,
                'backoff_max':	backoff_max,
                'backoff_multiplier': backoff_multiplier,
                'process':	process,
                'failure':	failure,
            })
        poller.daemon		= True
        poller.start()

        try:
            # Polling starts immediately, but the first poll occurs after an attempt to get the
            # Identity string, hence two timeouts for the first poll failure.
            while len( failed ) < 3 and timer() - failure.start < 10.0:
                time.sleep(.1)
        finally:
            process.done	= True
            control.done	= True
        poller.join( backoff_max + 1.0 ) # allow for backoff_max before loop check
        assert not poller.is_alive(), "Poller Thread failed to terminate"
        server.join( 1.0 )
        assert not server.is_alive(), "Server Thread failed to terminate"

        # Check that each failure is (at least) the expected backoff from the last
        assert len( failed ) > 0
        k_last			= None
        backoff			= backoff_min
        for k in sorted( failed ):
            logging.normal( "Poll failure at %4dms (next backoff: %7.3fs): %s", k, backoff, failed[k] )
            if k_last is not None:
                assert k - k_last >= backoff
            backoff		= min( backoff_max, backoff * backoff_multiplier )
            k_last		= k

        assert len( values ) == 0

    except Exception as exc:
        logging.warning( "Test terminated with exception: %s", exc )
        raise
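
The failure spacing verified above is a capped geometric series; a minimal
sketch of the expected delays:

import itertools

def backoffs( bmin=0.5, bmax=4.0, multiplier=2.0 ):
    """Yield the expected exponential back-off delays, capped at bmax."""
    b			= bmin
    while True:
        yield b
        b		= min( b * multiplier, bmax )

assert list( itertools.islice( backoffs(), 5 )) == [0.5, 1.0, 2.0, 4.0, 4.0]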
Example n. 45
def test_history_performance():
    try:
        tracemalloc.start()
    except:
        pass

    for _ in range( 3 ):
        path		= "/tmp/test_performance_%d" % random.randint( 100000, 999999 )
        if not os.path.exists( path ):
            break # found an unused name; otherwise retry, up to 3 times
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path 

    files		= []
    try:
        day		= 24*60*60
        dur		= 3*day		# a few days worth of data
        regstps		= 0.0,5.0	# 0-5secs between updates
        numfiles	= dur//day+1	# ~1 file/day, but at least 2
        values		= {}		# Initial register values
        regscount	= 1000		# Number of different registers
        regschanged	= 1,10		# From 1-10 registers per row
        regsbase	= 40001

        start		= timer()

        now = beg	= start - dur
        linecnt		= 0
        for e in reversed( range( numfiles )):
            f		= path + (( '.%d' % e ) if e else '') # 0'th file has no extension
            files.append( f )
            with logger( f ) as l:
                if values:
                    l.write( values, now=now ); linecnt += 1
                while now < beg + len(files) * dur/numfiles:
                    lst	= now
                    now += random.uniform( *regstps )
                    assert now >= lst
                    assert timestamp( now ) >= timestamp( lst ), "now: %s, timestamp(now): %s" % ( now, timestamp( now ))
                    updates = {}
                    for _ in range( random.randint( *regschanged )):
                        # ( 1<<16 ) - 1: shift binds looser than -, so 1<<16 - 1 would be 1<<15
                        updates[random.randint( regsbase, regsbase + regscount - 1 )] = random.randint( 0, ( 1<<16 ) - 1 )
                    values.update( updates )
                    l.write( updates, now=now ); linecnt += 1
                lst 	= now
                now    += random.uniform( *regstps )
                assert now >= lst
                assert timestamp( now ) >= timestamp( lst )
            if e:
                # Compress .1 onward using a random format; randomly delete origin uncompressed file
                # so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz	 = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    with open( f, 'rb' ) as rd:
                        fd.write( rd.read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))

        logging.warning( "Generated data in %.3fs; lines: %d", timer() - start, linecnt )

        # Start somewhere within the first 0-1% of the dur after beg, forcing the loader to look
        # back to find the first file.  Try to do it all in the next 'playback' seconds (just to
        # push it to the max), in 'chunks' pieces.
        historical	= timestamp( random.uniform( beg + dur*0/100, beg + dur*1/100 ))
        basis		= timer()
        playback	= 2.0 * dur/day # Can sustain ~2 seconds / day of history on a single CPU
        chunks		= 1000
        factor		= dur / playback
        lookahead	= 60.0
        duration	= None
        if random.choice( (True,False) ):
            duration	= random.uniform( dur * 98/100, dur * 102/100 )

        begoff		= historical.value - beg
        endoff		= 0 if duration is None else (( historical.value + duration ) - ( beg + dur ))
        logging.warning( "Playback starts at beginning %s %s, duration %s, ends at ending %s %s",
                         timestamp( beg ), format_offset( begoff, ms=False ),
                         None if duration is None else format_offset( duration, ms=False, symbols='-+' ),
                         timestamp( beg + dur ), format_offset( endoff, ms=False ))

        ld		= loader(
            path, historical=historical, basis=basis, factor=factor, lookahead=lookahead, duration=duration )
        eventcnt	= 0
        slept		= 0
        cur		= None
        while ld:
            once	= False
            while ld.state < ld.AWAITING or not once:
                once		= True
                upcoming	= None
                limit		= random.randint( 0, 250 )
                if random.choice( (True,False) ):
                    upcoming	= ld.advance()
                    if random.choice( (True,False) ) and cur:
                        # ~25% of the time, provide an 'upcoming' timestamp that is between the
                        # current advancing historical time and the last load time.
                        upcoming-= random.uniform( 0, upcoming.value - cur.value )
                cur,events	= ld.load( upcoming=upcoming, limit=limit )
                eventcnt       += len( events )
                advance		= ld.advance()
                offset		= advance.value - cur.value
                logging.detail( "%s loaded up to %s (%s w/ upcoming %14s); %4d future, %4d values: %4d events / %4d limit" ,
                                ld, cur, format_offset( offset ),
                                format_offset( upcoming.value - advance.value ) if upcoming is not None else None,
                                len( ld.future ), len( ld.values ), len( events ), limit )

            logging.warning( "%s loaded up to %s; %3d future, %4d values: %6d events total",
                                ld, cur, len( ld.future ), len( ld.values ), eventcnt )
            try:
                snapshot	= tracemalloc.take_snapshot()
                display_top( snapshot, limit=10 )
            except:
                pass

            time.sleep( playback/chunks )
            slept	       += playback/chunks

        elapsed		= timer() - basis
        eventtps	= eventcnt // ( elapsed - slept )
        logging.error( "Playback in %.3fs (slept %.3fs); events: %d ==> %d historical records/sec",
                       elapsed, slept, eventcnt, eventtps )
        if not logging.getLogger().isEnabledFor( logging.NORMAL ):
            # Ludicrously low threshold, to pass tests on very slow machines
            assert eventtps >= 1000, \
                "Historical event processing performance low: %d records/sec" % eventtps
        try:
            display_biggest_traceback()
        except:
            pass

    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        '''
        for f in files:
            logging.normal( "%s:\n    %s", f, "    ".join( l for l in open( f )))
        '''
        raise

    finally:
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
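
The playback-rate arithmetic above compresses the generated history into a
couple of wall-clock seconds per day of data; spelled out for the 3-day case:

day			= 24*60*60
dur			= 3*day			# 259,200s of history
playback		= 2.0 * dur/day		# 6.0s of wall-clock playback
factor			= dur / playback	# 43,200x time compression
assert factor == day / 2.0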
Example n. 46
def main(argv=None, **kwds):
    """Pass the desired argv (excluding the program name in sys.argv[0]; typically
    pass argv=None, which is equivalent to argv=sys.argv[1:], the default for
    argparse.  Requires at least one tag to be defined.

    If a cpppo.apidict() is passed for kwds['server']['control'], we'll use it
    to transmit server control signals via its .done, .disable, .timeout and
    .latency attributes.

    """
    global address
    global options
    global tags
    global srv_ctl
    global latency
    global timeout

    ap = argparse.ArgumentParser(description="Provide an EtherNet/IP Server",
                                 epilog="")

    ap.add_argument('-v',
                    '--verbose',
                    default=0,
                    action="count",
                    help="Display logging information.")
    ap.add_argument(
        '-a',
        '--address',
        default=("%s:%d" % address),
        help="EtherNet/IP interface[:port] to bind to (default: %s:%d)" %
        (address[0], address[1]))
    ap.add_argument('-l', '--log', help="Log file, if desired")
    ap.add_argument(
        '-w',
        '--web',
        default="",
        help="Web API [interface]:[port] to bind to (default: %s, port 80)" %
        (address[0]))
    ap.add_argument(
        '-d',
        '--delay',
        help=
        "Delay response to each request by a certain number of seconds (default: 0.0)",
        default="0.0")
    ap.add_argument('-p',
                    '--profile',
                    help="Output profiling data to a file (default: None)",
                    default=None)
    ap.add_argument(
        'tags',
        nargs="+",
        help=
        "Any tags, their type (default: INT), and number (default: 1), eg: tag=INT[1000]"
    )

    args = ap.parse_args(argv)

    # Deduce interface:port address to bind, and correct types (default is address, above)
    bind = args.address.split(':')
    assert 1 <= len(
        bind
    ) <= 2, "Invalid --address [<interface>]:[<port>]: %s" % args.address
    bind = (str(bind[0]) if bind[0] else address[0],
            int(bind[1]) if len(bind) > 1 and bind[1] else address[1])

    # Set up logging level (-v...) and --log <file>
    levelmap = {
        0: logging.WARNING,
        1: logging.NORMAL,
        2: logging.DETAIL,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    cpppo.log_cfg['level'] = (levelmap[args.verbose]
                              if args.verbose in levelmap else logging.DEBUG)

    idle_service = None
    if args.log:
        # Output logging to a file, and handle UNIX-y log file rotation via 'logrotate', which sends
        # signals to indicate that a service's log file has been moved/renamed and it should re-open
        cpppo.log_cfg['filename'] = args.log
        signal.signal(signal.SIGHUP, logrotate_request)
        idle_service = logrotate_perform

    logging.basicConfig(**cpppo.log_cfg)

    # Pull out a 'server.control...' supplied in the keywords, and make certain it's a
    # cpppo.apidict.  We'll use this to transmit control signals to the server thread.  Set the
    # current values to sane initial defaults/conditions.
    if 'server' in kwds:
        assert 'control' in kwds[
            'server'], "A 'server' keyword provided without a 'control' attribute"
        srv_ctl = cpppo.dotdict(kwds.pop('server'))
        assert isinstance(
            srv_ctl['control'],
            cpppo.apidict), "The server.control... must be a cpppo.apidict"
    else:
        srv_ctl.control = cpppo.apidict(timeout=timeout)

    srv_ctl.control['done'] = False
    srv_ctl.control['disable'] = False
    srv_ctl.control.setdefault('latency', latency)

    # Global options data.  Copy any remaining keyword args supplied to main().  This could
    # include an alternative enip_process, for example, instead of defaulting to logix.process.
    options.update(kwds)

    # Specify a response delay.  The options.delay is another dotdict() layer, so its attributes
    # (eg. .value, .range) are available to the web API for manipulation.  Therefore, they can be
    # set to arbitrary values at random times!  However, the type will be retained.
    def delay_range(*args, **kwds):
        """If a delay.range like ".1-.9" is specified, then change the delay.value every second to something
        in that range."""
        assert 'delay' in kwds and 'range' in kwds['delay'] and '-' in kwds['delay']['range'], \
            "No delay=#-# specified"
        log.normal("Delaying all responses by %s seconds",
                   kwds['delay']['range'])
        while True:
            # Once we start, changes to delay.range will be re-evaluated each loop
            time.sleep(1)
            try:
                lo, hi = map(float, kwds['delay']['range'].split('-'))
                kwds['delay']['value'] = random.uniform(lo, hi)
                log.info("Mutated delay == %g", kwds['delay']['value'])
            except Exception as exc:
                log.warning("No delay=#[.#]-#[.#] range specified: %s", exc)

    options.delay = cpppo.dotdict()
    try:
        options.delay.value = float(args.delay)
        log.normal("Delaying all responses by %r seconds", options.delay.value)
    except:
        assert '-' in args.delay, \
            "Unrecognized --delay=%r option" % args.delay
        # A range #-#; set up a thread to mutate the option.delay.value over the .range
        options.delay.range = args.delay
        options.delay.value = 0.0
        mutator = threading.Thread(target=delay_range, kwargs=options)
        mutator.daemon = True
        mutator.start()

    # Create all the specified tags/Attributes.  The enip_process function will (somehow) assign the
    # given tag name to reference the specified Attribute.
    for t in args.tags:
        tag_name, rest = t, ''
        if '=' in tag_name:
            tag_name, rest = tag_name.split('=', 1)
        tag_type, rest = rest or 'INT', ''
        tag_size = 1
        if '[' in tag_type:
            tag_type, rest = tag_type.split('[', 1)
            assert ']' in rest, "Invalid tag; mis-matched [...]"
            tag_size, rest = rest.split(']', 1)
        assert not rest, "Invalid tag specified; expected tag=<type>[<size>]: %r" % t
        tag_type = str(tag_type).upper()
        typenames = {
            "INT": parser.INT,
            "DINT": parser.DINT,
            "SINT": parser.SINT,
            "REAL": parser.REAL
        }
        assert tag_type in typenames, "Invalid tag type; must be one of %r" % list(
            typenames.keys())
        try:
            tag_size = int(tag_size)
        except ValueError:
            raise AssertionError("Invalid tag size: %r" % tag_size)

        # Ready to create the tag and its Attribute (and error code to return, if any).  If tag_size
        # is 1, it will be a scalar Attribute.
        log.normal("Creating tag: %s=%s[%d]", tag_name, tag_type, tag_size)
        tags[tag_name] = cpppo.dotdict()
        tags[tag_name].attribute = device.Attribute(
            tag_name,
            typenames[tag_type],
            default=(0 if tag_size == 1 else [0] * tag_size))
        tags[tag_name].error = 0x00

    # Use the Logix simulator by default (unless some other one was supplied as a keyword option
    # to main(), loaded above into 'options').  This key indexes an immutable value (not another
    # dotdict layer), so it is not available for the web API to report/manipulate.
    options.setdefault('enip_process', logix.process)

    # The Web API

    # Deduce web interface:port address to bind, and correct types (default is address, above).
    # Default to the same interface as we're bound to, port 80.  We'll only start if non-empty --web
    # was provided, though (even if it's just ':', to get all defaults).  Usually you'll want to
    # specify at least --web :[<port>].
    http = args.web.split(':')
    assert 1 <= len(
        http) <= 2, "Invalid --web [<interface>]:[<port>]: %s" % args.web
    http = (str(http[0]) if http[0] else bind[0],
            int(http[1]) if len(http) > 1 and http[1] else 80)
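    # For example (hypothetical --web values, assuming bind[0] == '0.0.0.0'):
    #     --web :           --> ('0.0.0.0', 80)
    #     --web :8080       --> ('0.0.0.0', 8080)
    #     --web 127.0.0.1   --> ('127.0.0.1', 80)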

    if args.web:
        assert 'web' in sys.modules, "Failed to import web API module; --web option not available.  Run 'pip install web.py'"
        logging.normal("EtherNet/IP Simulator Web API Server: %r" % (http, ))
        webserver = threading.Thread(target=web_api, kwargs={'http': http})
        webserver.daemon = True
        webserver.start()

    # The EtherNet/IP Simulator.  Pass all the top-level options keys/values as keywords, and pass
    # the entire tags dotdict as a tags=... keyword.  The server_main server.control signals (.done,
    # .disable) are also passed as the server= keyword.  We are using a cpppo.apidict with a long
    # timeout; this will block the web API for several seconds to allow all threads to respond to
    # the signals delivered via the web API.
    logging.normal("EtherNet/IP Simulator: %r" % (bind, ))
    kwargs = dict(options, latency=latency, tags=tags, server=srv_ctl)

    tf = network.server_thread
    tf_kwds = dict()
    if args.profile:
        tf = network.server_thread_profiling
        tf_kwds['filename'] = args.profile

    disabled = False  # Recognize toggling between en/disabled
    while not srv_ctl.control.done:
        if not srv_ctl.control.disable:
            if disabled:
                logging.detail("EtherNet/IP Server enabled")
                disabled = False
            network.server_main(address=bind,
                                target=enip_srv,
                                kwargs=kwargs,
                                idle_service=idle_service,
                                thread_factory=tf,
                                **tf_kwds)
        else:
            if not disabled:
                logging.detail("EtherNet/IP Server disabled")
                disabled = True
            time.sleep(latency)  # Still disabled; wait a bit

    return 0
Example no. 47
def run_plc_modbus_polls(plc):
    # Initial conditions (in case PLC is persistent between tests)
    plc.write(1, 0)
    plc.write(40001, 0)

    rate = 1.0
    timeout = 2 * rate  # Nyquist
    intervals = timeout / .05  #  w/ fixed .05s intervals
    wfkw = dict(timeout=timeout, intervals=intervals)
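    # A sketch of the timeout reasoning: a register polled every 'rate' seconds may have just
    # been sampled when we write it, so a change is only guaranteed to be visible within
    # 2 x rate (hence the "Nyquist" analogy); we re-check every fixed .05s interval 'til then.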

    plc.poll(40001, rate=rate)

    success, elapsed = waitfor(lambda: plc.read(40001) is not None,
                               "40001 polled", **wfkw)
    assert success
    assert elapsed < 1.0
    assert plc.read(40001) == 0

    assert plc.read(1) is None
    assert plc.read(40002) is None
    success, elapsed = waitfor(lambda: plc.read(40002) is not None,
                               "40002 polled", **wfkw)
    assert success
    assert elapsed < 1.0
    assert plc.read(40002) == 0
    success, elapsed = waitfor(lambda: plc.read(1) is not None, "00001 polled",
                               **wfkw)
    assert success
    assert elapsed < 1.0
    assert plc.read(1) == 0

    # Now add a bunch of new stuff to poll, and ensure polling occurs.  As we add registers, the
    # number of distinct poll ranges will increase, and then decrease as we in-fill and the
    # inter-register gap drops below the merge reach of 10, allowing the polling to merge ranges.
    # Thus, keep track of the number of registers added, and expect the average poll time to rise
    # and then fall again, roughly as sketched:
    #
    # avg.
    # poll
    # time
    #
    #   |
    #   |
    # 4s|         ..
    # 3s|        .  .
    # 2s|     ...    ...
    # 1s|.....          .......
    #  -+----------------------------------
    #   |  10  20  30  40   regs

    # We'll be overwhelming the poller, so it won't be able to poll w/in the target rate; we'll
    # need to more than double the Nyquist-rate timeout.
    wfkw['timeout'] *= 2.5
    wfkw['intervals'] *= 2.5

    regs = {}
    extent = 100  # how many each of coil/holding registers
    total = extent * 2  # total registers in play
    elapsed = None
    rolling = None
    rolling_factor = 1.0 / 5  # Rolling exponential moving average over last ~8 samples

    # Keep increasing the number of registers polled, up to 1/2 of all registers
    while len(regs) < total * 50 // 100:
        # Always select a previously unpolled register; however, it might
        # have already been in a merge range; if so, get its current value
        # so we mutate it (forcing it to be re-polled)
        base = 40001 if random.randint(0, 1) else 1
        r = None
        while r is None or r in regs:
            r = random.randint(base, base + extent)
        v = plc.read(r)
        if v is not None:
            logging.detail("New reg %5d was already polled due to reach=%d", r,
                           plc.reach)
            regs[r] = v
        # Flip the low bit if already seen; otherwise pick a random holding-register or coil value
        regs[r] = (regs[r] ^ 1 if r in regs
                   else random.randint(0, 65535) if base > 40000
                   else random.randint(0, 1))

        plc.write(r, regs[r])
        plc.poll(r)
        if len(regs) > total * 10 // 100:
            # skip to the good parts...  After 10% of all registers are being polled, start
            # calculating.  See how long it takes, on average, to get the newly written register
            # value polled back.
            success, elapsed = waitfor(lambda: plc.read(r) == regs[r],
                                       "polled %5d == %5d" % (r, regs[r]),
                                       **wfkw)
            assert success
            rolling = misc.exponential_moving_average(rolling, elapsed,
                                                      rolling_factor)
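            # Presumably, misc.exponential_moving_average implements something like:
            #     ema = sample if ema is None else factor * sample + (1 - factor) * ema
            # so with factor == 1/5, roughly the last 2/factor - 1 == 9 samples dominate.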

        logging.normal(
            "%3d/%3d regs: polled %3d ranges w/in %7.3fs. Polled %5d == %5d w/in %7.3fs: avg. %7.3fs (load %3.2f, %3.2f, %3.2f)",
            len(regs), total, len(plc.polling), plc.duration, r, regs[r],
            elapsed or 0.0, rolling or 0.0,
            *[misc.nan if load is None else load for load in plc.load])

        if len(regs) > total * 20 // 100:
            # after 20%, start looking for the exit (ranges should merge, poll rate fall)
            if rolling < plc.rate:
                break

    assert rolling < plc.rate, \
        "Rolling average poll cycle %7.3fs should have fallen below target poll rate %7.3fs" % ( rolling, plc.rate )

    for r, v in regs.items():
        assert plc.read(r) == v
Example no. 48
def test_powerflex_poll_failure():
    """No PowerFlex simulator alive; should see exponential back-off.  Test that the poll.poll API can
    withstand gateway failures, and robustly continue polling.

    """
    #logging.getLogger().setLevel( logging.INFO )
    def null_server( conn, addr, server=None ):
        """Fake up an EtherNet/IP server that just sends a canned EtherNet/IP CIP Register and Identity
        string response, to fake the poll client into sending a poll request into a closed socket.
        Immediately does a shutdown of the incoming half of the socket, and then closes the
        connection after sending the fake replies, usually resulting in an excellent EPIPE/SIGPIPE
        on the client.  Use port 44819, to avoid interference by (possibly slow-to-exit) simulators
        running on port 44818.

        """
        logging.normal( "null_server on %s starting" % ( addr, ))
        conn.shutdown( socket.SHUT_RD )
        time.sleep( 0.1 )
        conn.send( b'e\x00\x04\x00\xc9wH\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00' )
        conn.send( b'c\x00;\x00\xd4/\x9dm\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x0c\x005\x00\x01\x00\x00\x02\xaf\x12\n\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x0e\x006\x00\x14\x0b`1\x1a\x06l\x00\x13PowerFlex/20-COMM-E\xff' )
        conn.close()
        while server and not server.control.done:
            time.sleep( .1 )
        logging.normal( "null_server on %s done" % ( addr, ))

    try:
        values			= {} # { <parameter>: <value> }
        failed			= {} # { <time>: <exc> }

        control			= dotdict()
        control.done		= False

        for _ in range( 3 ):
            server		= threading.Thread(
                target=network.server_main, kwargs={
                    'address': 	('',44819),
                    'target':	null_server,
                    'kwargs': {
                        'server': dotdict({
                            'control': control
                        })
                    },
                    'udp':	False, # no UDP server in this test
                })
            server.daemon		= True
            server.start()
            time.sleep(.5)
            if server.is_alive():
                break
        assert server.is_alive(), "Unable to start null_server on INADDR_ANY"

        def process( p, v ):
            logging.normal( "process: %16s == %s", p, v )
            values[p]		= v
        process.done		= False

        def failure( exc ):
            logging.normal( "failed: %s", exc )
            elapsed		= int(( timer() - failure.start ) * 1000 ) # ms.
            failed[elapsed]	= str( exc )
        failure.start		= timer()
    
        backoff_min		= 0.5
        backoff_max		= 4.0
        backoff_multiplier	= 2.0 # --> backoff == .5, 1.0, 2.0, 4.0
        poller			= threading.Thread(
            target=poll.poll, kwargs={ 
                'proxy_class':	powerflex_750_series,
                'address': 	('localhost',44819),
                'cycle':	1.0,
                'timeout':	0.5,
                'backoff_min':	backoff_min,
                'backoff_max':	backoff_max,
                'backoff_multiplier': backoff_multiplier,
                'process':	process,
                'failure':	failure,
            })
        poller.daemon		= True
        poller.start()

        try:
            # Polling starts immediately, but the first poll occurs after an attempt to get the
            # Identity string, hence two timeouts for the first poll failure.
            while len( failed ) < 3 and timer() - failure.start < 10.0:
                time.sleep(.1)
        finally:
            process.done	= True
            control.done	= True
        poller.join( backoff_max + 1.0 ) # allow for backoff_max before loop check
        assert not poller.is_alive(), "Poller Thread failed to terminate"
        server.join( 1.0 )
        assert not server.is_alive(), "Server Thread failed to terminate"

        # Check that each failure is (at least) the expected backoff from the last
        assert len( failed ) > 0
        k_last			= None
        backoff			= backoff_min
        for k in sorted( failed ):
            logging.normal( "Poll failure at %4dms (next backoff: %7.3fs): %s", k, backoff, failed[k] )
            if k_last is not None:
                assert k - k_last >= backoff
            backoff		= min( backoff_max, backoff * backoff_multiplier )
            k_last		= k
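        # For example, with backoff_min == 0.5, multiplier == 2.0 and backoff_max == 4.0, the
        # gaps between successive failures should be at least ~.5s, 1.0s, 2.0s, and 4.0s
        # thereafter (plus whatever poll/Identity request timeouts elapse).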

        assert len( values ) == 0

    except Exception as exc:
        logging.warning( "Test terminated with exception: %s", exc )
        raise
Example no. 49
def test_rs485_multi( simulated_modbus_rtu_ttyS0,  simulated_modbus_rtu_ttyS2 ):

    # Both fixtures are invoked for their side effect of starting the simulated RTU slaves;
    # only the last command,address pair is retained (and neither is used below).
    command,address		= simulated_modbus_rtu_ttyS0
    command,address		= simulated_modbus_rtu_ttyS2
    Defaults.Timeout		= PORT_TIMEOUT
    client			= modbus_client_rtu( framer=modbus_rtu_framer_collecting,
        port=PORT_MASTER, stopbits=PORT_STOPBITS, bytesize=PORT_BYTESIZE,
        parity=PORT_PARITY, baudrate=PORT_BAUDRATE )

    # 4 poller_modbus instances sharing the same RTU Master 'client'.  They will all block on I/O
    # access via the same RS485 media interface.
    slaves			= [1,2,3,4]
    plc				= {}
    for unit in slaves:
        plc[unit]		= poller_modbus( "RS485 unit %s" % ( unit ), client=client, unit=unit, rate=.25 )

    wfkw			= dict( timeout=1.0, intervals=10 )

    try:
        for unit in slaves:
            plc[unit].write(     1,  0 )
            plc[unit].write( 40001,  0 )
            plc[unit].poll(  40001 )

        # See if we converge on our target poll time
        count			= plc[slaves[0]].counter
        while any( plc[unit].counter < count + 20 for unit in slaves ):
            for unit in slaves:
                logging.normal( "%s at poll %d: Load: %s ", plc[unit].description, plc[unit].counter, plc[unit].load )
            time.sleep( .5 )
        for unit in slaves:
            logging.normal( "%s at poll %d: Load: %s ", plc[unit].description, plc[unit].counter, plc[unit].load )


        for unit in slaves:
            success,elapsed	= waitfor( lambda: plc[unit].read( 40001 ) is not None, "%d/40001 polled" % ( unit ), **wfkw )
            assert success
            assert elapsed < 1.0
            assert plc[unit].read( 40001 ) == 0

        # Haven't polled 1 or 40002 yet
        for unit in slaves:
            assert plc[unit].read(     1 ) is None
            assert plc[unit].read( 40002 ) is None
        for unit in slaves:
            success, elapsed	= waitfor( lambda: plc[unit].read( 40002 ) is not None, "%d/40002 polled" % ( unit ), **wfkw )
            assert success
            assert elapsed < 1.0
            assert plc[unit].read( 40002 ) == 0

            success,elapsed	= waitfor( lambda: plc[unit].read(     1 ) is not None, "%d/00001 polled" % ( unit ), **wfkw )
            assert success
            assert elapsed < 1.0
            assert plc[unit].read(     1 ) == 0

        for unit in slaves:
            plc[unit].write( 40001,   99 )
            success,elapsed	= waitfor( lambda: plc[unit].read( 40001 ) == 99, "%d/40001 polled" % ( unit ), **wfkw )
            assert success
            assert elapsed < 1.0

    except Exception:
        logging.warning( "poller failed: %s", traceback.format_exc() )
        raise
    finally:
        logging.info( "Stopping plc polling" )
        for unit in slaves:
            plc[unit].done	= True
        for unit in slaves:
            waitfor( lambda: not plc[unit].is_alive(), "%s poller done" % ( plc[unit].description ), timeout=1.0 )
Example no. 50
def main( argv=None, idle_service=None, **kwds ):
    """Pass the desired argv (excluding the program name in sys.arg[0]; typically pass argv=None, which
    is equivalent to argv=sys.argv[1:], the default for argparse.  Requires at least one tag to be
    defined.

    Takes a sequence of blocks of actuator position information (in JSON format), either from the
    command-line, or (if '-' provided) from stdin.

    """
    ap				= argparse.ArgumentParser(
        description = "Transmit position to actuators.",
        epilog = "" )

    ap.add_argument( '-g', '--gateway', default='smc.smc_modbus',
                     help="Gateway module.class for positioning actuator (default: smc.smc_modbus)" )
    ap.add_argument( '-c', '--config', default=None,
                     help="Gateway module.class configuration JSON (default: None)" )
    ap.add_argument( '-v', '--verbose', default=0, action="count",
                     help="Display logging information." )
    ap.add_argument( '-a', '--address', default=address,
                     help="Address of actuator gateway to connect to (default: %s)" % ( address ))
    ap.add_argument( '-l', '--log',
                     help="Log file, if desired" )
    ap.add_argument( '-t', '--timeout', default=5.0, type=float,
                     help="Gateway I/O timeout" )

    ap.add_argument( 'position', nargs="+",
                     help="Any JSON position dictionaries, or numeric delays (in seconds)")

    args			= ap.parse_args( argv )

    # Set up logging level (-v...) and --log <file>
    cpppo.log_cfg['level']	= ( logging_levelmap[args.verbose] 
                                    if args.verbose in logging_levelmap
                                    else logging.DEBUG )

    # Chain any provided idle_service function with log rotation; these may (also) consult global
    # signal flags such as logrotate_request, so execute supplied functions before logrotate_perform
    idle_service		= [ idle_service ] if idle_service else []
    if args.log:
        # Output logging to a file, and handle UNIX-y log file rotation via 'logrotate', which sends
        # signals to indicate that a service's log file has been moved/renamed and it should re-open
        cpppo.log_cfg['filename']= args.log
        signal.signal( signal.SIGHUP, logrotate_request )

    logging.basicConfig( **cpppo.log_cfg )

    signal.signal( signal.SIGTERM, shutdown_request )
    if hasattr( signal, 'SIGUSR1' ):
        signal.signal( signal.SIGUSR1, loglevelup_request )
    if hasattr( signal, 'SIGUSR2' ):
        signal.signal( signal.SIGUSR2, logleveldn_request )
    if hasattr( signal, 'SIGURG' ):
        signal.signal( signal.SIGURG,  uptime_request )

    idle_service.append( signal_service )
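    # Hypothetical usage of the installed signal handlers, for a running process <pid>:
    #     kill -HUP  <pid>   # re-open the log file (after logrotate renames it; only w/ --log)
    #     kill -USR1 <pid>   # increase logging verbosity
    #     kill -USR2 <pid>   # decrease logging verbosity
    #     kill -URG  <pid>   # report uptime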

    # Load the specified Gateway module.class, and ensure class is present; include the module's own
    # directory to get the locally specified ones.
    sys.path.append( os.path.dirname( __file__ ))
    mod,cls			= args.gateway.split('.')
    __import__( mod, globals(), locals(), [], 0 )
    gateway_module		= sys.modules[mod]
    assert hasattr( gateway_module, cls ), "Gateway module %s missing target class: %s" % ( mod, cls )
    gateway_class		= getattr( gateway_module, cls )
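    # For example, the default --gateway smc.smc_modbus imports module 'smc' and selects its
    # class 'smc_modbus' as gateway_class; a hypothetical --gateway lin.linear would import
    # module 'lin' and select class 'linear'.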

    # Parse any Gateway configuration JSON supplied
    gateway_config		= {}
    if args.config:
        try:
            gateway_config	= json.loads( args.config )
            assert isinstance( gateway_config, dict ), \
                "Gateway configuration JSON must produce a dictionary"
        except Exception as exc:
            logging.warning( "Invalid Gateway config: %s; %s", args.config, exc )
            raise

    # Read and process JSON position and delay inputs; '-' means read from sys.stdin 'til EOF.  Can be mixed, eg:
    # 
    #     '{ <initial position> }' '# a comment, followed by a delay' 1.5 - '{ <final position> }'

    if '-' in args.position:
        # Collect input from sys.stdin 'til EOF, at position of '-' in argument list
        minus			= args.position.index('-')
        positer			= itertools.chain( args.position[:minus], sys.stdin, args.position[minus+1:] )
    else:
        positer			= iter( args.position )

    start			= cpppo.timer()
    count,success		= 0,0
    gateway			= None # None --> never, False --> failed, truthy --> connected
    while not shutdown_signalled:
        # Perform all idle_services, and get next position, terminate loop when done
        for f in idle_service:
            f()
        try:
            pos			= next( positer )
        except StopIteration:
            break

        # Ignore whitespace and comments
        inp			= pos.strip()
        if inp.startswith( '#' ):
            inp			= ''
        if not inp:
            continue
        # A non-empty non-comment input in 'inp'; parse it as JSON into 'dat'; allow numeric and dict

        if gateway and logging.getLogger().isEnabledFor( logging.NORMAL ):
            logging.normal( "%r", gateway )

        try:
            dat			= json.loads( inp )
        except Exception as exc:
            logging.warning( "Invalid position data: %s; %s", inp, exc )
            continue
        if isinstance( dat, cpppo.natural.num_types ):
            logging.normal( "Delaying: %7.3fs", dat )
            time.sleep( dat )
            continue
        elif isinstance( dat, dict ):
            # A position dict in 'dat'; attempt to position to it.  We'll wait forever to establish a
            # connection to the gateway, and then attempt each positioning command until it succeeds.
            logging.normal( "Position: actuator %3s parsed ; params: %r", dat.get( 'actuator', 'N/A' ), dat )
        elif isinstance( dat, list ) and dat and isinstance( dat[0], int ):
            # An [ <actuator>, "FLAG", "flag", ... ] list of discrete output flags to apply
            logging.normal( "Outputs : actuator %3s parsed ; params: %r", dat[0], dat[1:] )
        else:
            logging.warning( "Unknown command: %s: %r", type( dat ), dat )
            continue

        count		       += 1
        while success < count:
            if not gateway:
                try:
                    gateway	= gateway_class( address=args.address, timeout=args.timeout, **gateway_config )
                    logging.normal( "Gateway:  %s connected", address )
                except Exception as exc:
                    logging.warning("Gateway:  %s connection failed: %s; %s", address,
                                    exc, traceback.format_exc() if gateway is None else "" )
                    gateway	= False
                    time.sleep( 1 ) # avoid tight loop on connection failures
                    continue

            # Have a gateway; issue the set/position command, discarding the Gateway on failure and
            # looping; otherwise, fall thru after success (gateway is Truthy) and get next command.
            # A positioning command with no position data (eg. only actuator and/or timeout) should
            # just confirm that the previous positioning operation is complete.
            try:
                if isinstance( dat, list ):
                    status	= gateway.outputs( *dat )
                else:
                    status	= gateway.position( **dat )
                success	       += 1
                logging.normal(  "Success : actuator %3s status: %r\n%r", 
                                 dat[0] if isinstance( dat, list ) else dat.get( 'actuator', 'N/A' ),
                                 status, gateway )
            except Exception as exc:
                logging.warning( "Failure : actuator %3s raised : %s\n%r\n%s\n%r",
                                 dat[0] if isinstance( dat, list ) else dat.get( 'actuator', 'N/A' ),
                                 exc, dat, traceback.format_exc(), gateway )
                gateway.close()
                gateway		= None

    logging.normal( "Completed %d/%d actuator commands in %7.3fs", success, count, cpppo.timer() - start )
    return 0 if success == count else 1