def test_STRINGs():
    """SSTRING is 1-byte len + string; STRING is 2-byte len + string + pad (if odd len)"""
    base = "Of the increase of His government and peace there shall be no end "

    def drain(machine_cls, payload):
        # Run a parser machine over payload to completion; return the parsed dotdict.
        parsed = cpppo.dotdict()
        with machine_cls() as machine:
            with contextlib.closing(machine.run(source=payload, data=parsed)) as engine:
                for _mch, _sta in engine:
                    pass
        return parsed

    for want in (random.randrange(0, 1000) for _ in range(10)):
        original = base * (want // len(base) + 1)  # always at least length want

        # STRING: 2-byte length, then string, then a pad byte if the length is odd
        encoded = parser.STRING.produce(value=original)
        assert len(encoded) == 2 + len(original) + len(original) % 2
        result = drain(parser.STRING, encoded)
        assert result.STRING.length == len(original)
        assert result.STRING.string == original

        # SSTRING: 1-byte length, no pad; must fail for strings >= 256 chars
        try:
            encoded = parser.SSTRING.produce(value=original)
        except Exception as exc:
            assert len(original) >= 256, "SSTRING failure: %s" % (exc)
            continue
        assert len(encoded) == 1 + len(original)
        result = drain(parser.SSTRING, encoded)
        assert result.SSTRING.length == len(original)
        assert result.SSTRING.string == original
def test_limit():
    """Exercise the `limit` option on cpppo.regex: a hard symbol limit that truncates the
    match, and the dotted-path / callable forms of specifying that limit."""
    # Force a limit on input symbols.  If we only accept only even b's, we'll
    # fail if we force a stoppage at a+b*9
    source = cpppo.peekable( str( 'a'+'b'*100 ))
    data = cpppo.dotdict()
    try:
        with cpppo.regex( initial=str( 'a(bb)*' ), context='even_b', limit=10 ) as machine:
            for i,(m,s) in enumerate( machine.run( source=source, data=data )):
                log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                          m.name_centered(), i, s, source.sent, source.peek(), data )
    except cpppo.NonTerminal:
        # Stopped mid-match at exactly the 10-symbol limit
        assert i == 10
        assert source.sent == 10
    else:
        assert False, "Should have failed with a cpppo.NonTerminal exception"
    # But odd b's OK.  The limit may be given as a literal int, a dotted data path
    # (relative to the machine's context), or a callable.
    for limit in [ 10, '..somewhere.ten', lambda **kwds: 10,
                   lambda path=None, data=None, **kwds: data[path+'..somewhere.ten'] ]:
        source = cpppo.peekable( str( 'a'+'b'*100 ))
        data = cpppo.dotdict()
        data['somewhere.ten'] = 10
        with cpppo.regex( initial=str( 'ab(bb)*' ), context='odd_b', limit=limit ) as machine:
            for i,(m,s) in enumerate( machine.run( source=source, data=data )):
                log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                          m.name_centered(), i, s, source.sent, source.peek(), data )
        assert i == 10
        assert source.sent == 10
        # array.array.tostring was removed in py3; tounicode is the py3 spelling here
        assert ( data.odd_b.input.tostring() if sys.version_info[0] < 3
                 else data.odd_b.input.tounicode() ) == str( 'a'+'b'*9 )
def logix_test_once( obj, req ):
    """Parse raw request bytes with obj's parser, have obj process them, then re-parse the
    generated reply with the same parser.

    Returns (processed, req_data, rpy_data): the obj.request() result, the parsed
    request tree, and the parsed reply tree.
    """
    # Parse the raw request bytes into a dotdict tree
    req_source = cpppo.peekable( req )
    req_data = cpppo.dotdict()
    with obj.parser as machine:
        for m,s in machine.run( source=req_source, data=req_data ):
            pass
    if log.isEnabledFor( logging.NORMAL ):
        log.normal( "Logix Request parsed: %s", enip.enip_format( req_data ))

    # If we ask a Logix Object to process the request, it should respond.
    processed = obj.request( req_data )
    if log.isEnabledFor( logging.NORMAL ):
        log.normal( "Logix Request processed: %s", enip.enip_format( req_data ))

    # And, the same object should be able to parse the request's generated reply
    # (the reply bytes are read back out of req_data.input)
    rpy_source = cpppo.peekable( bytes( req_data.input ))
    rpy_data = cpppo.dotdict()
    with obj.parser as machine:
        for i,(m,s) in enumerate( machine.run( source=rpy_source, data=rpy_data )):
            if log.isEnabledFor( logging.INFO ):
                log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                          m.name_centered(), i, s, rpy_source.sent, rpy_source.peek(), rpy_data )
    if log.isEnabledFor( logging.NORMAL ):
        log.normal( "Logix Reply processed: %s", enip.enip_format( rpy_data ))
    return processed,req_data,rpy_data
def multiple(self, request, path=None, route_path=None, send_path=None, timeout=None, send=True):
    """Build a Multiple Service Packet request wrapping the given list of sub-requests;
    transmit it via unconnected_send unless send is falsey.  Returns the request dotdict."""
    assert isinstance(request, list), "A Multiple Service Packet requires a request list"
    packet = cpppo.dotdict()
    if path:
        segments = [cpppo.dotdict(seg) for seg in path]
        packet.path = {"segment": segments}
    packet.multiple = {"request": request}
    if send:
        self.unconnected_send(
            request=packet, route_path=route_path, send_path=send_path, timeout=timeout)
    return packet
def read(self, path, elements=1, offset=0, route_path=None, send_path=None, timeout=None, send=True):
    """Build a Read Tag [Fragmented] request for the given path.

    An offset of None selects a plain Read Tag; otherwise Read Tag Fragmented.
    Transmits via unconnected_send unless send is falsey; returns the request dotdict.
    """
    req = cpppo.dotdict()
    req.path = {"segment": [cpppo.dotdict(seg) for seg in path]}
    if offset is None:
        req.read_tag = {"elements": elements}
    else:
        req.read_frag = {"elements": elements, "offset": offset}
    if send:
        self.unconnected_send(
            request=req, route_path=route_path, send_path=send_path, timeout=timeout)
    return req
def test_decide():
    """Allow state transition decisions based on collected context other than just the next
    source symbol.
    """
    # Machine: 'a', spaces, integer i1, spaces, integer i2
    e = cpppo.state("enter")
    e["a"] = a = cpppo.state_input("a", context="a")
    a[" "] = s1 = cpppo.state_drop("s1")
    s1[" "] = s1
    s1[None] = i1 = cpppo.integer("i1", context="i1")
    i1[" "] = s2 = cpppo.state_drop("s2")
    s2[" "] = s2
    s2[None] = i2 = cpppo.integer("i2", context="i2")
    less = cpppo.state("less", terminal=True)
    greater = cpppo.state("greater", terminal=True)
    equal = cpppo.state("equal", terminal=True)
    # Epsilon (None) transitions out of i2 guarded by predicates over the parsed
    # data, with an unconditional `equal` fallback.
    i2[None] = cpppo.decide("isless", less,
                            predicate=lambda machine, source, path, data: data.i1 < data.i2)
    i2[None] = cpppo.decide("isgreater", greater,
                            predicate=lambda machine, source, path, data: data.i1 > data.i2)
    i2[None] = equal

    source = cpppo.peekable(str("a 1 2"))
    data = cpppo.dotdict()
    with cpppo.dfa("comparo", initial=e) as comparo:
        for i, (m, s) in enumerate(comparo.run(source=source, data=data)):
            log.info(
                "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                m.name_centered(), i, s, source.sent, source.peek(), data,
            )
    assert i == 11
    assert s is less

    source = cpppo.peekable(str("a 33 33"))
    data = cpppo.dotdict()
    with cpppo.dfa("comparo", initial=e) as comparo:
        for i, (m, s) in enumerate(comparo.run(source=source, data=data)):
            log.info(
                "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                m.name_centered(), i, s, source.sent, source.peek(), data,
            )
    assert i == 13
    assert s is equal
def __init__(self, template, template_directory, args):
    """Initialize the ENIP server from its config template: address, port, and tag set."""
    self.config = EnipConfig(template)
    self.addr = self.config.server_addr
    self.port = self.config.server_port
    self.stopped = False
    self.connections = cpppo.dotdict()
    # all known tags
    self.tags = cpppo.dotdict()
    self.set_tags()
    # Lazy %-style args (instead of eager '+' concatenation) defer interpolation
    # until the record is actually emitted, and won't TypeError if the config
    # value isn't a str.
    logger.debug('ENIP server serial number: %s', self.config.serial_number)
    logger.debug('ENIP server product name: %s', self.config.product_name)
def tnet_server( conn, addr ):
    """Serve one tnet client 'til EOF; then close the socket"""
    source = cpppo.chainable()
    with tnet_machine( "tnet_%s" % addr[1] ) as tnet_mesg:
        eof = False
        while not eof:
            # Fresh result tree for each tnetstring message
            data = cpppo.dotdict()
            # Loop blocking for input, while we've consumed input from source since the last time.
            # If we hit this again without having used any input, we know we've hit a symbol
            # unacceptable to the state machine; stop
            for mch, sta in tnet_mesg.run( source=source, data=data ):
                if sta is not None:
                    continue
                # Non-transition; check for input, blocking if non-terminal and none left.  On
                # EOF, terminate early; this will raise a GeneratorExit.
                timeout = 0 if tnet_mesg.terminal or source.peek() is not None else None
                msg = network.recv( conn, timeout=timeout ) # blocking
                if msg is not None:
                    # Zero-length recv means the peer closed the connection
                    eof = not len( msg )
                    log.info( "%s: recv: %5d: %s", tnet_mesg.name_centered(), len( msg ),
                              "EOF" if eof else reprlib.repr( msg ))
                    source.chain( msg )
                    if eof:
                        break
            # Terminal state (or EOF).
            log.detail( "%s: byte %5d: data: %r", tnet_mesg.name_centered(), source.sent, data )
            if tnet_mesg.terminal:
                # Reply with the parsed payload re-serialized as JSON
                res = json.dumps( data.tnet.type.input, indent=4, sort_keys=True )
                conn.send(( res + "\n\n" ).encode( "utf-8" ))
    log.info( "%s done", tnet_mesg.name_centered() )
def __init__( self, host, port=44818, timeout=None, depth=None, multiple=None,
              gateway_class=client.connector, route_path=None, send_path=None,
              identity_default=None ):
    """Record the I/O parameters for the target CIP Device.

    Unless identity_default is supplied, the Device is identified with a List
    Identity request whenever a CIP session is registered.  Passing an
    identity_default (a str product name, or any object with a .product_name
    attribute) skips that initial request; self.identity is still replaced if
    .list_identity later succeeds.
    """
    self.host = host
    self.port = port
    # Fall back to defaults only when the caller passed None
    self.timeout = timeout if timeout is not None else 5
    self.depth = depth if depth is not None else 2
    self.multiple = multiple if multiple is not None else 0
    self.route_path = route_path
    self.send_path = send_path
    self.gateway_class = gateway_class
    self.gateway = None
    self.gateway_lock = threading.Lock()
    # A bare product-name string is promoted to a minimal identity record
    if isinstance( identity_default, cpppo.type_str_base ):
        identity_default = cpppo.dotdict( product_name = identity_default )
    assert not identity_default or hasattr( identity_default, 'product_name' )
    self.identity_default = identity_default
    self.identity = identity_default
def main(): """The basic examples in the README""" # Basic DFA that accepts ab+ E = cpppo.state( 'E' ) A = cpppo.state_input( 'A' ) B = cpppo.state_input( 'B', terminal=True ) E['a'] = A A['b'] = B B['b'] = B BASIC = cpppo.dfa( 'ab+', initial=E, context='basic' ) # Composite state machine accepting ab+, ignoring ,[ ]* separators ABP = cpppo.dfa( 'ab+', initial=E, terminal=True ) SEP = cpppo.state_drop( 'SEP' ) ABP[','] = SEP SEP[' '] = SEP SEP[None] = ABP CSV = cpppo.dfa( 'CSV', initial=ABP, context='csv' ) # A regular expression; he default dfa name is the regular expression itself. REGEX = cpppo.regex( initial='(ab+)((,[ ]*)(ab+))*', context='regex' ) data = cpppo.dotdict() for machine in [ BASIC, CSV, REGEX ]: path = machine.context() + '.input' # default for state_input data source = cpppo.peekable( str( 'abbbb, ab' )) with machine: for i,(m,s) in enumerate( machine.run( source=source, data=data )): print( "%s #%3d; next byte %3d: %-10.10r: %r" % ( m.name_centered(), i, source.sent, source.peek(), data.get(path) )) print( "Accepted: %r; remaining: %r\n" % ( data.get(path), ''.join( source ))) print( "Final: %r" % ( data ))
def test_tnet_string():
    """Dump each test string as a tnetstring and parse it back through the tnet state
    machine; each must be fully consumed, ending in a terminal state."""
    testvec = [
        "The π character is called pi",
    ]
    successes = 0
    for t in testvec:
        with tnet.tnet_machine() as tnsmach:
            path = "test_tnet"
            tns = tnetstrings.dump( t )
            data = cpppo.dotdict()
            source = cpppo.peekable( tns )
            # Run the machine until it stops (terminal, or stuck non-terminal)
            for mch, sta in tnsmach.run( source=source, data=data, path=path ):
                log.info( "%s byte %5d: data: %r",
                          misc.centeraxis( mch, 25, clip=True ), source.sent, data )
            # Visualize how far into the encoding the parse progressed
            log.info("Parsing tnetstring:\n%s\n%s (byte %d)", repr(bytes(tns)),
                     '-' * (len(repr(bytes(tns[:source.sent])))-1) + '^', source.sent )
            if sta is None or not sta.terminal:
                # Ended in a non-terminal state
                log.info( "%s byte %5d: failure: data: %r; Not terminal; unrecognized",
                          misc.centeraxis( tnsmach, 25, clip=True ), source.sent, data )
            else:
                # Ended in a terminal state.
                if source.peek() is None:
                    # Terminal, and the whole encoding was consumed: success
                    log.info( "%s byte %5d: success: data: %r",
                              misc.centeraxis( tnsmach, 25, clip=True ), source.sent, data )
                    successes += 1
                else:
                    log.info( "%s byte %5d: failure: data: %r; Terminal, but TNET string wasn't consumed",
                              misc.centeraxis( tnsmach, 25, clip=True ), source.sent, data )
    assert successes == len( testvec )
def echo_server( conn, addr ):
    """Serve one echo client 'til EOF; then close the socket"""
    source = cpppo.chainable()
    with echo_machine( "echo_%s" % addr[1] ) as echo_line:
        eof = False
        while not eof:
            # Fresh result tree for each recognized line
            data = cpppo.dotdict()
            # See if a line has been recognized, stopping at terminal state.  If this machine
            # is ended early due to an EOF, it should still terminate in a terminal state
            for mch, sta in echo_line.run( source=source, data=data ):
                if sta is not None:
                    continue
                # Non-transition; check for input, blocking if non-terminal and none left.  On
                # EOF, terminate early; this will raise a GeneratorExit.
                timeout = 0 if echo_line.terminal or source.peek() is not None else None
                msg = network.recv( conn, timeout=timeout )
                if msg is not None:
                    # Zero-length recv means the peer closed the connection
                    eof = not len( msg )
                    log.info( "%s recv: %5d: %s", echo_line.name_centered(), len( msg ),
                              "EOF" if eof else cpppo.reprlib.repr( msg ))
                    source.chain( msg )
                    if eof:
                        break
            # Terminal state (or EOF).
            log.detail( "%s: byte %5d: data: %r", echo_line.name_centered(), source.sent, data )
            if echo_line.terminal:
                # A complete line was recognized; echo it back
                conn.send( data.echo )
    log.info( "%s done", echo_line.name_centered() )
def test_readme(): """The basic examples in the README""" # Basic DFA that accepts ab+ E = cpppo.state( "E" ) A = cpppo.state_input( "A" ) B = cpppo.state_input( "B", terminal=True ) E['a'] = A A['b'] = B B['b'] = B data = cpppo.dotdict() source = cpppo.peekable( str( 'abbbb,ab' )) with cpppo.dfa( initial=E ) as abplus: for i,(m,s) in enumerate( abplus.run( source=source, path="ab+", data=data )): log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r", m.name_centered(), i, s, source.sent, source.peek(), data ) assert i == 5 assert source.peek() == str(',') # Composite state machine accepting ab+, ignoring ,[ ]* separators CSV = cpppo.dfa( "CSV", initial=E, terminal=True ) SEP = cpppo.state_drop( "SEP" ) CSV[','] = SEP SEP[' '] = SEP SEP[None] = CSV source = cpppo.peekable( str( 'abbbb, ab' )) with cpppo.dfa( initial=CSV ) as r2: for i,(m,s) in enumerate( r2.run( source=source, path="readme_CSV", data=data )): log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r", m.name_centered(), i, s, source.sent, source.peek(), data ) assert i == 14 assert source.peek() is None
def unconnected_send(self, request, route_path=None, send_path=None, timeout=None):
    """Wrap a pre-built CIP request in an Unconnected Send envelope and transmit it.

    Encodes the request via logix.Logix.produce, wraps it in CPF / CIP / ENIP
    encapsulation layers (inside-out), sends the encoded bytes, and returns
    the complete request dotdict tree.
    """
    if route_path is None:
        # Default to the CPU in chassis (link 0), port 1
        route_path = [{"link": 0, "port": 1}]
    if send_path is None:
        # Default to the Connection Manager
        send_path = [{"class": 6}, {"instance": 1}]
    assert isinstance(request, dict)

    # ENIP encapsulation header
    data = cpppo.dotdict()
    data.enip = {}
    data.enip.session_handle = self.session
    data.enip.options = 0
    data.enip.status = 0
    data.enip.sender_context = {}
    data.enip.sender_context.input = bytearray([0x00] * 8)
    data.enip.CIP = {}
    data.enip.CIP.send_data = {}

    # Common Packet Format: a null address item plus the data item
    sd = data.enip.CIP.send_data
    sd.interface = 0
    sd.timeout = 0
    sd.CPF = {}
    sd.CPF.item = [cpppo.dotdict(), cpppo.dotdict()]
    sd.CPF.item[0].type_id = 0         # null address item
    sd.CPF.item[1].type_id = 178       # 0xB2: unconnected data item
    sd.CPF.item[1].unconnected_send = {}

    us = sd.CPF.item[1].unconnected_send
    us.service = 82                    # 0x52: Unconnected Send service code
    us.status = 0
    us.priority = 5
    us.timeout_ticks = 157
    us.path = {"segment": [cpppo.dotdict(d) for d in send_path]}
    us.route_path = {"segment": [cpppo.dotdict(d) for d in route_path]}
    us.request = request

    log.detail("Client Unconnected Send: %s", enip.enip_format(data))

    # Produce the nested encodings from the inside out
    us.request.input = bytearray(logix.Logix.produce(us.request))
    sd.input = bytearray(enip.CPF.produce(sd.CPF))
    data.enip.input = bytearray(enip.CIP.produce(data.enip))
    data.input = bytearray(enip.enip_encode(data.enip))

    self.send(data.input, timeout=timeout)
    return data
def set_tags(self):
    """Create a device.Attribute for each configured tag, sharing Attributes between tags
    that resolve to the same CIP class/instance/attribute address."""
    # Map config type name -> (parser class, default value, coercion function)
    typenames = {
        "BOOL": (parser.BOOL, 0, lambda v: bool(v)),
        "INT": (parser.INT, 0, lambda v: int(v)),
        "DINT": (parser.DINT, 0, lambda v: int(v)),
        "SINT": (parser.SINT, 0, lambda v: int(v)),
        "REAL": (parser.REAL, 0.0, lambda v: float(v)),
        "SSTRING": (parser.SSTRING, '', lambda v: str(v)),
        "STRING": (parser.STRING, '', lambda v: str(v)),
    }
    for t in self.config.dtags:
        tag_name = t.name
        tag_type = t.type
        tag_size = t.size
        assert tag_type in typenames, "Invalid tag type; must be one of %r" % list(typenames)
        tag_class, tag_default, f = typenames[tag_type]
        tag_value = f(t.value)
        tag_address = t.addr
        logger.debug("tag address: %s", tag_address)
        path, attribute = None, None
        if tag_address:
            # Resolve the @cls/ins/att, and optionally [elm] or /elm
            segments, elm, cnt = device.parse_path_elements('@' + tag_address)
            assert not cnt or cnt == 1, \
                "A Tag may be specified to indicate a single element: %s" % (tag_address)
            path = {'segment': segments}
            cls, ins, att = device.resolve(path, attribute=True)
            assert ins > 0, "Cannot specify the Class' instance for a tag's address"
            elm = device.resolve_element(path)
            # Look thru defined tags for one assigned to same cls/ins/att (maybe different elm);
            # must be same type/size.
            for tn, te in dict.items(self.tags):
                if not te['path']:
                    continue            # Ignore tags w/o pre-defined path...
                if device.resolve(te['path'], attribute=True) == (cls, ins, att):
                    assert te.attribute.parser.__class__ is tag_class and len(te.attribute) == tag_size, \
                        "Incompatible Attribute types for tags %r and %r" % (tn, tag_name)
                    attribute = te.attribute
                    break
        if not attribute:
            # No Attribute found; create one (scalar if tag_size == 1, else an array)
            attribute = device.Attribute(tag_name, tag_class,
                                         default=(tag_value if tag_size == 1
                                                  else [tag_value] * tag_size))
        # Ready to create the tag and its Attribute (and error code to return, if any).  If
        # tag_size is 1, it will be a scalar Attribute.  Since the tag_name may contain '.',
        # we don't want the normal dotdict.__setitem__ resolution to parse it; use plain
        # dict.__setitem__.
        logger.debug("Creating tag: %-14s%-10s %10s[%4d]", tag_name,
                     '@' + tag_address if tag_address else '',
                     attribute.parser.__class__.__name__, len(attribute))
        tag_entry = cpppo.dotdict()
        tag_entry.attribute = attribute   # The Attribute (may be shared by multiple tags)
        tag_entry.path = path             # Desired Attribute path (may include element), or None
        tag_entry.error = 0x00
        dict.__setitem__(self.tags, tag_name, tag_entry)
def test_IFACEADDRS():
    """IFACEADDRS must round-trip: produce() an interface-address struct, then parse it back."""
    data = cpppo.dotdict()
    for key, value in [
            ("ip_address", "10.161.1.5"),
            ("network_mask", "255.255.255.0"),
            ("gateway_address", "10.161.1.1"),
            ("dns_primary", "8.8.8.8"),
            ("dns_secondary", "8.8.4.4"),
            ("domain_name", "acme.ca"),
    ]:
        data[key] = value
    source = parser.IFACEADDRS.produce(data)
    assert source == b"\n\xa1\x01\x05\xff\xff\xff\x00\n\xa1\x01\x01\x08\x08\x08\x08\x08\x08\x04\x04\x07\x00acme.ca\x00"
    # Parse the encoding back and compare against the original values
    result = cpppo.dotdict()
    with parser.IFACEADDRS() as machine:
        with contextlib.closing(machine.run(source=source, data=result)) as engine:
            for _mch, _sta in engine:
                pass
    assert result.IFACEADDRS == data
def logix_remote( count, svraddr, kwargs ):
    """Client side of a Logix server test: verify a canned Register encoding, register a
    session, then issue `count` Read Fragmented requests; finally signal server shutdown.

    NOTE(review): `client.await` is a SyntaxError on Python 3.7+ ('await' became a
    reserved keyword); later cpppo versions rename this to `client.await_response` —
    confirm the target cpppo/Python versions.
    """
    try:
        time.sleep(.25) # Wait for server to be established

        # Confirm that a known Register encodes as expected
        data = cpppo.dotdict()
        data.enip = {}
        data.enip.options = 0
        data.enip.session_handle = 0
        data.enip.status = 0
        data.enip.sender_context = {}
        data.enip.sender_context.input = bytearray( [0x00] * 8 )
        data.enip.CIP = {}
        data.enip.CIP.register = {}
        data.enip.CIP.register.options = 0
        data.enip.CIP.register.protocol_version = 1
        data.enip.input = bytearray( enip.CIP.produce( data.enip ))
        data.input = bytearray( enip.enip_encode( data.enip ))
        log.normal( "Register Request: %r" % data )
        assert bytes( data.input ) == rss_004_request

        # Try to Register a real session, followed by commands
        timeout = 5
        begun = cpppo.timer()
        cli = client.client( host=svraddr[0], port=svraddr[1] )
        assert cli.writable( timeout=timeout )
        elapsed = cpppo.timer() - begun
        log.normal( "Client Connected in %7.3f/%7.3fs" % ( elapsed, timeout ))

        begun = cpppo.timer()
        with cli:
            cli.register( timeout=timeout )
            data,elapsed = client.await( cli, timeout=timeout )
        log.normal( "Client Register Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data )
        assert data is not None and 'enip.CIP.register' in data, "Failed to receive Register response"
        assert data.enip.status == 0, "Register response indicates failure: %s" % data.enip.status

        # Establish the EtherNet/IP "session handle" used by all further requests
        cli.session = data.enip.session_handle

        start = cpppo.timer()
        with cli:
            for _ in range( count ):
                begun = cpppo.timer()
                cli.read( path=[{'symbolic': 'SCADA'}, {'element': 12}],
                          elements=201, offset=2, timeout=timeout )
                data,elapsed = client.await( cli, timeout=timeout )
                log.normal( "Client ReadFrg. Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data )
        duration = cpppo.timer() - start
        log.warning( "Client ReadFrg. Average %7.3f TPS (%7.3fs ea)." % ( count / duration, duration / count ))

        log.normal( "Signal shutdown w/ server.control in object %s", id( kwargs['server']['control'] ))
    finally:
        kwargs['server']['control'].done = True # Signal the server to terminate
def port_fix( path=None, data=None, **kwds ):
    """Discard port values above 0x0F; return True (transition) if remaining port value is
    0x0F (Optional Extended port)"""
    # Keep only the low nibble of the accumulated port value
    data[path].port &= 0x0F
    if data[path].port == 0x0F:
        # Port is extended; discard and prepare to collect new port number
        data[path].port = cpppo.dotdict()
        return True
    # Port is OK; don't transition
    return False
def port_fix(path=None, data=None, **kwds):
    """Mask the collected port value down to its low nibble.  A masked value of 0x0F marks
    an Optional Extended port: reset it to collect the real port number and return True
    (transition); any other value is a complete port, so return False."""
    entry = data[path]
    entry.port &= 0x0F
    if entry.port != 0x0F:
        # Plain (small) port number; no transition required
        return False
    # Extended-port marker; discard it and prepare to accumulate the real port
    entry.port = cpppo.dotdict()
    return True
def test_IFACEADDRS():
    """IFACEADDRS must round-trip: produce() an interface-address struct, then parse it back."""
    data = cpppo.dotdict()
    data.ip_address = "10.161.1.5"
    data.network_mask = "255.255.255.0"
    data.gateway_address = "10.161.1.1"
    data.dns_primary = "8.8.8.8"
    data.dns_secondary = "8.8.4.4"
    data.domain_name = "acme.ca"
    source = parser.IFACEADDRS.produce(data)
    assert source == b'\n\xa1\x01\x05\xff\xff\xff\x00\n\xa1\x01\x01\x08\x08\x08\x08\x08\x08\x04\x04\x07\x00acme.ca\x00'
    # Parse the encoding back and compare against the original values
    result = cpppo.dotdict()
    with parser.IFACEADDRS() as machine:
        with contextlib.closing(machine.run(source=source, data=result)) as engine:
            for m, s in engine:
                pass
    assert result.IFACEADDRS == data
def start(self, host, port):
    """Run the ENIP server main loop on (host, port) until self.stopped is set.

    Builds the cpppo server control/options structures, then repeatedly invokes
    network.server_main in the configured tcp or udp mode.
    """
    srv_ctl = cpppo.dotdict()
    srv_ctl.control = cpppo.apidict(timeout=self.config.timeout)
    srv_ctl.control['done'] = False
    srv_ctl.control['disable'] = False
    srv_ctl.control.setdefault('latency', self.config.latency)

    options = cpppo.dotdict()
    options.setdefault('enip_process', logix.process)
    kwargs = dict(options, tags=self.tags, server=srv_ctl)

    # The comparison already yields a bool; `True if ... else False` was redundant.
    tcp_mode = self.config.mode == 'tcp'
    udp_mode = self.config.mode == 'udp'

    # Lazy %-args defer string interpolation until the record is actually emitted.
    logger.debug('ENIP server started on: %s:%d, mode: %s', host, port, self.config.mode)

    while not self.stopped:
        network.server_main(address=(host, port), target=self.handle, kwargs=kwargs,
                            idle_service=None, udp=udp_mode, tcp=tcp_mode,
                            thread_factory=network.server_thread)
def test_decide():
    """Allow state transition decisions based on collected context other than just the next
    source symbol.
    """
    # Machine: 'a', spaces, integer i1, spaces, integer i2
    e = cpppo.state( "enter" )
    e['a'] = a = cpppo.state_input( "a", context='a' )
    a[' '] = s1 = cpppo.state_drop( "s1" )
    s1[' '] = s1
    s1[None] = i1 = cpppo.integer( "i1", context='i1' )
    i1[' '] = s2 = cpppo.state_drop( "s2" )
    s2[' '] = s2
    s2[None] = i2 = cpppo.integer( "i2", context='i2' )
    less = cpppo.state( "less", terminal=True )
    greater = cpppo.state( "greater", terminal=True )
    equal = cpppo.state( "equal", terminal=True )
    # Epsilon (None) transitions out of i2 guarded by predicates over the parsed
    # data, with an unconditional `equal` fallback.
    i2[None] = cpppo.decide( "isless", less,
                             predicate=lambda machine,source,path,data: data.i1 < data.i2 )
    i2[None] = cpppo.decide( "isgreater", greater,
                             predicate=lambda machine,source,path,data: data.i1 > data.i2)
    i2[None] = equal

    source = cpppo.peekable( str('a 1 2') )
    data = cpppo.dotdict()
    with cpppo.dfa( "comparo", initial=e ) as comparo:
        for i,(m,s) in enumerate( comparo.run( source=source, data=data )):
            log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                      m.name_centered(), i, s, source.sent, source.peek(), data )
    assert i == 12
    assert s is less

    source = cpppo.peekable( str('a 33 33') )
    data = cpppo.dotdict()
    with cpppo.dfa( "comparo", initial=e ) as comparo:
        for i,(m,s) in enumerate( comparo.run( source=source, data=data )):
            log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                      m.name_centered(), i, s, source.sent, source.peek(), data )
    assert i == 14
    assert s is equal
def test_once():
    """Parse the canned req_1 request with the Logix Object's parser, process it, and
    return (processed, data)."""
    request = cpppo.dotdict()
    stream = cpppo.peekable( req_1 )
    with Obj.parser as machine:
        for _mch, _sta in machine.run( source=stream, data=request ):
            pass
    log.normal( "Logix Request parsed: %s", enip.enip_format( request ))
    # If we ask a Logix Object to process the request, it should respond.
    processed = Obj.request( request )
    log.normal( "Logix Request processed: %s", enip.enip_format( request ))
    return processed, request
def test_IPADDR():
    """IPADDR must encode dotted-quad strings as network byte-ordered UDINTs, and parse
    the encoding back to the dotted-quad form."""
    # IP addresses are expressed as Network byte-ordered UDINTs, on the wire
    wire = parser.IPADDR.produce("10.0.0.1")
    assert wire == b"\x0A\x00\x00\x01"
    # But, we parse them as Network byte-ordered UDINTs and present them as IP addresses
    result = cpppo.dotdict()
    with parser.IPADDR() as machine:
        with contextlib.closing(machine.run(source=wire, data=result)) as engine:
            for mch, sta in engine:
                # Any pause (no transition) must only occur in a terminal state
                if sta is None:
                    assert mch.terminal
    assert result.IPADDR == "10.0.0.1"
def write(
        self, path, data, elements=1, offset=0, tag_type=enip.INT.tag_type,
        route_path=None, send_path=None, timeout=None, send=True,
):
    """Build a Write Tag [Fragmented] request for the given path.

    An offset of None selects a plain Write Tag; otherwise Write Tag Fragmented.
    Transmits via unconnected_send unless send is falsey; returns the request dotdict.
    """
    req = cpppo.dotdict()
    req.path = {"segment": [cpppo.dotdict(seg) for seg in path]}
    if offset is None:
        req.write_tag = {"elements": elements, "data": data, "type": tag_type}
    else:
        req.write_frag = {"elements": elements, "offset": offset, "data": data, "type": tag_type}
    if send:
        self.unconnected_send(
            request=req, route_path=route_path, send_path=send_path, timeout=timeout)
    return req
def test_once():
    """Parse the canned req_1 request with the Logix Object's parser, process it, and
    return (processed, data)."""
    source = cpppo.peekable(req_1)
    data = cpppo.dotdict()
    with Obj.parser as machine:
        for m, w in machine.run(source=source, data=data):
            pass
    log.normal("Logix Request parsed: %s", enip.enip_format(data))
    # If we ask a Logix Object to process the request, it should respond.
    processed = Obj.request(data)
    log.normal("Logix Request processed: %s", enip.enip_format(data))
    return processed, data
def test_struct():
    """Collect 4 raw bytes via chained input segments, then decode them as a little-endian
    int32 via state_struct; verifies the exact state/symbol sequence step by step."""
    dtp = cpppo.type_bytes_array_symbol
    abt = cpppo.type_bytes_iter
    ctx = 'val'
    # Four byte-collecting states, then an epsilon transition into the struct decode
    a = cpppo.state_input( "First", alphabet=abt, typecode=dtp, context=ctx )
    a[True] = b = cpppo.state_input( "Second", alphabet=abt, typecode=dtp, context=ctx )
    b[True] = c = cpppo.state_input( "Third", alphabet=abt, typecode=dtp, context=ctx )
    c[True] = d = cpppo.state_input( "Fourth", alphabet=abt, typecode=dtp, context=ctx )
    d[None] = cpppo.state_struct( "int32", context=ctx, format=str("<i"), terminal=True )
    machine = cpppo.dfa( initial=a )
    with machine:
        material = b'\x01\x02\x03\x80\x99'
        segment = 3                      # feed the input in 3-byte chunks
        source = cpppo.chainable()
        log.info( "States; %r input, by %d", material, segment )
        inp = None
        data = cpppo.dotdict()
        path = "struct"
        sequence = machine.run( source=source, path=path, data=data )
        for num in range( 10 ):
            try:
                mch,sta = next( sequence )
                inp = source.peek()
            except StopIteration:
                inp = source.peek()
                log.info( "%s <- %-10.10r test done",
                          cpppo.centeraxis( mch, 25, clip=True ), inp )
                break
            log.info( "%s <- %-10.10r test rcvd",
                      cpppo.centeraxis( mch, 25, clip=True ), inp )
            if sta is None:
                log.info( "%s <- %-10.10r test no next state",
                          cpppo.centeraxis( mch, 25, clip=True ), inp )
            if inp is None:
                if not material:
                    log.info( "%s <- %-10.10r test source finished",
                              cpppo.centeraxis( mch, 25, clip=True ), inp )
                # Will load consecutive empty iterables; chainable must handle
                source.chain( material[:segment] )
                material = material[segment:]
                inp = source.peek()
                log.info( "%s <- %-10.10r test chain",
                          cpppo.centeraxis( mch, 25, clip=True ), inp )
            # Verify the exact per-step state/symbol progression
            if num == 0: assert inp == b'\x01'[0]; assert sta.name == "First"
            if num == 1: assert inp == b'\x02'[0]; assert sta.name == "Second"
            if num == 2: assert inp == b'\x03'[0]; assert sta.name == "Third"
            if num == 3: assert inp == b'\x80'[0]; assert sta is None
            if num == 4: assert inp == b'\x80'[0]; assert sta.name == "Fourth"
            if num == 5: assert inp == b'\x99'[0]; assert sta.name == "int32"
            if num == 6: assert inp == b'\x99'[0]; assert sta.name == "int32"
    # Machine completes after 7 steps; the final byte 0x99 remains unconsumed,
    # and b'\x01\x02\x03\x80' decodes as little-endian int32 0x80030201 (negative)
    assert inp == b'\x99'[0]
    assert num == 6
    assert sta.name == "int32"
    assert data.struct.val == -2147286527
def test_IPADDR():
    """IPADDR must encode dotted-quad strings as network byte-ordered UDINTs, and parse
    the encoding back to the dotted-quad form."""
    # IP addresses are expressed as Network byte-ordered UDINTs, on the wire
    source = parser.IPADDR.produce('10.0.0.1')
    assert source == b'\x0A\x00\x00\x01'
    # But, we parse them as Network byte-ordered UDINTs and present them as IP addresses
    result = cpppo.dotdict()
    with parser.IPADDR() as machine:
        with contextlib.closing(machine.run(source=source, data=result)) as engine:
            for m, s in engine:
                # Any pause (no transition) must only occur in a terminal state
                if s is None:
                    assert m.terminal
    assert result.IPADDR == '10.0.0.1'
def unconnected_send(self, path, route_path=None, send_path=None, timeout=None,
                     read_frag=None, write_frag=None):
    """Build and transmit a Read/Write Tag Fragmented request wrapped in an Unconnected
    Send envelope; exactly one of read_frag/write_frag must be supplied.

    Raises ValueError if neither read_frag nor write_frag is given.  Returns the
    complete request dotdict tree.
    """
    if route_path is None:
        # Default to the CPU in chassis (link 0), port 1
        route_path = [{'link': 0, 'port': 1}]
    if send_path is None:
        # Default to the Connection Manager
        send_path = [{'class': 6}, {'instance': 1}]
    assert isinstance(path, list)

    # ENIP encapsulation header
    data = cpppo.dotdict()
    data.enip = {}
    data.enip.session_handle = self.session
    data.enip.options = 0
    data.enip.status = 0
    data.enip.sender_context = {}
    data.enip.sender_context.input = bytearray([0x00] * 8)
    data.enip.CIP = {}
    data.enip.CIP.send_data = {}

    # Common Packet Format: a null address item plus the data item
    sd = data.enip.CIP.send_data
    sd.interface = 0
    sd.timeout = 0
    sd.CPF = {}
    sd.CPF.item = [cpppo.dotdict(), cpppo.dotdict()]
    sd.CPF.item[0].type_id = 0         # null address item
    sd.CPF.item[1].type_id = 178       # 0xB2: unconnected data item
    sd.CPF.item[1].unconnected_send = {}

    us = sd.CPF.item[1].unconnected_send
    us.service = 82                    # 0x52: Unconnected Send service code
    us.status = 0
    us.priority = 5
    us.timeout_ticks = 157
    us.path = {'segment': [cpppo.dotdict(d) for d in send_path]}
    us.route_path = {'segment': [cpppo.dotdict(d) for d in route_path]}
    us.request = {}
    us.request.path = {'segment': [cpppo.dotdict(d) for d in path]}
    if read_frag:
        us.request.read_frag = read_frag
    elif write_frag:
        us.request.write_frag = write_frag
    else:
        raise ValueError("Expected a Read/Write Tag [Fragmented] request")

    # Produce the nested encodings from the inside out
    us.request.input = bytearray(logix.Logix.produce(us.request))
    sd.input = bytearray(enip.CPF.produce(sd.CPF))
    data.enip.input = bytearray(enip.CIP.produce(data.enip))
    data.input = bytearray(enip.enip_encode(data.enip))

    self.send(data.input, timeout=timeout)
    return data
def __init__(self, name=None, instance_id=None):
    """Create the instance (default to the next available instance_id).

    An instance_id of 0 holds the "class" attributes/commands.
    """
    self.name = name or self.__class__.__name__

    # Allocate and/or keep track of maximum instance ID assigned thus far.
    if instance_id is None:
        instance_id = self.__class__.max_instance + 1
    if instance_id > self.__class__.max_instance:
        self.__class__.max_instance = instance_id
    self.instance_id = instance_id

    (log.normal if self.instance_id else log.info)(
        "%24s, Class ID 0x%04x, Instance ID %3d created",
        self, self.class_id, self.instance_id)

    # Each (class_id, instance_id) pair must be unique in the directory
    instance = lookup(self.class_id, instance_id)
    assert instance is None, \
        "CIP Object class %x, instance %x already exists" % ( self.class_id, self.instance_id )

    #
    # directory.1.2.None == self
    # self.attribute == directory.1.2 (a dotdict), for direct access of our attributes
    #
    self.attribute = directory.setdefault(
        str(self.class_id) + '.' + str(instance_id), cpppo.dotdict())
    self.attribute['0'] = self

    # Check that the class-level instance (0) has been created; if not, we'll create one using
    # the default parameters.  If this isn't appropriate, then the user should create it using
    # the appropriate parameters.
    if lookup(self.class_id, 0) is None:
        self.__class__(name='meta-' + self.name, instance_id=0)

    if self.instance_id == 0:
        # Set up the default Class-level values.
        self.attribute['1'] = Attribute('Revision', INT, default=0)
        self.attribute['2'] = MaxInstance('Max Instance', INT, class_id=self.class_id)
        self.attribute['3'] = NumInstances('Num Instances', INT, class_id=self.class_id)
        # A UINT array; 1st UINT is size (default 0)
        self.attribute['4'] = Attribute('Optional Attributes', INT, default=0)
def test_limit():
    """Exercise the `limit` option on cpppo.regex: a hard symbol limit that truncates the
    match, and the dotted-path / callable forms of specifying that limit."""
    # Force a limit on input symbols.  If we only accept only even b's, we'll
    # fail if we force a stoppage at a+b*9
    source = cpppo.peekable(str('a' + 'b' * 100))
    data = cpppo.dotdict()
    try:
        with cpppo.regex(initial=str('a(bb)*'), context='even_b', limit=10) as machine:
            for i, (m, s) in enumerate(machine.run(source=source, data=data)):
                log.info("%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                         m.name_centered(), i, s, source.sent, source.peek(), data)
    except cpppo.NonTerminal:
        # Stopped mid-match at exactly the 10-symbol limit
        assert i == 10
        assert source.sent == 10
    else:
        assert False, "Should have failed with a cpppo.NonTerminal exception"
    # But odd b's OK.  The limit may be given as a literal int, a dotted data path
    # (relative to the machine's context), or a callable.
    for limit in [10, '..somewhere.ten', lambda **kwds: 10,
                  lambda path=None, data=None, **kwds: data[path + '..somewhere.ten']]:
        source = cpppo.peekable(str('a' + 'b' * 100))
        data = cpppo.dotdict()
        data['somewhere.ten'] = 10
        with cpppo.regex(initial=str('ab(bb)*'), context='odd_b', limit=limit) as machine:
            for i, (m, s) in enumerate(machine.run(source=source, data=data)):
                log.info("%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                         m.name_centered(), i, s, source.sent, source.peek(), data)
        assert i == 10
        assert source.sent == 10
        # array.array.tostring was removed in py3; tounicode is the py3 spelling here
        assert (data.odd_b.input.tostring() if sys.version_info[0] < 3
                else data.odd_b.input.tounicode()) == str('a' + 'b' * 9)
def decoding(self):
    """Return the connection parameters as a dict, along with any self.other
    kwds associated with the connection (eg. RPI, connection_ID, ...)"""
    # Large (32-bit) Network Connection Parameters carry the flag bits in the
    # upper 16-bit word, and allow a full 16-bit size; small (16-bit) ones
    # keep the flags in the same word and allow only a 9-bit size.
    shift = 16 if self._large else 0
    result = cpppo.dotdict(
        size=self._NCP & (0xFFFF if self._large else 0x01FF),
        variable=(self._NCP >> (9 + shift)) & 0b01,
        priority=(self._NCP >> (10 + shift)) & 0b11,
        type=(self._NCP >> (13 + shift)) & 0b11,
        redundant=(self._NCP >> (15 + shift)) & 0b01,
        large=self._large,
        NCP=self._NCP,
    )
    result.update(self.other)
    return result
def __init__(self, large=None, size=None, variable=None, priority=None, type=None,
             redundant=None, NCP=None, **kwds):
    """Capture CIP Network Connection Parameters, either from individual fields
    or from a raw NCP word.

    large      -- force large (32-bit) / small (16-bit) NCP; deduced from size/NCP if None
    size       -- connection size in bytes (9-bit small / 16-bit large)
    variable, priority, type, redundant
               -- NCP flag fields (None selects the default for each)
    NCP        -- a raw NCP word; must agree with fully-specified fields
    kwds       -- any other connection parameters (eg. RPI, API, connection_ID, ...)
    """
    # Save other supplied connection parameters (eg. RPI, API, connection_ID, ...)
    self.other = cpppo.dotdict(kwds)

    if large is None:
        # Deduce large/small from whichever of size/NCP exceeds the small limits
        self._large = bool(size and size > 0x1FF) or bool(NCP and NCP > 0xFFFF)
    else:
        self._large = bool(large)

    assert size is None or 0x0 < size <= (0xFFFF if self._large else 0x1FF), \
        "Connection size {size!r} invalid".format(size=size)
    assert variable is None or 0b00 <= variable <= 0b01, \
        "Connection variable {variable!r} invalid".format(variable=variable)
    assert priority is None or 0b00 <= priority <= 0b11, \
        "Connection priority {priority!r} invalid".format(priority=priority)
    assert type is None or 0b00 <= type <= 0b11, \
        "Connection type {type!r} invalid".format(type=type)
    assert redundant is None or 0b00 <= redundant <= 0b01, \
        "Connection redundant {redundant!r} invalid".format(redundant=redundant)

    # If the connection is fully specified (no defaults used), we'll demand
    # any provided NCP value to match exactly!
    specificity = {size, variable, type, redundant, priority}
    if NCP is None or None not in specificity:
        # Either no NCP provided, *or* the connection parameters are fully specified.
        # NOTE: use "is None" tests (not "or") when applying defaults, so an
        # explicitly supplied zero (eg. variable=0, type=0 -- both valid per the
        # asserts above) is honoured rather than being replaced by the default.
        self._NCP = (
            ((((1 if variable is None else variable) << 9)
              + ((0 if priority is None else priority) << 10)
              + ((2 if type is None else type) << 13)
              + ((0 if redundant is None else redundant) << 15))
             << (16 if self._large else 0))
            + (size or (4000 if self._large else 500)))
        if NCP is not None:
            assert NCP == self._NCP, \
                "Supplied NCP: {NCP!r} doesn't match one deduced: {self._NCP} from supplied parameters".format(
                    self=self, NCP=NCP)
    else:
        # An NCP was provided, and some parameters were left unspecified
        self._NCP = NCP
def test_regex_demo(): regex = str( '(ab+)((,[ ]*)(ab+))*' ) machine = cpppo.regex( name=str( 'demo' ), initial=regex ) data = cpppo.dotdict() with machine: source = cpppo.chainable( str( 'abbb, abb, ab' )) for i,(m,s) in enumerate( machine.run( source=source, data=data )): log.info( "%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r", m.name_centered(), i, s, source.sent, source.peek(), data ) assert i == 14 assert source.sent == 13 regexstr, lego, machine, initial = cpppo.state_input.from_regex( regex, alphabet=cpppo.type_str_iter, encoder=None, typecode=cpppo.type_str_array_symbol, context=None ) assert str( lego ) == "ab+(, *ab+)*" assert str( machine ) == """\
def unconnected_send(self, path, route_path=None, send_path=None, timeout=None,
                     read_frag=None, write_frag=None):
    """Build and send a CIP Unconnected Send request wrapping exactly one of a
    Read/Write Tag [Fragmented] request, routed via route_path/send_path.

    Returns the request data structure that was encoded and sent; raises
    ValueError unless one of read_frag/write_frag is supplied.
    """
    if route_path is None:
        # Default to the CPU in chassis (link 0), port 1
        route_path = [{'link': 0, 'port': 1}]
    if send_path is None:
        # Default to the Connection Manager
        send_path = [{'class': 6}, {'instance': 1}]
    assert isinstance(path, list)

    # EtherNet/IP encapsulation header fields
    data = cpppo.dotdict()
    data.enip = {}
    data.enip.session_handle = self.session
    data.enip.options = 0
    data.enip.status = 0
    data.enip.sender_context = {}
    data.enip.sender_context.input = bytearray([0x00] * 8)
    data.enip.CIP = {}
    data.enip.CIP.send_data = {}

    # SendRRData with a two-item Common Packet Format payload
    sd = data.enip.CIP.send_data
    sd.interface = 0
    sd.timeout = 0
    sd.CPF = {}
    sd.CPF.item = [cpppo.dotdict(), cpppo.dotdict()]
    sd.CPF.item[0].type_id = 0
    sd.CPF.item[1].type_id = 178       # 0xB2; carries the unconnected_send payload
    sd.CPF.item[1].unconnected_send = {}

    us = sd.CPF.item[1].unconnected_send
    us.service = 82                    # 0x52; the Unconnected Send service code
    us.status = 0
    us.priority = 5
    us.timeout_ticks = 157
    us.path = {'segment': [cpppo.dotdict(d) for d in send_path]}
    us.route_path = {'segment': [cpppo.dotdict(d) for d in route_path]}

    # The embedded Read/Write Tag [Fragmented] request, addressed by 'path'
    us.request = {}
    us.request.path = {'segment': [cpppo.dotdict(d) for d in path]}
    if read_frag:
        us.request.read_frag = read_frag
    elif write_frag:
        us.request.write_frag = write_frag
    else:
        raise ValueError("Expected a Read/Write Tag [Fragmented] request")

    # Encode from the innermost request outward, then transmit
    us.request.input = bytearray(logix.Logix.produce(us.request))
    sd.input = bytearray(enip.CPF.produce(sd.CPF))
    data.enip.input = bytearray(enip.CIP.produce(data.enip))
    data.input = bytearray(enip.enip_encode(data.enip))

    self.send(data.input, timeout=timeout)
    return data
def __init__( self, name=None, instance_id=None ):
    """Create a CIP Object instance (default to the next available instance_id).

    An instance_id of 0 holds the "class" attributes/commands.  If the
    class-level instance (0) doesn't yet exist, one is created automatically
    with default parameters; create it explicitly first if other parameters
    are required.
    """
    self.name = name or self.__class__.__name__

    # Allocate and/or keep track of maximum instance ID assigned thus far.
    if instance_id is None:
        instance_id = self.__class__.max_instance + 1
    if instance_id > self.__class__.max_instance:
        self.__class__.max_instance = instance_id
    self.instance_id = instance_id

    # Class-level instances (ID 0) are logged at a lower level than real instances
    ( log.normal if self.instance_id else log.info )(
        "%24s, Class ID 0x%04x, Instance ID %3d created",
        self, self.class_id, self.instance_id )

    # Refuse to silently replace an already-registered instance
    instance = lookup( self.class_id, instance_id )
    assert instance is None, \
        "CIP Object class %x, instance %x already exists" % (
            self.class_id, self.instance_id )

    #
    # directory.1.2.None == self
    # self.attribute == directory.1.2 (a dotdict), for direct access of our attributes
    #
    self.attribute = directory.setdefault(
        str( self.class_id )+'.'+str( instance_id ), cpppo.dotdict() )
    self.attribute['0'] = self

    # Check that the class-level instance (0) has been created; if not, we'll create one using
    # the default parameters.  If this isn't appropriate, then the user should create it using
    # the appropriate parameters.
    if lookup( self.class_id, 0 ) is None:
        self.__class__( name='meta-'+self.name, instance_id=0 )

    if self.instance_id == 0:
        # Set up the default Class-level values.
        self.attribute['1']= Attribute( 'Revision', INT, default=0 )
        self.attribute['2']= MaxInstance( 'Max Instance', INT, class_id=self.class_id )
        self.attribute['3']= NumInstances( 'Num Instances', INT, class_id=self.class_id )
        # A UINT array; 1st UINT is size (default 0)
        self.attribute['4']= Attribute( 'Optional Attributes', INT, default=0 )
def register(self, timeout=None):
    """Build and send an EtherNet/IP Register Session request; returns the
    request data structure that was encoded and sent."""
    data = cpppo.dotdict()
    data.enip = {}
    data.enip.session_handle = 0
    data.enip.options = 0
    data.enip.status = 0
    data.enip.sender_context = {}
    data.enip.sender_context.input = bytearray(8)

    # The Register Session CIP payload: protocol version 1, no options
    data.enip.CIP = {}
    data.enip.CIP.register = {}
    reg = data.enip.CIP.register
    reg.options = 0
    reg.protocol_version = 1

    # Encode the CIP payload, then the encapsulation header, and transmit
    data.enip.input = bytearray(enip.CIP.produce(data.enip))
    data.input = bytearray(enip.enip_encode(data.enip))
    self.send(data.input, timeout=timeout)
    return data
def test_EPATH_single():
    """EPATH_single must round-trip a single port/link segment, including the
    "extended" 16-bit port number form."""
    source = b'\x12\x0810.0.7.1'
    data = cpppo.dotdict()
    with parser.EPATH_single() as machine:
        with contextlib.closing(machine.run(source=source, data=data)) as engine:
            for _machine, _state in engine:
                pass
    assert data.EPATH_single.segment == [{"port": 2, "link": "10.0.7.1"}]

    # Producing from the parsed segment must regenerate the original encoding
    produced = parser.EPATH_single.produce(data.EPATH_single)
    assert produced == source

    # Try out an "extended" 16-bit port number
    # [1F][SS][PPPP]'123.123.123.123'[00] port 0xPPPP, link address '123.123.123.123' (pad if size SS odd)
    data.EPATH_single.segment[0]['port'] = 0x56CE  # 22222
    produced = parser.EPATH_single.produce(data.EPATH_single)
    assert produced == b'\x1F\x08\xCE\x5610.0.7.1'
def test_tnet():
    """Run each test vector through the tnetstring state machine; every one must
    end in a terminal state with the entire input consumed."""
    testvec = [
        "The π character is called pi",
    ]
    successes = 0
    for t in testvec:
        with tnet.tnet_machine() as tnsmach:
            path = "test_tnet"
            tns = tnetstrings.dump(t)
            source = cpppo.peekable(tns)
            data = cpppo.dotdict()
            for mch, sta in tnsmach.run(source=source, data=data, path=path):
                log.info("%s byte %5d: data: %r",
                         misc.centeraxis(mch, 25, clip=True), source.sent, data)
                log.info("Parsing tnetstring:\n%s\n%s (byte %d)",
                         repr(bytes(tns)),
                         '-' * (len(repr(bytes(tns[:source.sent]))) - 1) + '^',
                         source.sent)
                if sta is None:
                    break
            # Classify the outcome: terminal + exhausted input is the only success
            if sta is not None and source.peek() is None:
                log.info("%s byte %5d: success: data: %r",
                         misc.centeraxis(tnsmach, 25, clip=True), source.sent, data)
                successes += 1
            elif sta is None:
                # Ended in a non-terminal state
                log.info(
                    "%s byte %5d: failure: data: %r; Not terminal; unrecognized",
                    misc.centeraxis(tnsmach, 25, clip=True), source.sent, data)
            else:
                log.info(
                    "%s byte %5d: failure: data: %r; Terminal, but TNET string wasn't consumed",
                    misc.centeraxis(tnsmach, 25, clip=True), source.sent, data)
    assert successes == len(testvec)
def __init__(self, host, port=44818, timeout=None, depth=None, multiple=None,
             gateway_class=None, route_path=None, send_path=None,
             priority_time_tick=None, timeout_ticks=None,
             identity_default=None, dialect=None, **gateway_kwds):
    """Capture the desired I/O parameters for the target CIP Device.

    By default, the CIP Device will be identified using a List Identity request each
    time a CIP session is registered; provide a identity_default containing (at
    least) an attribute product_name == 'Some Product Name', to avoid this initial
    List Identity request (self.identity it will still be updated if .list_identity
    is invoked successfully).
    """
    self.host = host
    self.port = port
    # Sensible defaults for any unspecified numeric parameters
    self.timeout = timeout if timeout is not None else 5
    self.depth = depth if depth is not None else 2
    self.multiple = multiple if multiple is not None else 0
    self.route_path = route_path
    self.send_path = send_path
    self.priority_time_tick = priority_time_tick
    self.timeout_ticks = timeout_ticks
    self.gateway_kwds = gateway_kwds  # Any additional args to gateway
    self.gateway_class = gateway_class if gateway_class is not None else client.connector
    self.gateway = None
    self.gateway_lock = threading.Lock()
    # A bare string identity_default is shorthand for just a product_name
    if isinstance(identity_default, cpppo.type_str_base):
        identity_default = cpppo.dotdict(product_name=identity_default)
    assert not identity_default or hasattr(identity_default, 'product_name')
    self.identity_default = identity_default
    self.identity = identity_default
    self.dialect = dialect
def test_tnet_machinery():
    """Exercise the low-level tnetstring machinery: parse the integer size
    prefix, then the payload whose repeat count is taken from the parsed size."""
    # parsing integers
    path = "machinery"
    SIZE = cpppo.integer_bytes(name="SIZE", context="size", terminal=True)
    data = cpppo.dotdict()
    source = cpppo.chainable(b'123:')
    with SIZE:
        for mach, state in SIZE.run(source=source, data=data, path=path):
            if state is None:
                break
    log.info("After SIZE: %r", data)
    assert SIZE.terminal
    assert data.machinery.size == 123

    # repeat, limited by parent context's 'value' in data
    DATA = tnet.data_parser(name="DATA", context="data", repeat="..size")
    source.chain(b"abc" * 123)
    with DATA:
        for mach, state in DATA.run(source=source, data=data, path=path):
            if state is None:
                break
    log.info("After DATA: %r", data)
def test_tnet_machinery():
    """Exercise the low-level tnetstring parsing machinery directly.

    First parses the integer size prefix (b'123:') with cpppo.integer_bytes,
    then parses the payload with tnet.data_parser, whose repeat count comes
    from the parent context's parsed '..size' value.
    """
    # parsing integers
    path = "machinery"
    SIZE = cpppo.integer_bytes( name="SIZE", context="size", terminal=True )
    data = cpppo.dotdict()
    source = cpppo.chainable( b'123:' )
    with SIZE:
        for m,s in SIZE.run( source=source, data=data, path=path ):
            if s is None:
                break
    log.info( "After SIZE: %r", data )
    assert SIZE.terminal
    assert data.machinery.size == 123

    # repeat, limited by parent context's 'value' in data
    DATA = tnet.data_parser( name="DATA", context="data", repeat="..size" )
    source.chain( b"abc" * 123 )
    with DATA:
        for m,s in DATA.run( source=source, data=data, path=path ):
            if s is None:
                break
    log.info( "After DATA: %r", data )
def tnet_server(conn, addr):
    """Serve one tnet client 'til EOF; then close the socket.

    conn -- the connected socket
    addr -- the peer (host, port) address; port is used in the machine name
    """
    source = cpppo.chainable()
    with tnet_machine("tnet_%s" % addr[1]) as tnet_mesg:
        eof = False
        while not eof:
            data = cpppo.dotdict()

            # Loop blocking for input, while we've consumed input from source since the last time.
            # If we hit this again without having used any input, we know we've hit a symbol
            # unacceptable to the state machine; stop
            for mch, sta in tnet_mesg.run(source=source, data=data):
                if sta is not None:
                    continue
                # Non-transition; check for input, blocking if non-terminal and none left.  On
                # EOF, terminate early; this will raise a GeneratorExit.
                timeout = 0 if tnet_mesg.terminal or source.peek() is not None else None
                msg = network.recv(conn, timeout=timeout)  # blocking
                if msg is not None:
                    # A zero-length recv indicates the peer has closed the connection
                    eof = not len(msg)
                    log.info("%s: recv: %5d: %s", tnet_mesg.name_centered(),
                             len(msg), "EOF" if eof else cpppo.reprlib.repr(msg))
                    source.chain(msg)
                    if eof:
                        break

            # Terminal state (or EOF).
            log.detail("%s: byte %5d: data: %r",
                       tnet_mesg.name_centered(), source.sent, data)
            if tnet_mesg.terminal:
                # A complete tnetstring was parsed; reply with its JSON rendering
                res = json.dumps(data.tnet.type.input, indent=4, sort_keys=True)
                conn.send((res + "\n\n").encode("utf-8"))

        log.info("%s done", tnet_mesg.name_centered())
def test_readme():
    """The basic examples in the README"""
    # Hand-built DFA accepting ab+
    start = cpppo.state("E")
    got_a = cpppo.state_input("A")
    got_b = cpppo.state_input("B", terminal=True)
    start['a'] = got_a
    got_a['b'] = got_b
    got_b['b'] = got_b

    data = cpppo.dotdict()
    source = cpppo.peekable(str('abbbb,ab'))
    with cpppo.dfa(initial=start) as abplus:
        for i, (m, s) in enumerate(
                abplus.run(source=source, path="ab+", data=data)):
            log.info("%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                     m.name_centered(), i, s, source.sent, source.peek(), data)
        assert i == 5
        assert source.peek() == str(',')

    # Compose the same machine into one accepting ab+ repeatedly, ignoring
    # ,[ ]* separators
    CSV = cpppo.dfa("CSV", initial=start, terminal=True)
    SEP = cpppo.state_drop("SEP")
    CSV[','] = SEP
    SEP[' '] = SEP
    SEP[None] = CSV

    source = cpppo.peekable(str('abbbb, ab'))
    with cpppo.dfa(initial=CSV) as r2:
        for i, (m, s) in enumerate(
                r2.run(source=source, path="readme_CSV", data=data)):
            log.info("%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                     m.name_centered(), i, s, source.sent, source.peek(), data)
        assert i == 14
        assert source.peek() is None
def main():
    """The basic examples in the README"""
    # Hand-built DFA accepting ab+
    st_E = cpppo.state('E')
    st_A = cpppo.state_input('A')
    st_B = cpppo.state_input('B', terminal=True)
    st_E['a'] = st_A
    st_A['b'] = st_B
    st_B['b'] = st_B
    BASIC = cpppo.dfa('ab+', initial=st_E, context='basic')

    # Composite state machine accepting ab+ repeatedly, ignoring ,[ ]* separators
    ABP = cpppo.dfa('ab+', initial=st_E, terminal=True)
    SEP = cpppo.state_drop('SEP')
    ABP[','] = SEP
    SEP[' '] = SEP
    SEP[None] = ABP
    CSV = cpppo.dfa('CSV', initial=ABP, context='csv')

    # A regular expression; the default dfa name is the regular expression itself.
    REGEX = cpppo.regex(initial='(ab+)((,[ ]*)(ab+))*', context='regex')

    # Run the same input through all three machines, reporting progress
    data = cpppo.dotdict()
    for machine in [BASIC, CSV, REGEX]:
        path = machine.context() + '.input'  # default for state_input data
        source = cpppo.peekable(str('abbbb, ab'))
        with machine:
            for i, (m, s) in enumerate(machine.run(source=source, data=data)):
                print("%s #%3d; next byte %3d: %-10.10r: %r" % (
                    m.name_centered(), i, source.sent, source.peek(), data.get(path)))
        print("Accepted: %r; remaining: %r\n" % (data.get(path), ''.join(source)))
    print("Final: %r" % (data))
def produce(cls, value):
    """Emit a one-byte length followed by the string data, truncated or
    NUL-filled to the given .length (if provided and not None).  Accepts either
    a {.length: ..., .string: ...} dotdict, or a plain string.
    """
    # Promote a bare string to a dotdict carrying only .string
    if isinstance(value, cpppo.type_str_base):
        value = cpppo.dotdict({'string': value})
    payload = value.string.encode('iso-8859-1')
    actual = len(payload)
    # A missing or None .length means: use the actual encoded length
    if value.setdefault('length', actual) is None:
        value.length = actual
    assert value.length < 256, "SSTRING must be < 256 bytes in length; %r" % value
    result = USINT.produce(value.length)
    result += payload[:value.length]
    if actual < value.length:
        # Declared length exceeds available data; NUL-fill the remainder
        result += b'\x00' * (value.length - actual)
    return result
def test_logix_remote( count=100 ):
    """Performance of executing an operation a number of times on a socket connected
    Logix simulator, within the same Python interpreter (ie. all on a single CPU
    thread).
    """
    svraddr = ('localhost', 12345)
    kwargs = cpppo.dotdict({
        'argv': [
            # Optional verbosity/logging/profiling arguments for enip.main:
            #'-v',
            #'--log', '/tmp/logix.log',
            #'--profile', '/tmp/logix.prof',
            '--address', '%s:%d' % svraddr,
            'SCADA=INT[1000]'
        ],
        'server': {
            'control': cpppo.apidict( enip.timeout, { 'done': False }),
        },
    })

    # This is sort of "inside-out".  This thread will run logix_remote, which will signal the
    # enip.main (via the kwargs.server...) to shut down.  However, to do line-based performance
    # measurement, we need to be running enip.main in the "Main" thread...
    logixthread = threading.Thread(
        target=logix_remote,
        kwargs={ 'count': count, 'svraddr': svraddr, 'kwargs': kwargs } )
    logixthread.daemon = True
    logixthread.start()

    enip.main( **kwargs )

    logixthread.join()
def test_logix_remote(count=100):
    """Performance of executing an operation a number of times on a socket
    connected Logix simulator, within the same Python interpreter (ie. all on a
    single CPU thread).
    """
    svraddr = ('localhost', 12345)
    kwargs = cpppo.dotdict({
        'argv': [
            #'-v',
            #'--log', '/tmp/logix.log',
            #'--profile', '/tmp/logix.prof',
            '--address', '%s:%d' % svraddr,
            'SCADA=INT[1000]'
        ],
        'server': {
            'control': cpppo.apidict(enip.timeout, {'done': False}),
        },
    })

    # This is sort of "inside-out".  This thread will run logix_remote, which will signal the
    # enip.main (via the kwargs.server...) to shut down.  However, to do line-based performance
    # measurement, we need to be running enip.main in the "Main" thread...
    worker = threading.Thread(
        target=logix_remote,
        kwargs={'count': count, 'svraddr': svraddr, 'kwargs': kwargs})
    worker.daemon = True
    worker.start()

    enip.main(**kwargs)
    worker.join()
def produce( cls, value ):
    """Truncate or NUL-fill the provided .string to the given .length (if provided
    and not None).  Then, emit the (one byte) length+string.  Accepts either a
    {.length: ..., .string: ...} dotdict, or a plain string.
    """
    result = b''
    # Promote a bare string to a dotdict carrying only .string
    if isinstance( value, cpppo.type_str_base ):
        value = cpppo.dotdict( {'string': value } )
    encoded = value.string.encode( 'iso-8859-1' )
    # If .length doesn't exist or is None, set the length to the actual string length
    actual = len( encoded )
    desired = value.setdefault( 'length', actual )
    if desired is None:
        value.length = actual
    # The SSTRING length is carried in a single USINT, so must fit in one byte
    assert value.length < 256, "SSTRING must be < 256 bytes in length; %r" % value
    result += USINT.produce( value.length )
    result += encoded[:value.length]
    if actual < value.length:
        # Declared length exceeds available data; NUL-fill the remainder
        result += b'\x00' * ( value.length - actual )
    return result
def logix_remote(count, svraddr, kwargs):
    """Drive a Logix simulator at svraddr: Register a session, then issue 'count'
    Read Tag Fragmented requests, reporting the average transaction rate.

    Always signals the server (via kwargs['server']['control'].done) to shut
    down on completion or failure.
    """
    # 'await' became a reserved keyword in Python 3.7, so the client module's
    # response-awaiting helper can no longer be spelled "client.await".  Prefer
    # the renamed client.await_response, falling back via getattr to the legacy
    # attribute name for older cpppo versions -- identical semantics, valid syntax.
    client_await = getattr(client, 'await_response', None) or getattr(client, 'await')
    try:
        time.sleep(.25)  # Wait for server to be established

        # Confirm that a known Register encodes as expected
        data = cpppo.dotdict()
        data.enip = {}
        data.enip.options = 0
        data.enip.session_handle = 0
        data.enip.status = 0
        data.enip.sender_context = {}
        data.enip.sender_context.input = bytearray([0x00] * 8)
        data.enip.CIP = {}
        data.enip.CIP.register = {}
        data.enip.CIP.register.options = 0
        data.enip.CIP.register.protocol_version = 1

        data.enip.input = bytearray(enip.CIP.produce(data.enip))
        data.input = bytearray(enip.enip_encode(data.enip))
        log.normal("Register Request: %r" % data)

        assert bytes(data.input) == rss_004_request

        # Try to Register a real session, followed by commands
        timeout = 5
        begun = cpppo.timer()
        cli = client.client(host=svraddr[0], port=svraddr[1])
        assert cli.writable(timeout=timeout)
        elapsed = cpppo.timer() - begun
        log.normal("Client Connected in %7.3f/%7.3fs" % (elapsed, timeout))

        begun = cpppo.timer()
        with cli:
            cli.register(timeout=timeout)
            data, elapsed = client_await(cli, timeout=timeout)
        log.normal("Client Register Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data)
        assert data is not None and 'enip.CIP.register' in data, \
            "Failed to receive Register response"
        assert data.enip.status == 0, \
            "Register response indicates failure: %s" % data.enip.status

        # Establish the EtherNet/IP "session handle" used by all further requests
        cli.session = data.enip.session_handle

        start = cpppo.timer()
        with cli:
            for _ in range(count):
                begun = cpppo.timer()
                cli.read(path=[{'symbolic': 'SCADA'}, {'element': 12}],
                         elements=201, offset=2, timeout=timeout)
                data, elapsed = client_await(cli, timeout=timeout)
                log.normal("Client ReadFrg. Rcvd %7.3f/%7.3fs: %r",
                           elapsed, timeout, data)

        duration = cpppo.timer() - start
        log.warning("Client ReadFrg. Average %7.3f TPS (%7.3fs ea)."
                    % (count / duration, duration / count))

        log.normal("Signal shutdown w/ server.control in object %s",
                   id(kwargs['server']['control']))
    finally:
        kwargs['server']['control'].done = True  # Signal the server to terminate
def test_logix_multiple():
    """Test the Multiple Request Service.  Ensure multiple requests can be successfully
    handled, and invalid tags are correctly rejected.

    The Logix is a Message_Router instance, and is expected to be at Class 2,
    Instance 1.  Eject any non-Logix Message_Router that presently exist.
    """
    enip.lookup_reset()  # Flush out any existing CIP Objects for a fresh start
    Obj = logix.Logix(instance_id=1)

    # Create some Attributes to test, but mask the big ones from Get Attributes All.
    size = 1000
    Obj_a1 = Obj.attribute['1'] = enip.device.Attribute(
        'parts', enip.parser.DINT, default=[n for n in range(size)],
        mask=enip.device.Attribute.MASK_GA_ALL)
    Obj_a2 = Obj.attribute['2'] = enip.device.Attribute(
        'ControlWord', enip.parser.DINT, default=[0, 0])
    Obj_a3 = Obj.attribute['3'] = enip.device.Attribute(
        'SCADA_40001', enip.parser.INT, default=[n for n in range(size)],
        mask=enip.device.Attribute.MASK_GA_ALL)
    Obj_a4 = Obj.attribute['4'] = enip.device.Attribute(
        'number', enip.parser.REAL, default=0.0)

    # Set up a symbolic tag referencing the Logix Object's Attribute
    enip.device.symbol['parts'] \
        = {'class': Obj.class_id, 'instance': Obj.instance_id, 'attribute': 1}
    enip.device.symbol['ControlWord'] \
        = {'class': Obj.class_id, 'instance': Obj.instance_id, 'attribute': 2}
    enip.device.symbol['SCADA_40001'] \
        = {'class': Obj.class_id, 'instance': Obj.instance_id, 'attribute': 3}
    enip.device.symbol['number'] \
        = {'class': Obj.class_id, 'instance': Obj.instance_id, 'attribute': 4}

    assert len(Obj_a1) == size
    assert len(Obj_a3) == size
    assert len(Obj_a4) == 1

    # Give a few attributes distinctive values, so replies are recognizable
    Obj_a1[0] = 42
    Obj_a2[0] = 476
    Obj_a4[0] = 1.0

    # Ensure that the basic CIP Object requests work on a derived Class.
    for description, original, produced, parsed, result, response in GA_tests:
        request = cpppo.dotdict(original)
        log.warning("%s; request: %s", description, enip.enip_format(request))
        encoded = Obj.produce(request)
        assert encoded == produced, "%s: Didn't produce correct encoded request: %r != %r" % (
            description, encoded, produced)

        # Now, use the Message_Router's parser to decode the encoded bytes
        # NOTE(review): enumerate here makes m the index and s the (machine,state)
        # tuple; harmless since both are unused, but probably meant i,(m,s)
        source = cpppo.rememberable(encoded)
        decoded = cpppo.dotdict()
        with Obj.parser as machine:
            for m, s in enumerate(machine.run(source=source, data=decoded)):
                pass
        for k, v in cpppo.dotdict(parsed).items():
            assert decoded[k] == v, "%s: Didn't parse expected value: %s != %r in %s" % (
                description, k, v, enip.enip_format(decoded))

        # Process the request into a reply, and ensure we get the expected result (some Attributes
        # are filtered from Get Attributes All; only a 2-element DINT array and a single REAL should
        # be produced)
        Obj.request(request)
        # NOTE(review): uses module-level logging.warning, unlike log.warning above
        logging.warning("%s: reply: %s", description, enip.enip_format(request))
        for k, v in cpppo.dotdict(result).items():
            assert k in request and request[k] == v, \
                "%s: Didn't result in expected response: %s != %r; got %r" % (
                    description, k, v, request[k] if k in request else "(not found)")

        # Finally, produce the encoded response
        encoded = Obj.produce(request)
        assert encoded == response, "%s: Didn't produce correct encoded response: %r != %r" % (
            description, encoded, response)

    # Test that we correctly compute beg,end,endactual for various Read Tag Fragmented scenarios,
    # with 2-byte and 4-byte types.  For the purposes of this test, we only look at path...elements.
    data = cpppo.dotdict()
    data.service = Obj.RD_FRG_RPY
    data.path = {'segment': [cpppo.dotdict(d) for d in [{'element': 0}, ]]}
    data.read_frag = {}
    data.read_frag.elements = 1000
    data.read_frag.offset = 0

    # Reply maximum size limited
    beg, end, endactual = Obj.reply_elements(Obj_a1, data, 'read_frag')
    assert beg == 0 and end == 125 and endactual == 1000  # DINT == 4 bytes
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
    assert beg == 0 and end == 250 and endactual == 1000  # INT == 2 bytes
    data.read_frag.offset = 125 * 4  # OK, second request; begin after byte offset of first
    beg, end, endactual = Obj.reply_elements(Obj_a1, data, 'read_frag')
    assert beg == 125 and end == 250 and endactual == 1000  # DINT == 4 bytes

    # Request elements limited; 0 offset
    data.read_frag.elements = 30
    data.read_frag.offset = 0
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
    assert beg == 0 and end == 30 and endactual == 30  # INT == 2 bytes

    # Request elements limited; +'ve offset
    data.read_frag.elements = 70
    data.read_frag.offset = 80
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
    assert beg == 40 and end == 70 and endactual == 70  # INT == 2 bytes

    # Request limited by size of data provided (Write Tag [Fragmented])
    data = cpppo.dotdict()
    data.service = Obj.WR_FRG_RPY
    data.path = {'segment': [cpppo.dotdict(d) for d in [{'element': 0}, ]]}
    data.write_frag = {}
    data.write_frag.data = [0] * 100  # 100 elements provided in this request
    data.write_frag.elements = 200    # Total request is to write 200 elements
    data.write_frag.offset = 16       # request starts 16 bytes in (8 INTs)
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'write_frag')
    assert beg == 8 and end == 108 and endactual == 200  # INT == 2 bytes

    # ... same, but lets say request started somewhere in the middle of the array
    data.path = {'segment': [cpppo.dotdict(d) for d in [{'element': 222}, ]]}
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'write_frag')
    assert beg == 8 + 222 and end == 108 + 222 and endactual == 200 + 222  # INT == 2 bytes

    # Ensure correct computation of (beg,end] that are byte-offset and data/size limited
    data = cpppo.dotdict()
    data.service = Obj.WR_FRG_RPY
    data.path = {'segment': []}
    data.write_frag = {}
    data.write_frag.data = [3, 4, 5, 6]
    data.write_frag.offset = 6
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'write_frag')
    assert beg == 3 and end == 7 and endactual == 1000  # INT == 2 bytes

    # Trigger the error cases only accessible via write
    # Too many elements provided for attribute capacity
    data.write_frag.offset = (1000 - 3) * 2
    try:
        beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'write_frag')
        assert False, "Should have raised Exception due to capacity"
    except Exception as exc:
        assert "capacity exceeded" in str(exc)

    data = cpppo.dotdict()
    data.service = Obj.RD_FRG_RPY
    data.path = {'segment': []}
    data.read_frag = {}
    data.read_frag.offset = 6
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
    assert beg == 3 and end == 253 and endactual == 1000  # INT == 2 bytes

    # And we should be able to read with an offset right up to the last element
    data.read_frag.offset = 1998
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
    assert beg == 999 and end == 1000 and endactual == 1000  # INT == 2 bytes

    # Trigger all the remaining error cases

    # Unknown service
    data.service = Obj.RD_FRG_REQ
    try:
        beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
        assert False, "Should have raised Exception due to service"
    except Exception as exc:
        assert "unknown service" in str(exc)

    # Offset indivisible by element size
    data.service = Obj.RD_FRG_RPY
    data.read_frag.offset = 7
    try:
        beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
        assert False, "Should have raised Exception due to odd byte offset"
    except Exception as exc:
        assert "element boundary" in str(exc)

    # Initial element outside bounds
    data.read_frag.offset = 2000
    try:
        beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
        assert False, "Should have raised Exception due to initial element"
    except Exception as exc:
        assert "initial element invalid" in str(exc)

    # Ending element outside bounds
    data.read_frag.offset = 0
    data.read_frag.elements = 1001
    try:
        beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
        assert False, "Should have raised Exception due to ending element"
    except Exception as exc:
        assert "ending element invalid" in str(exc)

    # Beginning element after ending (should be no way to trigger).  This request doesn't specify an
    # element in the path, hence defaults to element 0, and asks for a number of elements == 2.
    # Thus, there is no 6-byte offset possible (a 2-byte offset is, though).
    data.read_frag.offset = 6
    data.read_frag.elements = 2
    try:
        beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
        assert False, "Should have raised Exception due to ending element order"
    except Exception as exc:
        assert "ending element before beginning" in str(exc)
    data.read_frag.offset = 2
    data.read_frag.elements = 2
    beg, end, endactual = Obj.reply_elements(Obj_a3, data, 'read_frag')
    assert beg == 1 and end == 2 and endactual == 2  # INT == 2 bytes

    # Test an example valid multiple request
    data = cpppo.dotdict()
    data.multiple = {}
    data.multiple.request = [
        cpppo.dotdict(), cpppo.dotdict(), cpppo.dotdict(), cpppo.dotdict(), cpppo.dotdict()]
    req = data.multiple.request
    req[0].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'parts'}]]}
    req[0].read_tag = {}
    req[0].read_tag.elements = 1
    req[1].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'ControlWord'}]]}
    req[1].read_tag = {}
    req[1].read_tag.elements = 1
    req[2].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'number'}]]}
    req[2].read_tag = {}
    req[2].read_tag.elements = 1
    req[3].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'number'}]]}
    req[3].write_tag = {}
    req[3].write_tag.elements = 1
    req[3].write_tag.type = 0x00ca
    req[3].write_tag.data = [1.25]
    req[4].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'number'}]]}
    req[4].read_tag = {}
    req[4].read_tag.elements = 1
    request = Obj.produce(data)

    req_1 = bytes(bytearray([
        0x0A, 0x02, 0x20, 0x02, 0x24, 0x01, 0x05, 0x00,
        0x0c, 0x00, 0x18, 0x00, 0x2a, 0x00, 0x36, 0x00,
        0x48, 0x00, 0x4C, 0x04, 0x91, 0x05, 0x70, 0x61,
        0x72, 0x74, 0x73, 0x00, 0x01, 0x00, 0x4C, 0x07,
        0x91, 0x0B, 0x43, 0x6F, 0x6E, 0x74, 0x72, 0x6F,
        0x6C, 0x57, 0x6F, 0x72, 0x64, 0x00, 0x01, 0x00,
        b'L'[0], 0x04, 0x91, 0x06, b'n'[0], b'u'[0], b'm'[0], b'b'[0],
        b'e'[0], b'r'[0], 0x01, 0x00, b'M'[0], 0x04, 0x91, 0x06,
        b'n'[0], b'u'[0], b'm'[0], b'b'[0], b'e'[0], b'r'[0], 0xca, 0x00,
        0x01, 0x00, 0x00, 0x00, 0xa0, 0x3f, b'L'[0], 0x04,
        0x91, 0x06, b'n'[0], b'u'[0], b'm'[0], b'b'[0], b'e'[0], b'r'[0],
        0x01, 0x00,
    ]))
    assert request == req_1, \
        "Unexpected result from Multiple Request Service; got: \n%r\nvs.\n%r " % (
            request, req_1 )

    # Now, use the Message_Router's parser
    source = cpppo.rememberable(request)
    data = cpppo.dotdict()
    with Obj.parser as machine:
        for i, (m, s) in enumerate(machine.run(source=source, data=data)):
            pass
    log.normal("Multiple Request: %s", enip.enip_format(data))
    assert 'multiple' in data, \
        "No parsed multiple found in data: %s" % enip.enip_format( data )
    assert data.service == enip.device.Message_Router.MULTIPLE_REQ, \
        "Expected a Multiple Request Service request: %s" % enip.enip_format( data )
    assert data.multiple.number == 5, \
        "Expected 5 requests in request.multiple: %s" % enip.enip_format( data )

    # And ensure if we re-encode the parsed result, we get the original encoded request back
    assert Obj.produce(data) == req_1

    # Process the request into a reply.
    Obj.request(data)
    log.normal("Multiple Response: %s", enip.enip_format(data))
    assert data.service == enip.device.Message_Router.MULTIPLE_RPY, \
        "Expected a Multiple Request Service reply: %s" % enip.enip_format( data )
    rpy_1 = bytearray([
        0x8A, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0c, 0x00,
        0x16, 0x00, 0x20, 0x00, 0x2a, 0x00, 0x2e, 0x00,
        0xCC, 0x00, 0x00, 0x00, 0xC4, 0x00, 0x2A, 0x00,
        0x00, 0x00, 0xCC, 0x00, 0x00, 0x00, 0xC4, 0x00,
        0xDC, 0x01, 0x00, 0x00, 0xCC, 0x00, 0x00, 0x00,
        0xCA, 0x00, 0x00, 0x00, 0x80, 0x3F, 0xcd, 0x00,
        0x00, 0x00, 0xcc, 0x00, 0x00, 0x00, 0xca, 0x00,
        0x00, 0x00, 0xa0, 0x3f,
    ])
    assert data.input == rpy_1, \
        "Unexpected reply from Multiple Request Service request; got: \n%r\nvs.\n%r " % (
            data.input, rpy_1 )

    # Now lets try some valid and invalid requests
    data = cpppo.dotdict()
    data.multiple = {}
    data.multiple.request = req = [cpppo.dotdict()]
    req[0].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'SCADA_40001'}]]}
    req[0].read_tag = {}
    req[0].read_tag.elements = 1
    data.multiple.number = len(data.multiple.request)
    request = Obj.produce(data)
    req_good = bytearray([
        0x0A, 0x02, 0x20, 0x02, ord('$'), 0x01, 0x01, 0x00,
        0x04, 0x00, 0x4C, 0x07, 0x91, 0x0b, ord('S'), ord('C'),
        ord('A'), ord('D'), ord('A'), ord('_'), ord('4'), ord('0'),
        ord('0'), ord('0'), ord('1'), 0x00, 0x01, 0x00,
    ])
    assert request == req_good, \
        "Unexpected result from Multiple Request Service request for SCADA_40001; got: \n%r\nvs.\n%r " % (
            request, req_good )
    Obj.request(data)
    rpy_good = bytearray([
        0x8A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00,
        0xCC, 0x00, 0x00, 0x00, 0xC3, 0x00, 0x00, 0x00,
    ])
    assert data.input == rpy_good, \
        "Unexpected reply from Multiple Request Service request for SCADA_40001; got: \n%r\nvs.\n%r " % (
            data.input, rpy_good )

    # Add an invalid request
    data = cpppo.dotdict()
    data.multiple = {}
    data.multiple.request = req = [cpppo.dotdict(), cpppo.dotdict()]
    req[0].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'SCADA_40001'}]]}
    req[0].read_tag = {}
    req[0].read_tag.elements = 1
    req[1].path = {'segment': [cpppo.dotdict(d) for d in [{'symbolic': 'SCADA_40002'}]]}
    req[1].read_tag = {}
    req[1].read_tag.elements = 1
    data.multiple.number = len(data.multiple.request)
    request = Obj.produce(data)
    req_bad = bytearray([
        0x0A, 0x02, 0x20, 0x02, ord('$'), 0x01, 0x02, 0x00,
        0x06, 0x00, 0x18, 0x00, 0x4C, 0x07, 0x91, 0x0b,
        ord('S'), ord('C'), ord('A'), ord('D'), ord('A'), ord('_'),
        ord('4'), ord('0'), ord('0'), ord('0'), ord('1'), 0x00,
        0x01, 0x00, 0x4C, 0x07, 0x91, 0x0b, ord('S'), ord('C'),
        ord('A'), ord('D'), ord('A'), ord('_'), ord('4'), ord('0'),
        ord('0'), ord('0'), ord('2'), 0x00, 0x01, 0x00,
    ])
    assert request == req_bad, \
        "Unexpected result from Multiple Request Service request for SCADA_40001/2; got: \n%r\nvs.\n%r " % (
            request, req_bad )
    Obj.request(data)
    rpy_bad = bytearray([
        0x8A, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00,
        0x0e, 0x00, 0xCC, 0x00, 0x00, 0x00, 0xC3, 0x00,
        0x00, 0x00, 0xCC, 0x00, 0x05, 0x01,  # Status code 0x05 (invalid path)
        0x00, 0x00,
    ])
    assert data.input == rpy_bad, \
        "Unexpected reply from Multiple Request Service request for SCADA_40001/2; got: \n%r\nvs.\n%r " % (
            data.input, rpy_bad )
from cpppo.server import enip from cpppo.server.enip import logix, client log = logging.getLogger("enip.lgx") # Get Attribute[s] All/Single tests: description, original, produced, parsed, processed, response. # Ensure we can produce the encoded version from the original, and then check what we can parse from # the encoded, and finally what the result is. GA_tests = [ ( "Get Attribute Single 0x02/1/4", { 'get_attribute_single': True, 'path': { 'segment': [ cpppo.dotdict(s) for s in [{ 'class': 0x02 }, { 'instance': 1 }, { 'attribute': 4 }] ] }, }, b'\x0e\x03 \x02$\x010\x04', { "service": 0x0e, }, { "service": 0x8e,
import cpppo from cpppo.server import enip from cpppo.server.enip import logix, client log = logging.getLogger( "enip.lgx" ) # Get Attribute[s] All/Single tests: description, original, produced, parsed, processed, response. # Ensure we can produce the encoded version from the original, and then check what we can parse from # the encoded, and finally what the result is. GA_tests = [ ( "Get Attribute Single 0x02/1/4", { 'get_attribute_single': True, 'path': { 'segment': [ cpppo.dotdict( s ) for s in [ { 'class': 0x02 }, { 'instance': 1}, { 'attribute': 4 }]]}, }, b'\x0e\x03 \x02$\x010\x04', { "service": 0x0e, }, { "service": 0x8e, "get_attribute_single.data": [ 0, 0, 128, 63
def read_details( self, attributes ):
    """Assumes that self.gateway has been established; does not close_gateway on Exception.  If
    you use this interface, ensure that you maintain the gateway (eg.):

        via = proxy( 'hostname' )
        with via:
            for val,(sts,(att,typ,uni)) in via.read_details( [...] ):

    Read the specified CIP Tags/Attributes in the string or iterable 'attributes', using Read
    Tag [Fragmented] (returning the native type), or Get Attribute Single/All (converting it to
    the specified EtherNet/IP CIP type(s)).

    The reason iterables are used and a generator returned, is to allow the underlying
    cpppo.server.enip.client connector to aggregate multiple CIP operations using Multiple
    Service Packet requests and/or "pipeline" multiple requests in-flight, while receiving the
    results of earlier requests.

    The 'attributes' must be either a simple string Tag name (no Type, implying the use of
    *Logix Read Tag [Fragmented] service), eg:

        "Tag"

    or an iterable containing 2 or 3 values; a Tag/address, a type/types (may be None, to force
    Tag I/O), and an optional description (eg. Units):

        [
            "Tag",
            ( "Tag", None, "kWh" ),
            ( "@1/1/1", "INT" ),
            ( "@1/1/1", "INT", "Hz" ),
            ( "@1/1", ( "INT", "INT", "INT", "INT", "INT", "DINT", "SSTRING", "USINT" )),
            ( "@1/1", ( "INT", "INT", "INT", "INT", "INT", "DINT", "SSTRING", "USINT" ), "Identity" ),
        ]

    Produces a generator yielding the corresponding sequence of results and details for the
    supplied 'attributes' iterable.  Each individual request may succeed or fail with a
    non-zero status code (remember: status code 0x06 indicates successful return of a partial
    result).

    Upon successful I/O, a tuple containing the result value and details about the result (a
    status, and the attribute's details (address, type, and units)) corresponding to each of
    the supplied 'attributes' elements is yielded as a sequence.  Each result value is always a
    list of values, or None if the request failed:

        (
            ([0],    (0, ("Tag",    parser.INT,  None))),
            ([1.23], (0, ("Tag",    parser.REAL, "kWh"))),
            ([1],    (0, ("@1/1/1", parser.INT,  None))),
            ([1],    (0, ("@1/1/1", parser.INT,  "Hz"))),
            ([1, 2, 3, 4, 5, 6, "Something", 255],
                     (0, ("@1/1", [ parser.INT, parser.INT, parser.INT, parser.INT,
                                    parser.INT, parser.DINT, parser.STRING, parser.USINT ], None ))),
            ([1, 2, 3, 4, 5, 6, "Something", 255],
                     (0, ("@1/1", [ parser.INT, parser.INT, parser.INT, parser.INT,
                                    parser.INT, parser.DINT, parser.STRING, parser.USINT ], "Identity" ))),
        )

    The read_details API raises exception on failure to parse request, or result data type
    conversion problem.  The simple 'read' API also raises an Exception on attribute access
    error, the return of failure status code.  Not all of these strictly necessitate a closure
    of the EtherNet/IP CIP connection, but should be sufficiently rare (once configured) that
    they all must signal closure of the connection gateway (which is re-established on the next
    call for I/O).

    EXAMPLES

        proxy = enip_proxy( '10.0.1.2' )
        try:
            with contextlib.closing( proxy.read( [ ("@1/1/7", "SSTRING") ] )) as reader: # CIP Device Name
                value = next( reader )
        except Exception as exc:
            proxy.close_gateway( exc )

        # If CPython (w/ reference counting) is your only target, you can use the simpler:
        proxy = enip_proxy( '10.0.1.2' )
        try:
            value, = proxy.read( [ ("@1/1/7", "SSTRING") ] ) # CIP Device Name
        except Exception as exc:
            proxy.close_gateway( exc )

    """
    # A single plain Tag name is promoted to a one-element request sequence.
    if isinstance( attributes, cpppo.type_str_base ):
        attributes = [ attributes ]

    def opp__att_typ_uni( i ):
        """Generate sequence containing the enip.client operation, and the original attribute
        specified, its type(s) (if any), and any description.  Augment produced operation with
        data type (if known), to allow estimation of reply sizes (and hence, Multiple Service
        Packet use); requires cpppo>=3.8.1.

        Yields: (opp,(att,typ,dsc))

        """
        for a in i:
            assert self.is_request( a ), \
                "Not a valid read/write target: %r" % ( a, )
            try:
                # The attribute description is either a plain Tag, an (address, type), or an
                # (address, type, description)
                if is_listlike( a ):
                    att,typ,uni = a if len( a ) == 3 else a+(None,)
                else:
                    att,typ,uni = a,None,None
                # No conversion of data type if None; use a Read Tag [Fragmented]; works only
                # for [S]STRING/SINT/INT/DINT/REAL/BOOL.  Otherwise, conversion of data type
                # desired; get raw data using Get Attribute Single.
                parser = client.parse_operations if typ is None else attribute_operations
                opp, = parser( ( att, ), route_path=device.parse_route_path( self.route_path ),
                               send_path=self.send_path, priority_time_tick=self.priority_time_tick,
                               timeout_ticks=self.timeout_ticks )
            except Exception as exc:
                # Re-raise after logging: a malformed attribute spec is a caller error.
                log.warning( "Failed to parse attribute %r; %s", a, exc )
                raise

            # For read_tag.../get_attribute..., tag_type is never required; but, it is used (if
            # provided) to estimate data sizes for Multiple Service Packets.  For
            # write_tag.../set_attribute..., the data has specified its data type, if not the
            # default (INT for write_tag, SINT for set_attribute).
            if typ is not None and not is_listlike( typ ) and 'tag_type' not in opp:
                t = typ
                if isinstance( typ, cpppo.type_str_base ):
                    # Named CIP type, eg. "INT" --> (parser class, data path)
                    td = self.CIP_TYPES.get( t.strip().lower() )
                    if td is not None:
                        t,d = td
                if hasattr( t, 'tag_type' ):
                    opp['tag_type'] = t.tag_type

            log.detail( "Parsed attribute %r (type %r) into operation: %r", att, typ, opp )
            yield opp,(att,typ,uni)

    def types_decode( types ):
        """Produce a sequence of type class,data-path, eg. (parser.REAL,"SSTRING.string").  If a
        user-supplied type (or None) is provided, data-path is None, and the type is passed.

        """
        # NOTE(review): the 'types' parameter is unused; the loop below reads the enclosing
        # loop's 'typ' via closure.  At the single call site the argument equals 'typ' (or
        # [typ]), so behavior is equivalent -- confirm before relying on 'types' elsewhere.
        for t in typ if is_listlike( typ ) else [ typ ]:
            d = None                    # No data-path, if user-supplied type
            if isinstance( t, cpppo.type_str_base ):
                td = self.CIP_TYPES.get( t.strip().lower() )
                assert td, "Invalid EtherNet/IP CIP type name %r specified" % ( t, )
                t,d = td
            assert type( t ) in (type,type(None)), \
                "Expected None or CIP type class, not %r" % ( t, )
            yield t,d

    # Get duplicate streams; one to feed the the enip.client's connector.operate, and one for
    # post-processing based on the declared type(s).
    operations,attrtypes = itertools.tee( opp__att_typ_uni( attributes ))

    # Process all requests w/ the specified pipeline depth, Multiple Service Packet
    # configuration.  The 'idx' is the EtherNet/IP CIP request packet index; 'i' is the
    # individual I/O request index (for indexing att/typ/operations).
    #
    # This Thread may block here attempting to gain exclusive access to the cpppo.dfa used
    # by the cpppo.server.enip.client connector.  This uses a threading.Lock, which will raise
    # an exception on recursive use, but block appropriately on multi-Thread contention.
    #
    # assert not self.gateway.frame.lock.locked(), \
    #     "Attempting recursive read on %r" % ( self.gateway.frame, )
    log.info( "Acquiring gateway connection: %s",
              "locked" if self.gateway.frame.lock.locked() else "available" )
    with self.gateway as connection: # waits 'til any Thread's txn. completes
        for i,(idx,dsc,req,rpy,sts,val) in enumerate( connection.operate(
                ( opr for opr,_ in operations ),
                depth=self.depth, multiple=self.multiple, timeout=self.timeout )):
            log.detail( "%3d (pkt %3d) %16s %-12s: %r ", i, idx, dsc, sts or "OK", val )
            # Advance the duplicated attribute-details stream in lockstep with results.
            opr,(att,typ,uni) = next( attrtypes )

            if typ is None or sts not in (0,6) or val in (True,None):
                # No type conversion; just return whatever type produced by Read Tag.  Also, if
                # failure status (OK if no error, or if just not all data could be returned), we
                # can't do any more with this value...  Also, if actually a Write Tag or Set
                # Attribute ..., then val True/None indicates success/failure (no data returned).
                yield val,(sts,(att,typ,uni))
                continue

            # Parse the raw data using the type (or list of types) desired.  If one type, then
            # all data will be parsed using it.  If a list, then the data will be sequentially
            # parsed using each type.  Finally, the target data will be extracted from each
            # parsed item, and added to the result.  For example, for the parsed SSTRING
            #
            #     data = { "SSTRING": {"length": 3, "string": "abc"}}
            #
            # we just want to return data['SSTRING.string'] == "abc"; each recognized CIP type
            # has a data path which we'll use to extract just the result data.  If a
            # user-defined type is supplied, of course we'll just return the full result.
            source = cpppo.peekable( bytes( bytearray( val ))) # Python2/3 compat.
            res = []
            typ_is_list = is_listlike( typ )
            typ_dat = list( types_decode( typ if typ_is_list else [typ] ))
            for t,d in typ_dat:
                with t() as machine:
                    while source.peek() is not None: # More data available; keep parsing.
                        data = cpppo.dotdict()
                        for m,s in machine.run( source=source, data=data ):
                            # A None state w/ exhausted source means a partial/truncated parse.
                            assert not ( s is None and source.peek() is None ), \
                                "Data exhausted before completing parsing a %s" % ( t.__name__, )
                        res.append( data[d] if d else data )
                        # If t is the only type, keep processing it 'til out of data...
                        if len( typ_dat ) == 1:
                            continue
                        break
            typ_types = [t for t,_ in typ_dat] if typ_is_list else typ_dat[0][0]
            yield res,(sts,(att,typ_types,uni))
def test_decode():
    # Exercise the various cpppo string/integer parsers over str vs. bytes input, in both
    # Python 2 and 3.  Each machine is driven to completion while logging every transition.
    def drive( machine, source, data ):
        # Run 'machine' over 'source' into 'data', logging each step; return the final index.
        for i, (m, s) in enumerate(machine.run(source=source, data=data)):
            log.info("%s #%3d -> %10.10s; next byte %3d: %-10.10r: %r",
                     m.name_centered(), i, s, source.sent, source.peek(), data)
        return i

    # Regex over bytes data w/ decode: operates in raw bytes symbols; works in Python 2/3.
    source = cpppo.peekable('π'.encode('utf-8'))
    data = cpppo.dotdict()
    with cpppo.string_bytes('pi', initial='.*', greedy=True, context='pi', decode='utf-8') as machine:
        steps = drive(machine, source, data)
    assert steps == 3
    assert source.sent == 2
    assert data.pi == 'π'

    if sys.version_info[0] < 3:
        # Regex over plain (non-unicode) str data; no decode required.  Forcing str()
        # counteracts the unicode_literals import; greenery.lego regexes can't sanely handle
        # unicode data in Python 2...
        source = cpppo.peekable(str('pi'))
        data = cpppo.dotdict()
        with cpppo.string('pi', initial='.*', greedy=True, context='pi') as machine:
            steps = drive(machine, source, data)
        assert steps == 3
        assert source.sent == 2
        assert data.pi == 'pi'
    else:
        # Regex over Python 3 unicode str data; operates in native unicode symbols.
        source = cpppo.peekable('π')
        data = cpppo.dotdict()
        with cpppo.string('pi', initial='.*', greedy=True, context='pi') as machine:
            steps = drive(machine, source, data)
        assert steps == 2
        assert source.sent == 1
        assert data.pi == 'π'

    # Integer parser over str digits...
    source = cpppo.peekable(str('123'))
    data = cpppo.dotdict()
    with cpppo.integer('value') as machine:
        steps = drive(machine, source, data)
    assert steps == 4
    assert source.sent == 3
    assert data.integer == 123

    # ...and its bytes-symbol counterpart.
    source = cpppo.peekable('123'.encode('ascii'))
    data = cpppo.dotdict()
    with cpppo.integer_bytes('value') as machine:
        steps = drive(machine, source, data)
    assert steps == 4
    assert source.sent == 3
    assert data.integer == 123

    # Try using a integer (str) parser over bytes data.  Works in Python 2, not so much in Python 3
    try:
        source = cpppo.peekable('123'.encode('ascii'))
        data = cpppo.dotdict()
        with cpppo.integer('value') as machine:
            steps = drive(machine, source, data)
        assert steps == 4
        assert source.sent == 3
        assert data.integer == 123
        assert sys.version_info[0] < 3, \
            "Should have failed in Python3; str/bytes iterator both produce str/int"
    except AssertionError:
        assert not sys.version_info[0] < 3, \
            "Shouldn't have failed in Python2; str/bytes iterator both produce str"
def test_codecs():
    # In Python3, the greenery.fsm is able to handle the Unicode str type; under Python2, it
    # can sanely only handle the non-Unicode str type, so skip entirely there.
    if sys.version_info[0] < 3:
        return

    # Unicode sample texts, plus regexes paired w/ a flag: the regex fully accepts a text
    # exactly when the flag agrees with the presence of 'π' in that text.
    texts = [
        'pi: π',
        'abcdé\u4500123',
        'This contains π,π and more πs',
        'a 480Ω resistor',
    ]
    tests = [
        ('[^π]*(π[^π]*)+', True),   # Optional non-π's, then at least one run of π and non-π's
        ('[^π]*[^π]', False),       # Any number of non-π, ending in a non-π
    ]

    # Pass 1: convert each unicode regex to a state machine over native unicode symbols.  Only
    # if both the dfa and its sub-state are "terminal" will the machine be terminal.  These are
    # greedy, so they run 'til the end of input, collecting the full string unless a
    # non-matching symbol appears.
    for text in texts:
        for pattern, wants_pi in tests:
            with cpppo.regex(name='pies', context="pies", initial=pattern, terminal=True) as machine:
                original = text
                source = cpppo.chainable(original)
                data = cpppo.dotdict()
                try:
                    for mch, sta in machine.run(source=source, data=data):
                        pass
                except cpppo.NonTerminal:
                    pass
                accepted = machine.terminal and data.pies.input.tounicode() == original
                log.info("%s ends w/ re %s: %s: %r", machine.name_centered(), pattern,
                         "string accepted" if accepted else "string rejected", data)
                assert accepted == (wants_pi == ('π' in text))

    # Pass 2: the same regexes as state machines over bytes symbols; our encoder expands each
    # unicode symbol into 1 or more UTF-8 bytes.
    for text in texts:
        for pattern, wants_pi in tests:
            original = text.encode('utf-8')     # u'...' --> b'...'
            source = cpppo.chainable(original)
            data = cpppo.dotdict()
            with cpppo.regex(name='pies', context="pies", initial=pattern, terminal=True,
                             regex_alphabet=int, regex_typecode='B',
                             regex_encoder=lambda s: (b for b in s.encode('utf-8'))) as machine:
                try:
                    for mch, sta in machine.run(source=source, data=data):
                        pass
                except cpppo.NonTerminal:
                    pass
                accepted = machine.terminal and data.pies.input.tobytes() == original
                log.detail( "%s ends w/ re: %s: %s: %r", machine.name_centered(), pattern,
                            "string accepted" if accepted else "string rejected", data)
                assert accepted == (wants_pi == ('π' in text))
                # Whatever was consumed must be a prefix of the original bytes.
                assert original.startswith(data.pies.input.tobytes())