class UDPTransport(object):
    """ UDP based transport for node-to-node communication. """

    def __init__(self, host, port, protocol=None):
        self.protocol = protocol
        server = DatagramServer((host, port), handle=self.receive)
        server.start()
        self.server = server
        # read back the effective bind address (port 0 -> ephemeral port)
        self.host, self.port = server.server_host, server.server_port

    def receive(self, data, host_port):  # pylint: disable=unused-argument
        """ Hand an incoming datagram over to the protocol layer. """
        self.protocol.receive(data)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.track_recv(self.protocol.raiden, host_port, data)

    def send(self, sender, host_port, bytes_):
        """ Write `bytes_` on the wire addressed to `host_port`.

        Args:
            sender (address): Address of the running node.
            host_port (Tuple[(str, int)]): Host name and port number.
            bytes_ (bytes): Payload to be sent through the wire.
        """
        self.server.sendto(bytes_, host_port)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.network.track_send(sender, host_port, bytes_)

    def register(self, proto, host, port):  # pylint: disable=unused-argument
        """ Set the protocol instance that consumes received data. """
        assert isinstance(proto, RaidenProtocol)
        self.protocol = proto

    def stop(self):
        self.server.stop()
class UDPTransport(object):
    """ Node communication using the UDP protocol. """

    def __init__(self, host, port, protocol=None):
        # protocol may be None here and injected later through register()
        self.protocol = protocol
        self.server = DatagramServer((host, port), handle=self.receive)
        self.server.start()
        # read the resolved bind address back from the server (port 0 ->
        # ephemeral port chosen by the OS)
        self.host = self.server.server_host
        self.port = self.server.server_port

    def receive(self, data, host_port):  # pylint: disable=unused-argument
        """ Forward a received datagram to the protocol layer. """
        self.protocol.receive(data)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.track_recv(self.protocol.raiden, host_port, data)

    def send(self, sender, host_port, bytes_):
        """ Send `bytes_` to `host_port`.

        Args:
            sender (address): The address of the running node.
            host_port (Tuple[(str, int)]): Tuple with the host name and port number.
            bytes_ (bytes): The bytes that are going to be sent through the wire.
        """
        self.server.sendto(bytes_, host_port)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.network.track_send(sender, host_port, bytes_)

    def register(self, proto, host, port):  # pylint: disable=unused-argument
        """ Attach the protocol instance that will handle received data. """
        assert isinstance(proto, RaidenProtocol)
        self.protocol = proto

    def stop(self):
        self.server.stop()
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):
    """ Persist the list of known nodes with their reputation """

    name = 'discovery'
    server = None  # will be set to DatagramServer

    def __init__(self, app):
        BaseService.__init__(self, app)
        log.info('NodeDiscovery init')
        self.protocol = DiscoveryProtocol(app=self.app, transport=self)

    @property
    def address(self):
        """ The configured (host, port) this service listens on. """
        return Address(self.app.config.get('p2p', 'listen_host'),
                       self.app.config.getint('p2p', 'listen_port'))

    def _send(self, address, message):
        """ Send `message` on a throwaway one-shot UDP socket.

        FIX: the socket used to be leaked on every call; it is now always
        closed in a finally block.
        """
        assert isinstance(address, Address)
        sock = gevent.socket.socket(type=gevent.socket.SOCK_DGRAM)
        try:
            # sock.bind(('0.0.0.0', self.address.port))  # send from our recv port
            sock.connect((address.ip, address.port))
            log.debug('sending', size=len(message), to=address)
            sock.send(message)
        finally:
            sock.close()

    def send(self, address, message):
        """ Send `message` through the listening server socket. """
        assert isinstance(address, Address)
        log.debug('sending', size=len(message), to=address)
        self.server.sendto(message, (address.ip, address.port))

    def receive(self, address, message):
        assert isinstance(address, Address)
        self.protocol.receive(address, message)

    def _handle_packet(self, message, ip_port):
        # DatagramServer handler: wrap the raw (ip, port) tuple in Address
        log.debug('handling packet', address=ip_port, size=len(message))
        assert len(ip_port) == 2
        address = Address(ip=ip_port[0], port=ip_port[1])
        self.receive(address, message)

    def start(self):
        log.info('starting discovery')
        # start a listening server
        host = self.app.config.get('p2p', 'listen_host')
        port = self.app.config.getint('p2p', 'listen_port')
        log.info('starting listener', port=port)
        self.server = DatagramServer((host, port), handle=self._handle_packet)
        self.server.start()
        super(NodeDiscovery, self).start()

    def _run(self):
        # block forever; all real work happens in the DatagramServer handler
        log.debug('_run called')
        evt = gevent.event.Event()
        evt.wait()

    def stop(self):
        log.info('stopping discovery')
        self.server.stop()
class UDPTransport(object):
    """ Node communication using the UDP protocol. """

    def __init__(self, host, port, socket=None, protocol=None, throttle_policy=None):
        self.protocol = protocol

        if socket is not None:
            # reuse an externally created/bound socket
            self.server = DatagramServer(socket, handle=self.receive)
        else:
            self.server = DatagramServer((host, port), handle=self.receive)

        self.host = self.server.server_host
        self.port = self.server.server_port
        # FIX: the signature used to be `throttle_policy=DummyPolicy()`, a
        # default evaluated once at definition time and therefore shared by
        # every transport instance; build a fresh policy per instance instead.
        self.throttle_policy = throttle_policy if throttle_policy is not None else DummyPolicy()

    def receive(self, data, host_port):  # pylint: disable=unused-argument
        """ Forward a received datagram to the protocol layer. """
        self.protocol.receive(data)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.track_recv(self.protocol.raiden, host_port, data)

    def send(self, sender, host_port, bytes_):
        """ Send `bytes_` to `host_port`.

        Args:
            sender (address): The address of the running node.
            host_port (Tuple[(str, int)]): Tuple with the host name and port number.
            bytes_ (bytes): The bytes that are going to be sent through the wire.
        """
        sleep_timeout = self.throttle_policy.consume(1)

        # Don't sleep if timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing it's latency
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        if not hasattr(self.server, 'socket'):
            raise RuntimeError('trying to send a message on a closed server')

        self.server.sendto(bytes_, host_port)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.network.track_send(sender, host_port, bytes_)

    def stop(self):
        self.server.stop()

    def stop_accepting(self):
        self.server.stop_accepting()

    def start(self):
        assert not self.server.started

        # server.stop() clears the handle, since this may be a restart the
        # handle must always be set
        self.server.set_handle(self.receive)
        self.server.start()
def udp_port():
    """ Start a UDP echo server on a free port and yield that port. """
    echo = UdpEcho()
    server = DatagramServer(('', 0), handle=echo)
    echo.server = server
    server.family = socket.AF_INET
    server.start()
    yield server.address[1]
    server.stop()
def udp_port():
    """ Fixture: run a UDP echo server on an ephemeral port, yielding the port. """
    handler = UdpEcho()
    srv = DatagramServer(('', 0), handle=handler)
    handler.server = srv
    srv.family = socket.AF_INET
    srv.start()
    yield srv.address[1]
    srv.stop()
class UDPIn(Actor):
    '''**A UDP server.**

    A UDP server.

    Parameters:

        - name(str) | The name of the module.
        - size(int) | The default max length of each queue.
        - frequency(int) | The frequency in seconds to generate metrics.
        - address(string)("0.0.0.0") | The address to bind to.
        - port(int)(19283) | The port to listen on.

    NOTE(review): the original docstring also advertised a ``reuse_port``
    parameter, but ``__init__`` does not accept one -- confirm intent before
    relying on it.

    Queues:

        - outbox | Incoming events.
    '''

    def __init__(self, name, size=100, frequency=1, address="0.0.0.0", port=19283):
        Actor.__init__(self, name, size, frequency)
        self.pool.createQueue("outbox")
        self.name = name
        # bind address kept private; `port` is part of the public surface
        self._address = address
        self.port = port
        self.server = DatagramServer("%s:%s" % (address, port), self.handle)

    def handle(self, data, address):
        '''Is called upon each incoming message'''
        # wrap the raw datagram in the framework event envelope
        self.submit({'header': {}, 'data': data}, self.pool.queue.outbox)

    def preHook(self):
        # the server is only started once the actor framework calls preHook
        self.logging.info('Started listening on %s:%s' % (self._address, self.port))
        self.server.start()
class Broadcast_Sub(object):
    """ Subscribe to UDP broadcast datagrams on `port`.

    Incoming datagrams are dispatched to `on_handler` by the underlying
    DatagramServer, which is started immediately.
    """

    def __init__(self, host, port, on_handler, qos=0):
        self.host = host
        self.port = port
        self.qos = qos  # NOTE(review): stored but never used in this class
        # bind to all interfaces so broadcast packets are received
        self.bcast = DatagramServer(('', self.port), handle=on_handler)
        self.bcast.start()

    def receive(self, data, address):  # pylint: disable=unused-argument
        """ Decode a broadcast datagram into (payload, type, date).

        FIX: the payload used to be JSON-decoded twice with the first result
        discarded in an unused variable, and the local `type` shadowed the
        builtin; decode once.  Raises ValueError on malformed JSON or if the
        message is not a 3-item sequence.
        """
        payload, msg_type, date = json.loads(data.decode())
class Server(object):
    """ Receive pickled logging records over UDP/TCP and push them to Redis.

    Records are re-formatted as logstash events and RPUSHed onto
    `redis_queue`, where a logstash redis input can consume them.

    SECURITY: this unpickles data straight off the network; anyone who can
    reach these ports can execute arbitrary code.  Bind only to trusted
    interfaces.
    """

    def __init__(self, bind_ip='127.0.0.1', tcp_port=DEFAULT_TCP, udp_port=DEFAULT_UDP,
                 redis_host='localhost', redis_port=6379, redis_queue='logstash',
                 message_type='pystash', fqdn=True, logstash_version=1,
                 logstash_tags=None):
        self.redis = redis.Redis(redis_host, redis_port)
        self.redis_queue = redis_queue
        # pick the wire format matching the target logstash version
        if logstash_version == 1:
            self.formatter = formatter.LogstashFormatterVersion1(message_type, logstash_tags, fqdn)
        else:
            self.formatter = formatter.LogstashFormatterVersion0(message_type, logstash_tags, fqdn)
        self.udp_server = DatagramServer('%s:%s' % (bind_ip, udp_port), self.udp_handle)
        self.tcp_server = StreamServer('%s:%s' % (bind_ip, tcp_port), self.tcp_handle)
        logging.info('Listening on %s (udp=%s tcp=%s) sending to %s:%s.',
                     bind_ip, udp_port, tcp_port, redis_host, redis_port)

    def obj_to_redis(self, obj):
        """ Rebuild a LogRecord from the unpickled dict and queue it in Redis. """
        record = logging.makeLogRecord(obj)
        payload = self.formatter.format(record)
        #logger.debug('message %s', payload)
        try:
            self.redis.rpush(self.redis_queue, payload)
        except redis.RedisError as exc:
            # best effort: drop the record but keep serving
            logging.error('Redis error: %s' % exc)

    def udp_handle(self, data, address):
        """ One datagram == one length-prefixed pickled record. """
        # 4-byte big-endian length prefix, as written by logging's SocketHandler
        slen = struct.unpack('>L', data[:4])[0]
        chunk = data[4:slen+4]
        try:
            obj = cPickle.loads(chunk)
        except EOFError:
            logging.error('UDP: invalid data to pickle %s', chunk)
            return
        self.obj_to_redis(obj)

    def tcp_handle(self, socket, address):
        """ Handle a stream of length-prefixed pickled records on one connection. """
        fileobj = socket.makefile()
        while True:
            chunk = fileobj.read(4)
            if len(chunk) < 4:
                break  # peer closed the connection
            slen = struct.unpack(">L", chunk)[0]
            chunk = fileobj.read(slen)
            # keep reading until the full record has arrived
            while len(chunk) < slen:
                chunk = chunk + fileobj.read(slen - len(chunk))
            fileobj.flush()
            try:
                obj = cPickle.loads(chunk)
            except EOFError:
                logging.error('TCP: invalid data to pickle %s', chunk)
                break
            self.obj_to_redis(obj)

    def start(self):
        # start both listeners, then block in the gevent hub forever
        self.udp_server.start()
        self.tcp_server.start()
        gevent.wait()
def test(self):
    """ Round-trip a message through a local UDP echo server. """
    received = []

    def on_datagram(message, address):
        received.append(message)
        server.sendto('reply-from-server', address)

    server = DatagramServer('127.0.0.1:9000', on_datagram)
    server.start()
    try:
        run_script(self.path, 'Test_udp_client')
    finally:
        server.close()

    self.assertEqual(received, ['Test_udp_client'])
def test(self):
    """ Run the udp_client example against a local echo server. """
    messages = []

    def collect(message, address):
        messages.append(message)
        server.sendto('reply-from-server', address)

    server = DatagramServer('127.0.0.1:9000', collect)
    server.start()
    try:
        run([sys.executable, '-u', 'udp_client.py', 'Test_udp_client'],
            timeout=10, cwd='../examples/')
    finally:
        server.close()

    self.assertEqual(messages, ['Test_udp_client'])
def test(self):
    """ Verify the example client's datagram is received by a local server. """
    seen = []

    def record(message, address):
        seen.append(message)
        server.sendto(b'reply-from-server', address)

    server = DatagramServer('127.0.0.1:9001', record)
    server.start()
    try:
        self.run_example()
    finally:
        server.close()

    self.assertEqual(seen, [b'Test_udp_client'])
def start(self):
    """ Boot every registered component, optionally the datagram control
    server, then block in the gevent hub until a shutdown signal arrives.
    On exit, stop all components with a bounded cleanup timeout.
    """
    self.register_logger("gevent_helpers")
    for comp in self.components.itervalues():
        comp.manager = self  # Add a backreference for leaky abstractions
        comp.counters = self.register_stat_counters(
            comp, comp.one_min_stats, comp.one_sec_stats)
        # Register a separate logger for each component
        if comp is not self:
            comp.logger = self.register_logger(comp.name)
            comp.start()  # Starts the greenlet
    # the manager itself is started via the base class, not the loop above
    Component.start(self)

    # Start the datagram control server if it's been inited
    if self.config['datagram']['enabled']:
        DatagramServer.start(self, )

    # This is the main thread of execution, so just continue here waiting
    # for exit signals
    ######
    # Register shutdown signals
    gevent.signal(signal.SIGUSR1, self.dump_objgraph)
    gevent.signal(signal.SIGHUP, exit, "SIGHUP")
    gevent.signal(signal.SIGINT, exit, "SIGINT")
    gevent.signal(signal.SIGTERM, exit, "SIGTERM")

    try:
        gevent.wait()
    # Allow a force exit from multiple exit signals (Ctrl+C mashed multiple times)
    finally:
        self.logger.info(
            "Exiting requested, allowing {} seconds for cleanup.".format(
                self.config['term_timeout']))
        try:
            for comp in self.components.itervalues():
                self.logger.debug(
                    "Calling stop on component {}".format(comp))
                comp.stop()
            if gevent.wait(timeout=self.config['term_timeout']):
                self.logger.info("All threads exited normally")
            else:
                self.logger.info(
                    "Timeout reached, shutting down forcefully")
        except gevent.GreenletExit:
            # a second kill during cleanup: bail out immediately
            self.logger.info("Shutdown requested again by system, "
                             "exiting without cleanup")
        self.logger.info("Exit")
        self.logger.info("=" * 80)
def start(self):
    """ Start all components and the optional datagram control server, then
    wait in the gevent hub for a shutdown signal; on exit, stop components
    with a bounded `term_timeout` cleanup window.
    """
    self.register_logger("gevent_helpers")
    for comp in self.components.itervalues():
        comp.manager = self  # Add a backreference for leaky abstractions
        comp.counters = self.register_stat_counters(comp, comp.one_min_stats, comp.one_sec_stats)
        # Register a separate logger for each component
        if comp is not self:
            comp.logger = self.register_logger(comp.name)
            comp.start()  # Starts the greenlet
    # the manager's own greenlet is started through the base class
    Component.start(self)

    # Start the datagram control server if it's been inited
    if self.config['datagram']['enabled']:
        DatagramServer.start(self, )

    # This is the main thread of execution, so just continue here waiting
    # for exit signals
    ######
    # Register shutdown signals
    gevent.signal(signal.SIGUSR1, self.dump_objgraph)
    gevent.signal(signal.SIGHUP, exit, "SIGHUP")
    gevent.signal(signal.SIGINT, exit, "SIGINT")
    gevent.signal(signal.SIGTERM, exit, "SIGTERM")

    try:
        gevent.wait()
    # Allow a force exit from multiple exit signals (Ctrl+C mashed multiple times)
    finally:
        self.logger.info("Exiting requested, allowing {} seconds for cleanup."
                         .format(self.config['term_timeout']))
        try:
            for comp in self.components.itervalues():
                self.logger.debug("Calling stop on component {}".format(comp))
                comp.stop()
            if gevent.wait(timeout=self.config['term_timeout']):
                self.logger.info("All threads exited normally")
            else:
                self.logger.info("Timeout reached, shutting down forcefully")
        except gevent.GreenletExit:
            # a second kill during cleanup: bail out immediately
            self.logger.info("Shutdown requested again by system, "
                             "exiting without cleanup")
        self.logger.info("Exit")
        self.logger.info("=" * 80)
class UDPTransport(object):
    """ Minimal UDP transport; always binds to an ephemeral port. """

    def __init__(self, host, port, protocol=None):
        # note: host/port arguments are accepted but the server binds ('', 0)
        self.protocol = protocol
        srv = DatagramServer(('', 0), handle=self.receive)
        srv.start()
        self.server = srv
        self.host = srv.server_host
        self.port = srv.server_port

    def receive(self, data, host_port):
        # hand the raw datagram to the protocol layer
        self.protocol.receive(data)

    def send(self, sender, host_port, data):
        log.info('TRANSPORT SENDS')
        self.server.sendto(data, host_port)
        DummyTransport.network.track_send(sender, host_port, data)  # debugging hook

    def register(self, proto, host, port):
        """ Set the protocol that consumes received data. """
        assert isinstance(proto, RaidenProtocol)
        self.protocol = proto
class UDPIn(Actor):
    """**A UDP server.**

    A UDP server.

    Parameters:

        - address(string)("0.0.0.0")
           |  The address to bind to.

        - port(int)(19283)
           |  The port to listen on.

        - reuse_port(bool)(False)
           |  Whether or not to set the SO_REUSEPORT socket option.
           |  Allows multiple instances to bind to the same port.
           |  Requires Linux kernel >= 3.9

    Queues:

        - outbox
           |  Incoming events.
    """

    def __init__(self, actor_config, address="0.0.0.0", port=19283):
        Actor.__init__(self, actor_config)
        self.pool.createQueue("outbox")
        listen_on = "%s:%s" % (self.kwargs.address, self.kwargs.port)
        self.server = DatagramServer(listen_on, self.handle)

    def handle(self, data, address):
        """Is called upon each incoming message"""
        event = self.createEvent()
        event.data = data
        self.submit(event, self.pool.queue.outbox)

    def preHook(self):
        # the listener only starts once the framework invokes preHook
        self.logging.info("Started listening on %s:%s" % (self.kwargs.address, self.kwargs.port))
        self.server.start()
class Discovery(object):
    """ UDP-broadcast discovery of components on the local network. """

    def __init__(self, discovery_port=9000, act=False, name="PYROBOT", delay=1):
        self.discovery_port = discovery_port
        self.name = name
        self.delay = delay
        self.active_server = act

        # broadcast-capable client socket used for queries
        self.client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.client.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # the timeout also bounds how long Get() waits for replies
        self.client.settimeout(delay)

        #print("utils discovery port",self.discovery_port)
        self.server = DatagramServer(('', self.discovery_port), handle=self._receive)
        if self.active_server:
            self.server.start()

    def Get(self, key):
        """ Broadcast a query for `key` ("robot/component/query") and collect
        every matching reply received before the socket timeout fires.
        """
        #print("key ",key)
        robot, comp, query = key.split("/")
        key = "{}::{}".format(self.name, key)
        self.client.sendto(key.encode(), ("255.255.255.255", self.discovery_port))
        #time.sleep(self.delay)
        instances = []
        try:
            while True:
                data, address = self.client.recvfrom(buff_size)
                data, rec_query, sender = json.loads(data.decode())
                if rec_query == query:
                    instances.append(data)
        # FIX: this was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; only the expected end-of-replies
        # timeout and malformed/short replies are ignored now.
        except (socket.timeout, ValueError):
            pass
        return getters["_Get_" + query](instances)

    def _receive(self, key, address):
        # server-side handler; intentionally a no-op in this class
        pass
class UDPTransport(object):
    """ Minimal UDP transport; always binds to an ephemeral port. """

    def __init__(self, host, port, protocol=None):
        self.protocol = protocol
        self.server = DatagramServer(('', 0), handle=self.receive)
        self.server.start()
        self.host = self.server.server_host
        self.port = self.server.server_port

    def receive(self, data, host_port):
        """ Forward a received datagram to the protocol layer. """
        self.protocol.receive(data)

    def send(self, sender, host_port, data):
        """ Send `data` to `host_port` and record it on the debug network. """
        # FIX: sendto used to be wrapped in `try/except socket.error as e:
        # raise e`, which changed nothing except destroying the original
        # traceback; let the error propagate naturally.
        self.server.sendto(data, host_port)
        DummyTransport.network.track_send(sender, host_port, data)  # debugging hook

    def register(self, proto, host, port):
        """ Set the protocol that consumes received data. """
        assert isinstance(proto, RaidenProtocol)
        self.protocol = proto
class UDPTransport(Runnable):
    """ UDP-based transport with retry queues, throttling and healthchecks.

    Lifecycle: constructed stopped (event_stop set), armed by start(),
    torn down by stop() which drains all outgoing queue greenlets before
    closing the socket.
    """

    UDP_MAX_MESSAGE_SIZE = 1200

    def __init__(self, discovery, udpsocket, throttle_policy, config):
        super().__init__()

        # these values are initialized by the start method
        self.queueids_to_queues: typing.Dict
        self.raiden: RaidenService

        self.discovery = discovery
        self.config = config

        self.retry_interval = config['retry_interval']
        self.retries_before_backoff = config['retries_before_backoff']
        self.nat_keepalive_retries = config['nat_keepalive_retries']
        self.nat_keepalive_timeout = config['nat_keepalive_timeout']
        self.nat_invitation_timeout = config['nat_invitation_timeout']

        # set while stopped; cleared by start()
        self.event_stop = Event()
        self.event_stop.set()

        self.greenlets = list()
        self.addresses_events = dict()

        self.messageids_to_asyncresults = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        # cache discovery lookups for CACHE_TTL seconds
        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)

        self.throttle_policy = throttle_policy
        self.server = DatagramServer(udpsocket, handle=self.receive)

    def start(
            self,
            raiden: RaidenService,
            message_handler: MessageHandler,
    ):
        if not self.event_stop.ready():
            raise RuntimeError('UDPTransport started while running')

        self.event_stop.clear()
        self.raiden = raiden
        self.message_handler = message_handler
        self.queueids_to_queues = dict()

        # server.stop() clears the handle. Since this may be a restart the
        # handle must always be set
        self.server.set_handle(self.receive)

        self.server.start()
        super().start()

    def _run(self):
        """ Runnable main method, perform wait on long-running subtasks """
        try:
            self.event_stop.wait()
        except gevent.GreenletExit:  # killed without exception
            self.event_stop.set()
            gevent.killall(self.greenlets)  # kill children
            raise  # re-raise to keep killed status
        except Exception:
            self.stop()  # ensure cleanup and wait on subtasks
            raise

    def stop(self):
        if self.event_stop.ready():
            return  # double call, happens on normal stop, ignore

        self.event_stop.set()

        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.server.stop_accepting()

        # Stop processing the outgoing queues
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.server.stop()

        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()  # pylint: disable=protected-access
        except socket.error:
            pass

        # Set all the pending results to False
        for async_result in self.messageids_to_asyncresults.values():
            async_result.set(False)

    def get_health_events(self, recipient):
        """ Starts a healthcheck task for `recipient` and returns a
        HealthEvents with locks to react on its current state.
        """
        if recipient not in self.addresses_events:
            self.start_health_check(recipient)

        return self.addresses_events[recipient]

    def start_health_check(self, recipient):
        """ Starts a task for healthchecking `recipient` if there is not
        one yet.
        """
        if recipient not in self.addresses_events:
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                recipient,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = healthcheck.HealthEvents(
                event_healthy=Event(),
                event_unhealthy=Event(),
            )

            self.addresses_events[recipient] = events

            greenlet_healthcheck = gevent.spawn(
                healthcheck.healthcheck,
                self,
                recipient,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            )
            greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}'
            greenlet_healthcheck.link_exception(self.on_error)
            self.greenlets.append(greenlet_healthcheck)

    def init_queue_for(
            self,
            queue_identifier: QueueIdentifier,
            items: typing.List[QueueItem_T],
    ) -> Queue_T:
        """ Create the queue identified by the queue_identifier and
        initialize it with `items`.
        """
        recipient = queue_identifier.recipient
        queue = self.queueids_to_queues.get(queue_identifier)
        assert queue is None

        queue = NotifyingQueue(items=items)
        self.queueids_to_queues[queue_identifier] = queue

        events = self.get_health_events(recipient)

        # a dedicated greenlet drains this queue and retries sends
        greenlet_queue = gevent.spawn(
            single_queue_send,
            self,
            recipient,
            queue,
            queue_identifier,
            self.event_stop,
            events.event_healthy,
            events.event_unhealthy,
            self.retries_before_backoff,
            self.retry_interval,
            self.retry_interval * 10,
        )

        if queue_identifier.channel_identifier == CHANNEL_IDENTIFIER_GLOBAL_QUEUE:
            greenlet_queue.name = f'Queue for {pex(recipient)} - global'
        else:
            greenlet_queue.name = (
                f'Queue for {pex(recipient)} - {queue_identifier.channel_identifier}'
            )

        greenlet_queue.link_exception(self.on_error)
        self.greenlets.append(greenlet_queue)

        log.debug(
            'new queue created for',
            node=pex(self.raiden.address),
            queue_identifier=queue_identifier,
            items_qty=len(items),
        )

        return queue

    def get_queue_for(
            self,
            queue_identifier: QueueIdentifier,
    ) -> Queue_T:
        """ Return the queue identified by the given queue identifier.

        If the queue doesn't exist it will be instantiated.
        """
        queue = self.queueids_to_queues.get(queue_identifier)

        if queue is None:
            items = ()
            queue = self.init_queue_for(queue_identifier, items)

        return queue

    def send_async(
            self,
            queue_identifier: QueueIdentifier,
            message: 'Message',
    ):
        """ Send a new ordered message to recipient.

        Messages that use the same `queue_identifier` are ordered.
        """
        recipient = queue_identifier.recipient
        if not is_binary_address(recipient):
            raise ValueError('Invalid address {}'.format(pex(recipient)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError('Do not use send for {} messages'.format(
                message.__class__.__name__))

        messagedata = message.encode()
        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            raise ValueError(
                'message size exceeds the maximum {}'.format(
                    self.UDP_MAX_MESSAGE_SIZE),
            )

        # message identifiers must be unique
        message_id = message.message_identifier

        # ignore duplicates
        if message_id not in self.messageids_to_asyncresults:
            self.messageids_to_asyncresults[message_id] = AsyncResult()

            queue = self.get_queue_for(queue_identifier)
            queue.put((messagedata, message_id))
            assert queue.is_set()

            log.debug(
                'Message queued',
                node=pex(self.raiden.address),
                queue_identifier=queue_identifier,
                queue_size=len(queue),
                message=message,
            )

    def maybe_send(self, recipient: typing.Address, message: Message):
        """ Send message to recipient if the transport is running. """

        if not is_binary_address(recipient):
            raise InvalidAddress('Invalid address {}'.format(pex(recipient)))

        messagedata = message.encode()
        host_port = self.get_host_port(recipient)

        self.maybe_sendraw(host_port, messagedata)

    def maybe_sendraw_with_result(
            self,
            recipient: typing.Address,
            messagedata: bytes,
            message_id: typing.MessageID,
    ) -> AsyncResult:
        """ Send message to recipient if the transport is running.

        Returns:
            An AsyncResult that will be set once the message is delivered. As
            long as the message has not been acknowledged with a Delivered
            message the function will return the same AsyncResult.
        """
        async_result = self.messageids_to_asyncresults.get(message_id)
        if async_result is None:
            async_result = AsyncResult()
            self.messageids_to_asyncresults[message_id] = async_result

        host_port = self.get_host_port(recipient)
        self.maybe_sendraw(host_port, messagedata)

        return async_result

    def maybe_sendraw(self, host_port: typing.Tuple[int, int], messagedata: bytes):
        """ Send message to recipient if the transport is running. """

        # Don't sleep if timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing it's latency
        sleep_timeout = self.throttle_policy.consume(1)
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        # Check the udp socket is still available before trying to send the
        # message. There must be *no context-switches after this test*.
        if hasattr(self.server, 'socket'):
            self.server.sendto(
                messagedata,
                host_port,
            )

    def receive(
            self,
            messagedata: bytes,
            host_port: typing.Tuple[str, int],  # pylint: disable=unused-argument
    ) -> bool:
        """ Handle an UDP packet.

        Returns True when the packet was a well-formed message, False
        otherwise (oversized, undecodable, or unknown command id).
        """
        # pylint: disable=unidiomatic-typecheck

        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            log.warning(
                'Invalid message: Packet larger than maximum size',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
                length=len(messagedata),
            )
            return False

        try:
            message = decode(messagedata)
        except InvalidProtocolMessage as e:
            log.warning(
                'Invalid protocol message',
                error=str(e),
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
            )
            return False

        if type(message) == Pong:
            self.receive_pong(message)
        elif type(message) == Ping:
            self.receive_ping(message)
        elif type(message) == Delivered:
            self.receive_delivered(message)
        elif message is not None:
            self.receive_message(message)
        else:
            log.warning(
                'Invalid message: Unknown cmdid',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
            )
            return False

        return True

    def receive_message(self, message: Message):
        """ Handle a Raiden protocol message.

        The protocol requires durability of the messages. The UDP transport
        relies on the node's WAL for durability. The message will be converted
        to a state change, saved to the WAL, and *processed* before the
        durability is confirmed, which is a stronger property than what is
        required of any transport.
        """
        self.message_handler.on_message(self.raiden, message)

        # Sending Delivered after the message is decoded and *processed*
        # gives a stronger guarantee than what is required from a
        # transport.
        #
        # Alternatives are, from weakest to strongest options:
        # - Just save it on disk and asynchronously process the messages
        # - Decode it, save to the WAL, and asynchronously process the
        #   state change
        # - Decode it, save to the WAL, and process it (the current
        #   implementation)
        delivered_message = Delivered(message.message_identifier)
        self.raiden.sign(delivered_message)

        self.maybe_send(
            message.sender,
            delivered_message,
        )

    def receive_delivered(self, delivered: Delivered):
        """ Handle a Delivered message.

        The Delivered message is how the UDP transport guarantees persistence
        by the partner node. The message itself is not part of the raiden
        protocol, but it's required by this transport to provide the required
        properties.
        """
        self.message_handler.on_message(self.raiden, delivered)

        message_id = delivered.delivered_message_identifier
        # NOTE(review): reads via self.raiden.transport but deletes from
        # self -- presumably self IS raiden.transport; confirm.
        async_result = self.raiden.transport.messageids_to_asyncresults.get(
            message_id)

        # clear the async result, otherwise we have a memory leak
        if async_result is not None:
            del self.messageids_to_asyncresults[message_id]
            async_result.set()
        else:
            log.warn(
                'Unknown delivered message received',
                message_id=message_id,
            )

    # Pings and Pongs are used to check the health status of another node. They
    # are /not/ part of the raiden protocol, only part of the UDP transport,
    # therefore these messages are not forwarded to the message handler.
    def receive_ping(self, ping: Ping):
        """ Handle a Ping message by answering with a Pong. """

        log_healthcheck.debug(
            'Ping received',
            node=pex(self.raiden.address),
            message_id=ping.nonce,
            message=ping,
            sender=pex(ping.sender),
        )

        pong = Pong(ping.nonce)
        self.raiden.sign(pong)

        try:
            self.maybe_send(ping.sender, pong)
        except (InvalidAddress, UnknownAddress) as e:
            log.debug("Couldn't send the `Delivered` message", e=e)

    def receive_pong(self, pong: Pong):
        """ Handles a Pong message. """

        message_id = ('ping', pong.nonce, pong.sender)
        async_result = self.messageids_to_asyncresults.get(message_id)

        if async_result is not None:
            log_healthcheck.debug(
                'Pong received',
                node=pex(self.raiden.address),
                sender=pex(pong.sender),
                message_id=pong.nonce,
            )

            async_result.set(True)
        else:
            log_healthcheck.warn(
                'Unknown pong received',
                message_id=message_id,
            )

    def get_ping(self, nonce: int) -> bytes:
        """ Returns a signed Ping message (encoded bytes).

        Note: Ping messages don't have an enforced ordering, so a Ping message
        with a higher nonce may be acknowledged first.
        """
        # annotation fixed: the method returns the encoded bytes of the
        # signed Ping, not the Ping instance itself
        message = Ping(
            nonce=nonce,
            current_protocol_version=constants.PROTOCOL_VERSION,
        )
        self.raiden.sign(message)
        message_data = message.encode()

        return message_data

    def set_node_network_state(self, node_address: typing.Address, node_state):
        """ Record a node's reachability change in the raiden state machine. """
        state_change = ActionChangeNodeNetworkState(node_address, node_state)
        self.raiden.handle_state_change(state_change)
def start(self):
    """ Start the underlying DatagramServer listener.

    NOTE(review): calls DatagramServer.start directly rather than via
    super() -- presumably intentional to target the datagram base in a
    multiple-inheritance setup; confirm against the class hierarchy.
    """
    DatagramServer.start(self)
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):
    """ Persist the list of known nodes with their reputation """

    name = 'discovery'
    server = None  # will be set to DatagramServer

    # default bootstrap nodes for the public network; normally overridden by
    # the application config
    default_config = dict(discovery=dict(
        listen_port=30303,
        listen_host='0.0.0.0',
        bootstrap_nodes=dict(
            cpp_bootstrap=
            'enode://487611428e6c99a11a9795a6abe7b529e81315ca6aad66e2a2fc76e3adf263faba0d35466c2f8f68d561dbefa8878d4df5f1f2ddb1fbeab7f42ffb8cd328bd4a@5.1.83.226:30303',
            go_bootstrap=
            'enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303',
            go_bootstrap2=
            'enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303',
            py_bootstrap=
            'enode://2676755dd8477ad3beea32b4e5a144fa10444b70dfa3e05effb0fdfa75683ebd4f75709e1f8126cb5317c5a35cae823d503744e790a3a038ae5dd60f51ee9101@144.76.62.101:30303',
        ).values()),
        node=dict(privkey_hex=''))

    def __init__(self, app):
        BaseService.__init__(self, app)
        log.info('NodeDiscovery init')
        # man setsockopt
        self.protocol = DiscoveryProtocol(app=self.app, transport=self)

    @property
    def address(self):
        """ The configured (host, port) this service listens on. """
        ip = self.app.config['discovery']['listen_host']
        port = self.app.config['discovery']['listen_port']
        return Address(ip, port)

    # def _send(self, address, message):
    #     assert isinstance(address, Address)
    #     sock = gevent.socket.socket(type=gevent.socket.SOCK_DGRAM)
    #     sock.bind(('0.0.0.0', self.address.port))  # send from our recv port
    #     sock.connect((address.ip, address.port))
    #     log.debug('sending', size=len(message), to=address)
    #     sock.send(message)

    def send(self, address, message):
        """ Send `message` through the listening server socket.

        Write errors are logged and absorbed: the service sleeps briefly
        instead of crashing, hoping the socket recovers.
        """
        assert isinstance(address, Address)
        log.debug('sending', size=len(message), to=address)
        try:
            self.server.sendto(message, (address.ip, address.udp_port))
        except gevent.socket.error as e:
            log.critical('udp write error', errno=e.errno, reason=e.strerror)
            log.critical('waiting for recovery')
            gevent.sleep(5.)

    def receive(self, address, message):
        assert isinstance(address, Address)
        self.protocol.receive(address, message)

    def _handle_packet(self, message, ip_port):
        # DatagramServer handler: wrap the raw (ip, port) tuple in Address
        log.debug('handling packet', address=ip_port, size=len(message))
        assert len(ip_port) == 2
        address = Address(ip=ip_port[0], udp_port=ip_port[1])
        self.receive(address, message)

    def start(self):
        log.info('starting discovery')
        # start a listening server
        ip = self.app.config['discovery']['listen_host']
        port = self.app.config['discovery']['listen_port']
        log.info('starting listener', port=port, host=ip)
        self.server = DatagramServer((ip, port), handle=self._handle_packet)
        self.server.start()
        super(NodeDiscovery, self).start()

        # bootstap
        nodes = [
            Node.from_uri(x)
            for x in self.app.config['discovery']['bootstrap_nodes']
        ]
        if nodes:
            self.protocol.kademlia.bootstrap(nodes)

    def _run(self):
        # block forever; all real work happens in the DatagramServer handler
        log.debug('_run called')
        evt = gevent.event.Event()
        evt.wait()

    def stop(self):
        log.info('stopping discovery')
        self.server.stop()
        super(NodeDiscovery, self).stop()
class IpmiServer(object):
    """UDP IPMI honeypot server.

    Emulates a BMC: answers IPMI 1.5/2.0 (RMCP/RMCP+) traffic, performs the
    RAKP key-exchange handshake and serves a small set of application
    commands (user management, session privilege, close session), delegating
    anything else it recognises to a ``FakeBmc``.
    """

    def __init__(self, template, template_directory, args):
        # Device identity and user database come from the XML template.
        dom = etree.parse(template)
        databus = conpot_core.get_databus()
        self.device_name = databus.get_value(
            dom.xpath('//ipmi/device_info/device_name/text()')[0])
        self.port = None
        self.sessions = dict()  # source ip -> FakeSession
        self.uuid = uuid.uuid4()
        self.kg = None  # BMC key; defaults to the per-user key in _got_rakp1
        self.sock = None
        self.authdata = collections.OrderedDict()
        # Pre-built Get Channel Authentication Capabilities response body.
        lanchannel = 1
        authtype = 0b10000000   # IPMI 2.0+ extended capabilities
        authstatus = 0b00000100
        chancap = 0b00000010
        oemdata = (0, 0, 0, 0)
        self.authcap = struct.pack('BBBBBBBBB', 0, lanchannel, authtype,
                                   authstatus, chancap, *oemdata)
        self.server = None
        self.session = None
        self.bmc = self._configure_users(dom)
        logger.info('Conpot IPMI initialized using %s template', template)

    def _configure_users(self, dom):
        """Load users/passwords/privileges from the template and build the BMC.

        Raises ValueError if any privilege level is outside 1..4.
        """
        # XML parsing
        authdata_name = dom.xpath('//ipmi/user_list/user/user_name/text()')
        authdata_passwd = dom.xpath('//ipmi/user_list/user/password/text()')
        authdata_name = [i.encode('utf-8') for i in authdata_name]
        authdata_passwd = [i.encode('utf-8') for i in authdata_passwd]
        self.authdata = collections.OrderedDict(
            zip(authdata_name, authdata_passwd))
        authdata_priv = dom.xpath('//ipmi/user_list/user/privilege/text()')
        if False in map(lambda k: 0 < int(k) <= 4, authdata_priv):
            raise ValueError("Privilege level must be between 1 and 4")
        authdata_priv = [int(k) for k in authdata_priv]
        self.privdata = collections.OrderedDict(
            zip(authdata_name, authdata_priv))
        activeusers = dom.xpath('//ipmi/user_list/user/active/text()')
        self.activeusers = [1 if x == 'true' else 0 for x in activeusers]
        fixedusers = dom.xpath('//ipmi/user_list/user/fixed/text()')
        self.fixedusers = [1 if x == 'true' else 0 for x in fixedusers]
        self.channelaccessdata = collections.OrderedDict(
            zip(authdata_name, activeusers))
        # NOTE(review): self.port is still None here — it is only assigned
        # in start(); confirm FakeBmc tolerates that.
        return FakeBmc(self.authdata, self.port)

    def _checksum(self, *data):
        """IPMI two's-complement 8-bit checksum over the given bytes."""
        csum = sum(data)
        csum ^= 0xff
        csum += 1
        csum &= 0xff
        return csum

    def handle(self, data, address):
        """DatagramServer entry point: route a packet to a new or known session."""
        # make sure self.session exists
        # NOTE(review): hasattr(self, 'session') is always True because
        # __init__ assigns self.session = None — only the first test matters.
        if not address[0] in self.sessions.keys() or not hasattr(
                self, 'session'):
            # new session for new source
            logger.info('New IPMI traffic from %s', address)
            self.session = FakeSession(address[0], "", "", address[1])
            self.session.server = self
            self.uuid = uuid.uuid4()
            self.kg = None
            self.session.socket = self.sock
            self.sessions[address[0]] = self.session
            self.initiate_session(data, address, self.session)
        else:
            # session already exists
            logger.info('Incoming IPMI traffic from %s', address)
            if self.session.stage == 0:
                self.close_server_session()
            else:
                self._got_request(data, address, self.session)

    def initiate_session(self, data, address, session):
        """Validate the first packet of a session and start the handshake.

        Any malformed packet tears the just-created session down again.
        """
        if len(data) < 22:
            self.close_server_session()
            return
        if not (chr_py3(data[0]) == b'\x06' and data[2:4] == b'\xff\x07'):
            # check rmcp version, sequencenumber and class;
            self.close_server_session()
            return
        if chr_py3(data[4]) == b'\x06':
            # ipmi v2
            session.ipmiversion = 2.0
            session.authtype = 6
            payload_type = chr_py3(data[5])
            if payload_type not in (b'\x00', b'\x10'):
                self.close_server_session()
                return
            if payload_type == b'\x10':
                # RMCP+ open session request: hand the rest of the
                # conversation to a pyghmi ServerSession.
                # new session to handle conversation
                serversession.ServerSession(self.authdata, self.kg,
                                            session.sockaddr, self.sock,
                                            data[16:], self.uuid, bmc=self)
                serversession.ServerSession.logged = logger
                return
            # data = data[13:]
        if len(data[14:16]) < 2:
            self.close_server_session()
        else:
            # Decode target address / netfn / lun from the IPMI LAN header.
            myaddr, netfnlun = struct.unpack('2B', data[14:16])
            netfn = (netfnlun & 0b11111100) >> 2
            mylun = netfnlun & 0b11
            if netfn == 6:
                # application request
                if chr_py3(data[19]) == b'\x38':
                    # cmd = get channel auth capabilities
                    verchannel, level = struct.unpack('2B', data[20:22])
                    version = verchannel & 0b10000000
                    if version != 0b10000000:
                        self.close_server_session()
                        return
                    channel = verchannel & 0b1111
                    if channel != 0xe:  # 0xe = "this channel"
                        self.close_server_session()
                        return
                    (clientaddr, clientlun) = struct.unpack('BB', data[17:19])
                    level &= 0b1111
                    self.send_auth_cap(myaddr, mylun, clientaddr, clientlun,
                                       session.sockaddr)

    def send_auth_cap(self, myaddr, mylun, clientaddr, clientlun, sockaddr):
        """Reply to Get Channel Authentication Capabilities and advance stage."""
        # Fixed RMCP header + session wrapper for an unauthenticated reply.
        header = b'\x06\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10'
        headerdata = (clientaddr, clientlun | (7 << 2))
        headersum = self._checksum(*headerdata)
        header += struct.pack('BBBBBB',
                              *(headerdata + (headersum, myaddr, mylun, 0x38)))
        header += self.authcap
        # Trailing checksum covers everything after the session wrapper.
        bodydata = struct.unpack('B' * len(header[17:]), header[17:])
        header += chr_py3(self._checksum(*bodydata))
        self.session.stage += 1
        logger.info('Connection established with %s', sockaddr)
        self.session.send_data(header, sockaddr)

    def close_server_session(self):
        """Forget the current session (keyed by source IP)."""
        logger.info('IPMI Session closed %s', self.session.sockaddr[0])
        # cleanup session
        del self.sessions[self.session.sockaddr[0]]
        del self.session

    def _got_request(self, data, address, session):
        """Dispatch an in-session packet by IPMI auth-type byte (data[4])."""
        if chr_py3(data[4]) in (b'\x00', b'\x02'):
            # ipmi 1.5 payload
            session.ipmiversion = 1.5
            remsequencenumber = struct.unpack('<I', data[5:9])[0]
            # Reject replayed / out-of-order sequence numbers.
            if hasattr(session, 'remsequencenumber'
                       ) and remsequencenumber < session.remsequencenumber:
                self.close_server_session()
                return
            session.remsequencenumber = remsequencenumber
            if ord(chr_py3(data[4])) != session.authtype:
                self.close_server_session()
                return
            remsessid = struct.unpack("<I", data[9:13])[0]
            if remsessid != session.sessionid:
                self.close_server_session()
                return
            rsp = list(struct.unpack("!%dB" % len(data), data))
            authcode = False
            if chr_py3(data[4]) == b'\x02':
                # authcode in ipmi 1.5 packet
                authcode = data[13:29]
                del rsp[13:29]
            # rsp[13] is the payload length byte.
            payload = list(rsp[14:14 + rsp[13]])
            if authcode:
                expectedauthcode = session._ipmi15authcode(
                    payload, checkremotecode=True)
                expectedauthcode = struct.pack("%dB" % len(expectedauthcode),
                                               *expectedauthcode)
                if expectedauthcode != authcode:
                    self.close_server_session()
                    return
            session._ipmi15(payload)
        elif chr_py3(data[4]) == b'\x06':
            # ipmi 2.0 payload
            session.ipmiversion = 2.0
            session.authtype = 6
            session._ipmi20(data)
        else:
            # unrecognized data
            self.close_server_session()
            return

    def _got_rmcp_openrequest(self, data):
        """Answer an RMCP+ open-session request with our proposed algorithms."""
        request = struct.pack('B' * len(data), *data)
        clienttag = ord(chr_py3(request[0]))
        self.clientsessionid = list(struct.unpack('4B', request[4:8]))
        self.managedsessionid = list(struct.unpack('4B', os.urandom(4)))
        self.session.privlevel = 4
        response = ([clienttag, 0, self.session.privlevel, 0] +
                    self.clientsessionid + self.managedsessionid + [
                        0, 0, 0, 8, 1, 0, 0, 0,  # auth
                        1, 0, 0, 8, 1, 0, 0, 0,  # integrity
                        2, 0, 0, 8, 1, 0, 0, 0,  # privacy
                    ])
        logger.info('IPMI open session request')
        self.session.send_payload(
            response,
            constants.payload_types['rmcpplusopenresponse'],
            retry=False)

    def _got_rakp1(self, data):
        """Handle RAKP message 1: record client nonce/role/user, send RAKP 2."""
        clienttag = data[0]
        self.Rm = data[8:24]          # remote (client) random number
        self.rolem = data[24]
        self.maxpriv = self.rolem & 0b111
        namepresent = data[27]
        if namepresent == 0:
            self.close_server_session()
            return
        usernamebytes = data[28:]
        self.username = struct.pack('%dB' % len(usernamebytes), *usernamebytes)
        if self.username not in self.authdata:
            logger.info('User {} supplied by client not in user_db.'.format(
                self.username, ))
            self.close_server_session()
            return
        uuidbytes = self.uuid.bytes
        uuidbytes = list(struct.unpack('%dB' % len(uuidbytes), uuidbytes))
        self.uuiddata = uuidbytes
        self.Rc = list(struct.unpack('16B', os.urandom(16)))  # our nonce
        # HMAC input per RAKP 2: SIDm | SIDc | Rm | Rc | GUIDc | ROLEm | ULENm | UNAMEm
        hmacdata = (self.clientsessionid + self.managedsessionid + self.Rm +
                    self.Rc + uuidbytes + [self.rolem, len(self.username)])
        hmacdata = struct.pack('%dB' % len(hmacdata), *hmacdata)
        hmacdata += self.username
        self.kuid = self.authdata[self.username]
        if self.kg is None:
            self.kg = self.kuid
        authcode = hmac.new(self.kuid, hmacdata, hashlib.sha1).digest()
        authcode = list(struct.unpack('%dB' % len(authcode), authcode))
        newmessage = ([clienttag, 0, 0, 0] + self.clientsessionid + self.Rc +
                      uuidbytes + authcode)
        logger.info('IPMI rakp1 request')
        self.session.send_payload(newmessage,
                                  constants.payload_types['rakp2'],
                                  retry=False)

    def _got_rakp3(self, data):
        """Handle RAKP message 3: verify client authcode, derive session keys."""
        RmRc = struct.pack('B' * len(self.Rm + self.Rc), *(self.Rm + self.Rc))
        # Session integrity key: HMAC-SHA1 over Rm|Rc|ROLEm|ULEN|UNAME keyed with Kg.
        self.sik = hmac.new(
            self.kg,
            RmRc + struct.pack("2B", self.rolem, len(self.username)) +
            self.username, hashlib.sha1).digest()
        self.session.k1 = hmac.new(self.sik, b'\x01' * 20,
                                   hashlib.sha1).digest()
        self.session.k2 = hmac.new(self.sik, b'\x02' * 20,
                                   hashlib.sha1).digest()
        self.session.aeskey = self.session.k2[0:16]
        hmacdata = struct.pack('B' * len(self.Rc), *self.Rc) + \
            struct.pack("4B", *self.clientsessionid) + \
            struct.pack("2B", self.rolem, len(self.username)) + self.username
        expectedauthcode = hmac.new(self.kuid, hmacdata, hashlib.sha1).digest()
        authcode = struct.pack("%dB" % len(data[8:]), *data[8:])
        if expectedauthcode != authcode:
            self.close_server_session()
            return
        clienttag = data[0]
        if data[1] != 0:
            self.close_server_session()
            return
        self.session.localsid = struct.unpack(
            '<I', struct.pack('4B', *self.managedsessionid))[0]
        logger.info('IPMI rakp3 request')
        self.session.ipmicallback = self.handle_client_request
        self._send_rakp4(clienttag, 0)

    def _send_rakp4(self, tagvalue, statuscode):
        """Send RAKP message 4 and finalize the session's crypto parameters."""
        payload = [tagvalue, statuscode, 0, 0] + self.clientsessionid
        hmacdata = self.Rm + self.managedsessionid + self.uuiddata
        hmacdata = struct.pack('%dB' % len(hmacdata), *hmacdata)
        # RAKP 4 integrity check value: first 12 bytes of HMAC-SHA1(SIK, ...).
        authdata = hmac.new(self.sik, hmacdata, hashlib.sha1).digest()[:12]
        payload += struct.unpack('%dB' % len(authdata), authdata)
        logger.info('IPMI rakp4 sent')
        self.session.send_payload(payload,
                                  constants.payload_types['rakp4'],
                                  retry=False)
        self.session.confalgo = 'aes'
        self.session.integrityalgo = 'sha1'
        self.session.sessionid = struct.unpack(
            '<I', struct.pack('4B', *self.clientsessionid))[0]

    def handle_client_request(self, request):
        """Serve post-handshake IPMI commands; unknown ones get 0xC1.

        request is a dict with 'netfn', 'command' and 'data' keys.
        """
        if request['netfn'] == 6 and request['command'] == 0x3b:
            # set session privilage level
            pendingpriv = request['data'][0]
            returncode = 0
            if pendingpriv > 1:
                if pendingpriv > self.maxpriv:
                    returncode = 0x81  # requested level not available
                else:
                    self.clientpriv = request['data'][0]
            self.session._send_ipmi_net_payload(code=returncode,
                                                data=[self.clientpriv])
            logger.info('IPMI response sent (Set Session Privilege) to %s',
                        self.session.sockaddr)
        elif request['netfn'] == 6 and request['command'] == 0x3c:
            # close session
            self.session.send_ipmi_response()
            logger.info('IPMI response sent (Close Session) to %s',
                        self.session.sockaddr)
            self.close_server_session()
        elif request['netfn'] == 6 and request['command'] == 0x44:
            # get user access
            reschan = request['data'][0]
            channel = reschan & 0b00001111
            resuid = request['data'][1]
            usid = resuid & 0b00011111
            if self.clientpriv > self.maxpriv:
                returncode = 0xd4  # insufficient privilege
            else:
                returncode = 0
            self.usercount = len(self.authdata.keys())
            self.channelaccess = 0b0000000 | self.privdata[list(
                self.authdata.keys())[usid - 1]]
            if self.channelaccessdata[list(self.authdata.keys())[usid -
                                                                 1]] == 'true':
                # channelaccess: 7=res; 6=callin; 5=link; 4=messaging; 3-0=privilege
                self.channelaccess |= 0b00110000
            data = list()
            data.append(self.usercount)
            data.append(sum(self.activeusers))
            data.append(sum(self.fixedusers))
            data.append(self.channelaccess)
            self.session._send_ipmi_net_payload(code=returncode, data=data)
            logger.info('IPMI response sent (Get User Access) to %s',
                        self.session.sockaddr)
        elif request['netfn'] == 6 and request['command'] == 0x46:
            # get user name
            userid = request['data'][0]
            returncode = 0
            username = list(self.authdata.keys())[userid - 1]
            data = list(username)
            while len(data) < 16:
                # filler
                data.append(0)
            self.session._send_ipmi_net_payload(code=returncode, data=data)
            logger.info('IPMI response sent (Get User Name) to %s',
                        self.session.sockaddr)
        elif request['netfn'] == 6 and request['command'] == 0x45:
            # set user name
            # TODO: fix issue where users can be overwritten
            # python does not support dictionary with duplicate keys
            userid = request['data'][0]
            # NOTE(review): str.strip(b'\x00') raises TypeError on Python 3;
            # this branch looks Python-2 only (see also iteritems below).
            username = ''.join(chr(x)
                               for x in request['data'][1:]).strip(b'\x00')
            oldname = list(self.authdata.keys())[userid - 1]
            # need to recreate dictionary to preserve order
            self.copyauth = collections.OrderedDict()
            self.copypriv = collections.OrderedDict()
            self.copychannel = collections.OrderedDict()
            index = 0
            for k, v in self.authdata.iteritems():
                if index == userid - 1:
                    self.copyauth.update({username: self.authdata[oldname]})
                    self.copypriv.update({username: self.privdata[oldname]})
                    self.copychannel.update(
                        {username: self.channelaccessdata[oldname]})
                else:
                    self.copyauth.update({k: v})
                    self.copypriv.update({k: self.privdata[k]})
                    self.copychannel.update({k: self.channelaccessdata[k]})
                index += 1
            self.authdata = self.copyauth
            self.privdata = self.copypriv
            self.channelaccessdata = self.copychannel
            returncode = 0
            self.session._send_ipmi_net_payload(code=returncode)
            logger.info('IPMI response sent (Set User Name) to %s',
                        self.session.sockaddr)
        elif request['netfn'] == 6 and request['command'] == 0x47:
            # set user passwd
            passwd_length = request['data'][0] & 0b10000000
            userid = request['data'][0] & 0b00111111
            username = list(self.authdata.keys())[userid - 1]
            operation = request['data'][1] & 0b00000011
            returncode = 0
            if passwd_length:
                # 20 byte
                passwd = ''.join(chr(x) for x in request['data'][2:22])
            else:
                # 16 byte
                passwd = ''.join(chr(x) for x in request['data'][2:18])
            if operation == 0:
                # disable user
                if self.activeusers[list(
                        self.authdata.keys()).index(username)]:
                    self.activeusers[list(
                        self.authdata.keys()).index(username)] = 0
            elif operation == 1:
                # enable user
                if not self.activeusers[list(
                        self.authdata.keys()).index(username)]:
                    self.activeusers[list(
                        self.authdata.keys()).index(username)] = 1
            elif operation == 2:
                # set passwd
                if len(passwd) not in [16, 20]:
                    returncode = 0x81
                # NOTE(review): str.strip(b'\x00') — Python 2 remnant, see above.
                self.authdata[username] = passwd.strip(b'\x00')
            else:
                # test passwd
                if len(passwd) not in [16, 20]:
                    returncode = 0x81
                if self.authdata[username] != passwd.strip(b'\x00'):
                    returncode = 0x80
            self.session._send_ipmi_net_payload(code=returncode)
            logger.info('IPMI response sent (Set User Password) to %s',
                        self.session.sockaddr)
        elif request['netfn'] in [0, 6] and request['command'] in [1, 2, 8, 9]:
            # chassis / basic application commands go to the fake BMC
            self.bmc.handle_raw_request(request, self.session)
        else:
            returncode = 0xc1  # invalid/unsupported command
            self.session._send_ipmi_net_payload(code=returncode)
            logger.info('IPMI unrecognized command from %s',
                        self.session.sockaddr)
            logger.info('IPMI response sent (Invalid Command) to %s',
                        self.session.sockaddr)

    def start(self, host, port):
        """Bind the UDP socket and serve forever (blocking)."""
        connection = (host, port)
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.setblocking(True)
        self.sock.bind(connection)
        self.server = DatagramServer(self.sock, self.handle)
        self.server.start()
        logger.info('IPMI server started on: %s',
                    (host, self.server.server_port))
        self.server.serve_forever()

    def stop(self):
        self.server.stop()
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):
    """ Persist the list of known nodes with their reputation

    Transport glue between a gevent ``DatagramServer`` bound to the
    configured UDP endpoint and a ``DiscoveryProtocol`` instance.
    """

    name = 'discovery'
    server = None  # will be set to DatagramServer

    default_config = {
        'discovery': {
            'listen_port': 30303,
            'listen_host': '0.0.0.0',
        },
        'node': {'privkey_hex': ''},
    }

    def __init__(self, app):
        BaseService.__init__(self, app)
        log.info('NodeDiscovery init')
        self.protocol = DiscoveryProtocol(app=self.app, transport=self)

    @property
    def address(self):
        """Configured listen endpoint as an Address."""
        cfg = self.app.config['discovery']
        return Address(cfg['listen_host'], cfg['listen_port'])

    def send(self, address, message):
        """Ship a raw datagram; on socket error drop it and back off 5s."""
        assert isinstance(address, Address)
        log.debug('sending', size=len(message), to=address)
        destination = (address.ip, address.udp_port)
        try:
            self.server.sendto(message, destination)
        except gevent.socket.error as exc:
            log.critical('udp write error', errno=exc.errno,
                         reason=exc.strerror)
            log.critical('waiting for recovery')
            gevent.sleep(5.)

    def receive(self, address, message):
        """Forward an inbound datagram to the discovery protocol."""
        assert isinstance(address, Address)
        self.protocol.receive(address, message)

    def _handle_packet(self, message, ip_port):
        """DatagramServer callback: convert (ip, port) into an Address."""
        log.debug('handling packet', address=ip_port, size=len(message))
        assert len(ip_port) == 2
        host, udp_port = ip_port
        self.receive(Address(ip=host, udp_port=udp_port), message)

    def start(self):
        """Bind the UDP listener, then bootstrap Kademlia from config."""
        log.info('starting discovery')
        cfg = self.app.config['discovery']
        ip = cfg['listen_host']
        port = cfg['listen_port']
        log.info('starting listener', port=port, host=ip)
        self.server = DatagramServer((ip, port), handle=self._handle_packet)
        self.server.start()
        super(NodeDiscovery, self).start()
        # bootstrap the routing table from any configured seed nodes
        nodes = [Node.from_uri(uri) for uri in cfg['bootstrap_nodes']]
        if nodes:
            self.protocol.kademlia.bootstrap(nodes)

    def _run(self):
        """Service loop: all work happens in callbacks, so park forever."""
        log.debug('_run called')
        gevent.event.Event().wait()

    def stop(self):
        log.info('stopping discovery')
        self.server.stop()
        super(NodeDiscovery, self).stop()
class UDPNode(NodeABC): def __init__(self, bind_address: Tuple[str, int] = None, handler=None): bind_address = bind_address or self.DEFAULT_ADDRESS handler = handler or (lambda data, address: None) def handle(data: bytes, address: Tuple[str, int]): deserialized = self.deserialize(data) handler(deserialized, address) super().__init__() self._bind_address = bind_address self._server = None # type: DatagramServer self._stop = False def run(): self._tls.gevent = True self._server = DatagramServer(self.bind_address, handle) self._server.start() while self.is_running() and not self._stop: gevent.sleep(0.5) self._server.stop() self._thread = threading.Thread(target=run, daemon=True) self._tls = threading.local() self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) def is_running(self): if self._server: return self._server.started return False @property def bind_address(self) -> Tuple[str, int]: if self._server: return self._server.address return self._bind_address def start(self): self._thread.start() n = 1000 for _ in range(n): if self.is_running(): return self time.sleep(1.0 / n) self.stop() raise Exception('start timeouted 1.0sec') def stop(self, timeout: float = None): self._stop = True self.join(timeout=timeout) return self def join(self, timeout: float = None): self._thread.join(timeout=timeout) def sendto(self, data: Any, address: Tuple[str, int]): serialized = self.serialize(data) assert len(serialized) <= 4096, 'len={} {}'.format( len(serialized), data) try: _ = self._tls.gevent self._server.sendto(serialized, address) except AttributeError: self._socket.sendto(serialized, address) def serialize(self, obj: Any): return pickle.dumps(obj) def deserialize(self, obj: Any): return pickle.loads(obj)
class nscl_dm_adapter(Plugin): def _init(self, ): self._initialized() def _start(self, ): self.sem = Semaphore() self.sem_counter = 0 self.set_configurations() self.api.run_task(self.create_server) self.subscribe_nscl() self.api.run_task(self.subscribe_dm_server) if self.config["enable_test"]: pass # self.api.run_task(self.send_execute_command) # Uncomment to check these operations # self.api.run_task(self.send_specific_observation) # self.api.run_task(self.send_specific_observation1) # self.api.run_task(self.send_cancel_observation) #self.api.run_task(self.send_discover_resources) #self.api.run_task(self.send_write_attributes) #self.api.run_task(self.send_create) self._started() def _stop(self, ): self.local_server.stop() self._stopped() def set_configurations(self, ): self.lwm2m_server_ip = self.config["lwm2m_dm_server_ip"] self.lwm2m_server_port = self.config["lwm2m_dm_server_port"] self.nscl_dm_adapter_listener_ip = self.config[ "nscl_dm_adapter_listener_ip"] self.nscl_dm_adapter_listener_port = self.config[ "nscl_dm_adapter_listener_port"] self.nscl_dm_adapter_client_ip = self.config[ "nscl_dm_adapter_client_ip"] self.nscl_dm_adapter_client_port = self.config[ "nscl_dm_adapter_client_port"] def create_server(self, ): self.local_server = DatagramServer( (self.nscl_dm_adapter_listener_ip, self.nscl_dm_adapter_listener_port), self.handle_request) self.local_server.start() def handle_request(self, message, remote): rx_record = connection.ReceptionRecord(None, message, remote) msg = rx_record.message uriQuery = msg.findOption(options.UriQuery) self.process(rx_record, remote, uriQuery) def process(self, rx_record, remote, uri_query): if rx_record.message.transaction_type == connection.Message.CON: if constants.POST == rx_record.message.code: if self.general_notification_token == rx_record.message.token: self.logger.info("General Notification received") msg = connection.Message(connection.Message.ACK, code=constants.CREATED) self.local_server.sendto( 
msg._pack(rx_record.transaction_id), remote) self.process_resources( json.loads(rx_record.message.payload)) else: self.logger.info("Specific Notification received") msg = connection.Message(connection.Message.ACK, code=constants.CREATED) self.local_server.sendto( msg._pack(rx_record.transaction_id), remote) payload = json.loads(rx_record.message.payload) observer_ip = payload["observer_ip"] observer_port = payload["observer_port"] del payload["observer_ip"] del payload["observer_port"] self.process_resources(payload, observer_ip=observer_ip, observer_port=observer_port) elif rx_record.message.transaction_type == connection.Message.NON: if self.general_notification_token == rx_record.message.token: self.logger.info("General Notification received") self.process_resources(json.loads(rx_record.message.payload)) else: self.logger.info("Specific Notification received") payload = json.loads(rx_record.message.payload) observer_ip = payload["observer_ip"] observer_port = payload["observer_port"] del payload["observer_ip"] del payload["observer_port"] self.process_resources(payload, observer_ip=observer_ip, observer_port=observer_port) def process_resources(self, payload, observer_ip=None, observer_port=None): total_resources = payload if observer_ip != None and observer_port != None: self.logger.info("The notification should be sent to %s:%s", observer_ip, observer_port) for ep_name, object_resources in total_resources.iteritems(): endpoint_name = ep_name for object_ids, resources in object_resources.iteritems(): object_id = object_ids.split("_")[0] object_inst_id = object_ids.split("_")[1] resources_dict = {} for res_ids, res_value in resources["resources"].iteritems(): res_id = res_ids.split("_")[0] res_inst_id = res_ids.split("_")[1] res_value = res_value resource_name = lwm2m_dict_objects[str( object_id)]["resource_list"][str(res_id)]["resName"] is_multi_inst = lwm2m_dict_objects[str( object_id)]["resource_list"][str(res_id)]["multiInst"] if not is_multi_inst: 
resources_dict.update({resource_name: res_value}) else: resources_dict.update({ resource_name + "_" + str(res_inst_id): res_value }) self.handle_m2m_server(endpoint_name, object_id, object_inst_id, res_id, res_inst_id, resource_name, res_value, resources_dict) def handle_m2m_server(self, endpoint_name, object_id, object_inst_id, res_id, res_inst_id, res_name, res_value, resources_dict): preferred_scl = endpoint_name.split("/")[0] if endpoint_name.find("attachedDevices") == -1: bool_attachedDevices = False else: attached_device_name = endpoint_name.split("/")[-1] bool_attachedDevices = True object_name = lwm2m_dict_objects[str(object_id)]["object_name"] resource_name = lwm2m_dict_objects[str(object_id)]["resource_list"][ str(res_id)]["resName"] moID_value = lwm2m_dict_objects[str(object_id)]["urn"] res_name_res_inst_id = resource_name + "_" + str(res_inst_id) def add_parameters(response): path = response.resource.path resource = ('{"mgmtObjs" : ' + json.dumps(resources_dict) + '}') request = UpdateRequestIndication(path, resource, content_type="application/json") response = self.api.handle_request_indication(request) def handle_mgmtobjs(response): mgmtobj_exists = False for mgmtobj in response.resource.mgmtObjCollection: if mgmtobj.name == object_name + "_" + str(object_inst_id): mgmtobj_exists = True path = mgmtobj.path request = RetrieveRequestIndication(path) response = self.api.handle_request_indication(request) try: if res_name_res_inst_id in response.value.resource.flex_values: if response.value.resource.flex_values[ res_name_res_inst_id] == str(res_value): continue elif res_name in response.value.resource.flex_values: if response.value.resource.flex_values[ res_name] == str(res_value): continue except: pass resource = ('{"mgmtObjs" : ' + json.dumps(resources_dict) + '}') request = UpdateRequestIndication( path, resource, content_type="application/json") response = self.api.handle_request_indication(request) break if not mgmtobj_exists: mgmtobj_ = 
MgmtObj(id=str(object_name) + "_" + str(object_inst_id), moID=moID_value) path = response.resource.path request = CreateRequestIndication(path, mgmtobj_) response = self.api.handle_request_indication(request) response.then(add_parameters) def retrieve_mgmtobjs(response): path = response.resource.path + "/mgmtObjs" request = RetrieveRequestIndication(path) response = self.api.handle_request_indication(request) response.then(handle_mgmtobjs) def handle_attached_devices(response): attached_device_exists = False for attached_device in response.resource.attachedDeviceCollection: if attached_device.name == attached_device_name: attached_device_exists = True path = attached_device.path + "/mgmtObjs" request = RetrieveRequestIndication(path) response = self.api.handle_request_indication(request) response.then(handle_mgmtobjs) break if not attached_device_exists: attached_device_object = AttachedDevice( id=attached_device_name) path = response.resource.path request = CreateRequestIndication( path=path, resource=attached_device_object) response = self.api.handle_request_indication(request) response.then(retrieve_mgmtobjs) def retrieve_attached_devices(response): path = response.resource.path + "/attachedDevices" request = RetrieveRequestIndication(path) response = self.api.handle_request_indication(request) response.then(handle_attached_devices) def handle_scl(response): scl_exists = False for _scl in response.resource.sclCollection: if _scl.name == preferred_scl: scl_exists = True if bool_attachedDevices: path = _scl.path + "/attachedDevices" else: path = _scl.path + "/mgmtObjs" request = RetrieveRequestIndication(path) response = self.api.handle_request_indication(request) if bool_attachedDevices: response.then(handle_attached_devices) else: response.then(handle_mgmtobjs) break if not scl_exists: scl_object = Scl(sclId=preferred_scl, link="127.0.0.1", sclType="GSCL", mgmtProtocolType="LWM2M") request = CreateRequestIndication(path="/m2m/scls", resource=scl_object) response 
= self.api.handle_request_indication(request) if bool_attachedDevices: response.then(retrieve_attached_devices) else: response.then(retrieve_mgmtobjs) path = "/m2m/scls" request = RetrieveRequestIndication(path) response = self.api.handle_request_indication(request) response.then(handle_scl) def _handle_mgmtcmd_created(self, instance, request_indication): pass def _handle_mgmtcmd_updated(self, instance, request_indication): pass def _handle_mgmtobj_created(self, instance, request_indication): pass def _handle_mgmtobj_updated(self, instance, request_indication): filter_keyword = "TransportMgmtPolicy" filter_keyword1 = "DeviceCapability" mgmtobj_name = instance.path.split("/")[-1] if mgmtobj_name.startswith(filter_keyword): self.handle_transport_mgmt_policy(instance, mgmtobj_name) elif mgmtobj_name.startswith(filter_keyword1): self.handle_device_capability(instance, mgmtobj_name, request_indication) def handle_device_capability(self, instance, mgmtobj_name, request_indication): generate_endpoint = instance.path.split("/")[3:-2] endpoint_name = "/".join(generate_endpoint) object_name = mgmtobj_name.split("_")[0] object_id = lwm2m_reverse_dict_objects[object_name]["object_id"] object_inst_id = mgmtobj_name.split("_")[1] if "opEnable" in request_indication.resource and "opDisable" in request_indication.resource: return elif "opEnable" in request_indication.resource: res_id = 5 res_inst_id = 0 elif "opDisable" in request_indication.resource: res_id = 6 res_inst_id = 0 else: return self.send_execute_resource(endpoint_name, object_id, object_inst_id, res_id, res_inst_id) def handle_transport_mgmt_policy(self, instance, mgmtobj_name): res_value_exists = False resources_dict = {} total_dict = {} endpoint_dict = {} generate_endpoint = instance.path.split("/")[3:-2] endpoint_name = "/".join(generate_endpoint) object_name = mgmtobj_name.split("_")[0] object_id = lwm2m_reverse_dict_objects[object_name]["object_id"] object_inst_id = mgmtobj_name.split("_")[1] for key, value in 
instance.flex_values.iteritems():
            # NOTE(review): keys appear to follow "<res_name>" or
            # "<res_name>_<inst_id>" — confirm against the writers of flex_values.
            res_name = key.split("_")[0]
            try:
                res_inst_id = key.split("_")[1]
            except:
                # NOTE(review): bare except — IndexError (no "_suffix") is the
                # expected case; anything else is silently mapped to instance 0.
                res_inst_id = 0
            res_value = value
            res_id = lwm2m_reverse_dict_objects[object_name]["resource_list"][
                res_name]["resId"]
            resources_dict.update(
                {res_id: {
                    "res_inst_id": res_inst_id,
                    "res_value": res_value
                }})
            if res_value != "" and not res_value_exists:
                res_value_exists = True
        if res_value_exists:
            # At least one non-empty value was collected: push the whole batch
            # to the LWM2M server in a single Write.
            self.logger.info("Sending the Resource Updates to LWM2M Server")
            payload = json.dumps(resources_dict)
            content_type = "application/json"
            request = lwm2m_api()
            # Semaphore serialises client-port allocation with the send itself.
            self.sem.acquire()
            client_port = self.generate_client_port()
            response = request.write_resource(self.lwm2m_server_ip,
                                              self.lwm2m_server_port,
                                              endpoint_name,
                                              object_id,
                                              payload,
                                              content_type,
                                              object_inst_id=object_inst_id,
                                              client_port=client_port)
            self.sem.release()

    def generate_client_port(self, ):
        """Return the next source port: base port + a counter cycling 1..1000."""
        if self.sem_counter >= 1000:
            self.sem_counter = 0
        self.sem_counter += 1
        sem_counter = self.sem_counter
        client_port = self.nscl_dm_adapter_client_port + sem_counter
        return client_port

    def subscribe_dm_server(self, ):
        """Register this adapter with the LWM2M DM server as a general observer.

        On failure the error callback simply re-issues the subscription, i.e.
        this retries indefinitely until it succeeds.
        """
        self.logger.info(
            "Trying to subscribe to LWM2M DM Server for General Subscription")
        payload = json.dumps({"listener_ip": self.nscl_dm_adapter_listener_ip,
                              "listener_port": self.nscl_dm_adapter_listener_port})
        content_type = "application/json"
        request = lwm2m_api()
        response = request.observe_resource(
            self.lwm2m_server_ip,
            self.lwm2m_server_port,
            payload=payload,
            content_type=content_type,
            client_port=self.generate_client_port())

        def _handle_response(response):
            # Remember the token so later notifications can be correlated.
            self.logger.info(
                "Successfully subscribed to LWM2M DM Server for General Subscription"
            )
            self.general_notification_token = response.token

        def _handle_error(*args):
            # Retry the whole subscription on any error.
            self.subscribe_dm_server()

        response.then(_handle_response, _handle_error)

    def subscribe_nscl(self, ):
        """Hook mgmt-object and mgmt-command create/update events from the NSCL."""
        self.events.resource_created.register_handler(
            self._handle_mgmtobj_created, MgmtObj)
        self.events.resource_updated.register_handler(
            self._handle_mgmtobj_updated, MgmtObj)
        self.events.resource_created.register_handler(
            self._handle_mgmtcmd_created, MgmtCmd)
        self.events.resource_updated.register_handler(
            self._handle_mgmtcmd_updated, MgmtCmd)

    def send_discover_resources(self, ):
        """Discover all resources from the DM server and print them.

        NOTE(review): the sleep is a crude startup delay — presumably waiting
        for registration to finish; confirm and replace with an event if so.
        """
        sleep(20)
        self.logger.info("Sending discover request to Dm server")
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        payload = "/.well-known/core"
        request = lwm2m_api()
        response = request.discover_resources(
            server_ip,
            server_port,
            payload=payload,
            client_port=self.generate_client_port())
        discover = Discovery()
        payload = json.loads(response.payload)
        discover.display_all_resources(payload)

    def send_write_attributes(self, ):
        """Send a hard-coded Write-Attributes (pmin/pmax) demo request."""
        sleep(10)
        self.logger.info("Sending attributes info to DM server")
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        # Hard-coded demo target: device object 3, instance 0, resource 1.
        endpoint_name = "emulated_device_nb_0"
        object_id = 3
        object_inst_id = 0
        res_id = 1
        res_inst_id = 0
        pmax = 50
        pmin = 10
        gt = None
        lt = None
        st = None
        cancel = None
        content_type = "application/json"
        payload = json.dumps({
            "pmax": pmax,
            "pmin": pmin,
            "gt": gt,
            "lt": lt,
            "st": st,
            "cancel": cancel
        })
        request = lwm2m_api()
        response = request.write_attributes(
            server_ip,
            server_port,
            endpoint_name,
            object_id,
            payload,
            content_type,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

    def send_create(self, ):
        """Send a hard-coded Create-Object-Instance demo request."""
        sleep(10)
        self.logger.info("Sending create info to DM server")
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        endpoint_name = "emulated_device_nb_0"
        object_id = 3
        object_inst_id = 4
        res_id = 0
        res_inst_id = 0
        res_value = "fokus"
        res_id_res_inst_id = str(res_id) + "_" + str(res_inst_id)
        payload = {}
        # NOTE(review): duplicate of the assignment two lines above — redundant.
        res_id_res_inst_id = str(res_id) + "_" + str(res_inst_id)
        payload[res_id_res_inst_id] = {
            "res_id": res_id,
            "res_inst_id": res_inst_id,
            "res_value": res_value
        }
        content_type = "application/json"
        request = lwm2m_api()
        response = request.create_object_instance(
            server_ip,
            server_port,
            endpoint_name,
            object_id,
            json.dumps(payload),
            content_type,
            object_inst_id=object_inst_id,
            client_port=self.generate_client_port())

    def send_specific_observation(self, ):
        """Observe one specific resource (demo target: PulseOximeter /4200/0/1)."""
        sleep(15)
        self.logger.info("Sending specific observation to DM server")
        app_ip = "localhost"
        app_port = "1111"
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        endpoint_name = "gscl/attachedDevices/PulseOximeter"
        object_id = 4200
        object_inst_id = 0
        res_id = 1
        res_inst_id = 0
        request = lwm2m_api()
        response = request.observe_resource(
            server_ip,
            server_port,
            app_ip=app_ip,
            app_port=app_port,
            endpoint_name=endpoint_name,
            object_id=object_id,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

        def _handle_response(response):
            self.logger.info("response token: %s", response.token)

        response.then(_handle_response)

    def send_specific_observation1(self, ):
        """Observe a second demo resource (flat endpoint name, /4200/0/0)."""
        sleep(20)
        self.logger.info("Sending specific observation to DM server")
        app_ip = "localhost"
        app_port = "1115"
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        endpoint_name = "gscl_PulseOximeter"
        object_id = 4200
        object_inst_id = 0
        res_id = 0
        res_inst_id = 0
        request = lwm2m_api()
        response = request.observe_resource(
            server_ip,
            server_port,
            app_ip=app_ip,
            app_port=app_port,
            endpoint_name=endpoint_name,
            object_id=object_id,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

        def _handle_response(response):
            self.logger.info("response token: %s", response.token)

        response.then(_handle_response)

    def send_cancel_observation(self, ):
        """Cancel the observation previously set by send_specific_observation."""
        sleep(22)
        self.logger.info("Sending Cancel Observation to DM server")
        app_ip = "localhost"
        app_port = "1111"
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        endpoint_name = "gscl/attachedDevices/PulseOximeter"
        object_id = 4200
        object_inst_id = 0
        res_id = 1
        res_inst_id = 0
        request = lwm2m_api()
        response = request.cancel_observe_resource(
            server_ip,
            server_port,
            app_ip,
            app_port,
            endpoint_name,
            object_id,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

        def _handle_response(response):
            self.logger.info("response token: %s", response.token)
            self.logger.info("response %s", response.payload)

        response.then(_handle_response)

    def send_execute_resource(self, endpoint_name, object_id, object_inst_id,
                              res_id, res_inst_id, payload=None):
        """Execute a resource on the DM server, then mirror the mapped
        target-resource value into the M2M resource tree.

        NOTE(review): the `payload` parameter is immediately overwritten with
        None below — the parameter is effectively dead; confirm intent.
        """
        self.logger.info("Sending execution to DM server")
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        payload = None
        request = lwm2m_api()
        response = request.execute_resource(
            server_ip,
            server_port,
            endpoint_name,
            object_id,
            object_inst_id,
            res_id,
            res_inst_id=res_inst_id,
            payload=payload,
            client_port=self.generate_client_port())
        self.logger.info("Updating M2M Resource Tree")
        resources_dict = {}
        object_id_res_id = str(object_id) + "/" + str(res_id)
        # Only executions with an "<object_id>/<res_id>" entry in action_mapping
        # are reflected back into the resource tree.
        if object_id_res_id in action_mapping:
            res_id = action_mapping[object_id_res_id]["target_res_id"]
            res_value = action_mapping[object_id_res_id]["target_action"]
            res_name = lwm2m_dict_objects[str(object_id)]["resource_list"][str(
                res_id)]["resName"]
            is_multi_inst = lwm2m_dict_objects[str(
                object_id)]["resource_list"][str(res_id)]["multiInst"]
            # Multi-instance resources are keyed "<name>_<inst_id>".
            if not is_multi_inst:
                resources_dict.update({res_name: res_value})
            else:
                resources_dict.update(
                    {res_name + "_" + str(res_inst_id): res_value})
            self.handle_m2m_server(endpoint_name, object_id, object_inst_id,
                                   res_id, res_inst_id, res_name, res_value,
                                   resources_dict)
class LocalClientCore(LoggerMixin):
    """LWM2M local client core.

    Hosts a gevent DatagramServer speaking CoAP/LWM2M, dispatches incoming
    operations (create/execute/write/write-attributes/observe/discover) to the
    per-operation engines, and forwards resulting resource changes to the
    DM adapter and the LWM2M DM server.
    """

    def __init__(self, local_listener_ip, local_listener_port, lwm2m_server_ip,
                 lwm2m_server_port, local_client_ip, local_client_port):
        # endpoint name -> registration location on the DM server
        self.ep_location_mapping = {}
        # resources buffered until the endpoint's DM location is known
        self.total_resources = {}
        self.res_dict = {}
        self.lwm2m_dm_server_ip = lwm2m_server_ip
        self.lwm2m_dm_server_port = lwm2m_server_port
        self.sem = Semaphore()
        self.local_listener_ip = local_listener_ip
        self.local_listener_port = local_listener_port
        self.local_client_ip_ = local_client_ip
        self.local_client_port = local_client_port  #local_client_port
        #self.local_client_port_end = local_client_port_end #local_client_port
        self.dispatcher = EventDispatcher()
        # Resource tree plus one engine object per LWM2M operation.
        self.lwm2m_resources = LWM2MResourceTree(self.dispatcher)
        self.registration = Registration(self.lwm2m_resources)
        self.read = Read(self.lwm2m_resources)
        self.write = Write(self.lwm2m_resources)
        self.write_attributes = WriteAttributes(self.lwm2m_resources)
        self.create_object_instance = Create(self.lwm2m_resources)
        self.observation = ObservationNotificationEngine(
            self.lwm2m_resources, self.dispatcher)
        self.execution = Execution(self.lwm2m_resources)
        self.discover = Discovery(lwm2m_resources=self.lwm2m_resources)
        self.observation_started = False

    def load_dm_adapter(self, dm_adapter):
        # Late-bound collaborator that mirrors changes to the M2M side.
        self.dm_adapter = dm_adapter

    def create_server(self, local_listener_ip=None):
        """Create and start the local gevent DatagramServer.

        The server listens on (local_listener_ip, local_listener_port) and
        hands every datagram to handle_lwm2m_request.
        """
        if local_listener_ip is not None:
            self.local_listener_ip = local_listener_ip
        self.logger.info("Local Server Created")
        self.logger.info("local_listener_ip %s", self.local_listener_ip)
        self.logger.info("local_listener_port %s", self.local_listener_port)
        self.local_server = DatagramServer(
            (self.local_listener_ip, self.local_listener_port),
            self.handle_lwm2m_request)
        self.local_server.start()

    def stop_server(self, ):
        """ Stops the local server """
        self.local_server.stop()

    def handle_lwm2m_request(self, message, remote):
        """Entry point for every datagram: decode the CoAP message and route it."""
        rx_record = connection.ReceptionRecord(None, message, remote)
        msg = rx_record.message
        uri_query = msg.findOption(options.UriQuery)
        self.process(rx_record, remote, uri_query)

    """ Used for Create Object Instance, Execution Operation Request """

    def handle_lwm2m_post(self, msg, uri_query, remote, rx_record):
        """Handle POST: ?method=create or ?method=execute."""
        method = None
        try:
            method = uri_query[0].value.split("=")[1]
        except:
            # NOTE(review): bare except — a missing/malformed uri-query leaves
            # method None and the request falls through unanswered.
            pass
        if method == "create":
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            self.create_object_instance.create_instance(
                path, remote, content_type, loads(msg.payload))
            # ACK with 2.01 Created, echoing the original transaction id.
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CREATED,
                                     payload="Resource Created")
            self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                     remote)
        elif method == "execute":
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            endpoint_name, object_id, object_inst_id, res_id, res_value = \
                self.execution.execute_resource(path, remote, msg.payload)
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CHANGED,
                                     payload="Resource Executed")
            self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                     remote)
            # Mirror the execution result to the DM adapter as a JSON update.
            resource = {}
            resource[res_id] = {"res_value": res_value}
            content_type = "application/json"
            self.dm_adapter.update_resources(endpoint_name, object_id,
                                             object_inst_id, dumps(resource),
                                             content_type=content_type)

    """ It consists of Normal Update, Write Operation, Write Attribute
    Operation. Write Operation is used to update the resource(s) as per the
    request. Write Attributes operation is used to update the attributes of
    the object, object instance or resource. """

    def handle_lwm2m_put(self, msg, remote, rx_record):
        """Handle PUT: ?method=write or ?method=write_attributes."""
        uri_query = msg.findOption(options.UriQuery)
        method = None
        try:
            method = uri_query[0].value.split("=")[1]
        except:
            # NOTE(review): bare except, see handle_lwm2m_post.
            pass
        if method == "write":
            self.logger.info("Updating the Resources in the Client")
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            self.write.write_resource(msg.payload, path, content_type)
            # Keep the payload before `msg` is rebound to the ACK below.
            payload_forward = msg.payload
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CHANGED,
                                     payload="CHANGED")
            self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                     remote)
            endpoint_name, object_id, object_inst_id, res_id, res_inst_id, _, _ = OperationRequest(
            ).find_elements(path, remote)
            self.dm_adapter.update_resources(endpoint_name, object_id,
                                             object_inst_id, payload_forward,
                                             content_type=content_type)
        elif method == "write_attributes":
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            payload = loads(msg.payload)
            self.write_attributes.set_attributes(path, remote, payload)
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CHANGED,
                                     payload="Resource Attributes Changed")
            self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                     remote)

    """ Sets the Observation. Two types of observations. General Observation
    and Specific Observation. General Observation is used for anything that is
    not observed and updates are sent as general notifications using a general
    token. Specific observation is implicitly defined by the observer(as
    request) and handled as specific notification with a specific token """

    def handle_lwm2m_observe(self, msg, remote, rx_record):
        """Set a general (path length 1) or specific observation."""
        path = msg.findOption(URI_PATH_VALUE)
        if len(path) == 1:
            token_id = self.set_generation_observation_params(msg)
            payload = "General Observation Started at the Client"
            content_type = "text/plain"
        else:
            self.logger.info("Specific Observation Received")
            endpoint_name, object_id, object_inst_id, res_id, res_inst_id, _, _ = OperationRequest(
            ).find_elements(path, remote)
            token_id = msg.token
            payload = msg.payload
            self.observation.set_observation(endpoint_name,
                                             object_id,
                                             object_inst_id,
                                             res_id,
                                             token_id,
                                             payload,
                                             self.lwm2m_dm_server_ip,
                                             self.lwm2m_dm_server_port,
                                             res_inst_id=res_inst_id)
        # ACK carries the observation token back to the observer.
        msg = connection.Message(connection.Message.ACK,
                                 code=constants.CONTENT)
        self.local_server.sendto(
            msg._pack(rx_record.transaction_id, token_id), remote)

    """ Removes the observation from the List """

    def handle_lwm2m_cancel_observe(self, msg, remote, rx_record):
        """Cancel a previously-set observation and ACK with the engine's reply."""
        self.logger.info("Cancel Observation Request Received")
        path = msg.findOption(URI_PATH_VALUE)
        endpoint_name, object_id, object_inst_id, res_id, res_inst_id, _, _ = OperationRequest(
        ).find_elements(path, remote)
        token_id = msg.token
        payload = msg.payload
        message = self.observation.cancel_observation(
            endpoint_name,
            object_id,
            object_inst_id,
            res_id,
            token_id,
            payload,
            self.lwm2m_dm_server_ip,
            self.lwm2m_dm_server_port,
            res_inst_id=res_inst_id)
        msg = connection.Message(connection.Message.ACK,
                                 code=constants.CONTENT,
                                 payload=message)
        self.local_server.sendto(msg._pack(rx_record.transaction_id), remote)

    def process(self, rx_record, remote, uri_query):
        """ Processes various requests like CON (POST, PUT, GET) or NON.
        POST requests : Generally used for Registration and Execution
        PUT requests : Generally used for updating the resources
        GET requests : Generally used for Discovery, Observation, Cancel
        Observation """
        msg = rx_record.message
        self.uri_query = uri_query
        if msg.transaction_type == connection.Message.CON:
            if constants.POST == msg.code:
                """ Used for Registration requests, Execution Operation
                Request """
                self.handle_lwm2m_post(msg, uri_query, remote, rx_record)
            elif constants.PUT == msg.code:
                """ It consists of Normal Update, Write Operation, Write
                Attribute Operation. Write Operation is used to update the
                resource(s) as per the request. Write Attributes operation is
                used to update the attributes of the object, object instance
                or resource. """
                self.handle_lwm2m_put(msg, remote, rx_record)
            elif constants.GET == msg.code:
                """ Handles Requests like Discovery, Observation """
                try:
                    observe_value = msg.findOption(options.Observe).value
                except:
                    # NOTE(review): bare except — absence of the Observe option
                    # (AttributeError on None) is treated as "not an observe".
                    observe_value = ""
                if observe_value == OBSERVE_OPTION_VALUE_OBSERVATION:
                    """ Sets the Observation. Two types of observations.
                    General Observation and Specific Observation. General
                    Observation is used for anything that is not observed and
                    updates are sent as general notifications using a general
                    token. Specific observation is implicitly defined by the
                    observer(as request) and handled as specific notification
                    with a specific token """
                    self.handle_lwm2m_observe(msg, remote, rx_record)
                elif observe_value == OBSERVE_OPTION_VALUE_CANCEL_OBSERVATION:
                    """ Removes the observation from the List """
                    self.handle_lwm2m_cancel_observe(msg, remote, rx_record)
                else:
                    uri_query = msg.findOption(options.UriQuery)
                    method = None
                    try:
                        method = uri_query[0].value.split("=")[1]
                    except:
                        pass
                    if method == "discover":
                        path = msg.findOption(URI_PATH_VALUE)
                        payload = self.discover.get_resource(path, remote)
                        msg = connection.Message(connection.Message.ACK,
                                                 code=constants.CONTENT,
                                                 payload=dumps(payload))
                        self.local_server.sendto(
                            msg._pack(rx_record.transaction_id), remote)

    def set_generation_observation_params(self, msg):
        """Record the general observer's listener address; return its token."""
        listener_address = json.loads(msg.payload)
        listener_ip = listener_address["listener_ip"]
        listener_port = listener_address["listener_port"]
        token_id = msg.token
        self.general_observation = GeneralObservationInformation(
            listener_ip, listener_port, token_id)
        return token_id

    def send_client_registration(self, endpoint, local_client_port):
        """ Client registration request to the LWM2M server """
        self.logger.info(
            "Preparing Client Registration parameters for LWM2M DM Server")
        registration_params = {
            "lt": self.lifetime,
            "lwm2m": self.version,
            "sms": self.sms_number,
            "b": self.binding_mode
        }
        client_object, response = self.registration.send_client_registration(
            endpoint, registration_params, self.lwm2m_dm_server_ip,
            self.lwm2m_dm_server_port, self.local_listener_ip,
            self.local_listener_port, local_client_port)
        self.client = client_object

        def _handle_response(response):
            # The DM server's Location option identifies this registration.
            location_address = response.findOption(LOCATION_VALUE)[0].value
            self.logger.debug(
                "The registered location address of Client in DM Server is %s",
                location_address)
            self.ep_location_mapping[endpoint.endpoint_name] = location_address
            # Flush any resources that were buffered before the location
            # became known (iterate over a copy; we delete as we go).
            temp_total_resources = deepcopy(self.total_resources)
            for ep_name, resdict in temp_total_resources.iteritems():
                for mgmt_obj_id, resources in resdict.iteritems():
                    if self.ep_location_mapping.has_key(ep_name):
                        self.logger.info(
                            "Endpoint Location now available. Forwarding saved resources"
                        )
                        self.send_add_resources(resources, ep_name,
                                                mgmt_obj_id)
                        del self.total_resources[ep_name][mgmt_obj_id]
                        if not any(self.total_resources[ep_name]):
                            del self.total_resources[ep_name]
            return location_address

        return response.then(_handle_response)

    def load_registration_params(self,
                                 lifetime=None,
                                 version=None,
                                 sms_number=None,
                                 binding_mode=None):
        # Stash the LWM2M registration parameters used by later registrations.
        self.lifetime = lifetime
        self.version = version
        self.sms_number = sms_number
        self.binding_mode = binding_mode

    def local_registration(self, endpoint_name, local_client_port):
        """ Local registration of the resources in the local server """
        self.logger.info("Local Registration Started for Endpoint: %s",
                         endpoint_name)
        self_object = Endpoint(endpoint_name, objects=None,
                               lifetime=self.lifetime, version=self.version,
                               sms_number=self.sms_number,
                               binding_mode=self.binding_mode,
                               local_ip=self.local_client_ip_,
                               local_port=local_client_port,
                               listener_ip=self.local_listener_ip,
                               listener_port=self.local_listener_port)
        endpoint = self_object.endpoint
        response = self.registration.register_client(endpoint)
        """ Sending Client Registration to the DM Server """
        registration_location = self.send_client_registration(
            endpoint, local_client_port)
        return endpoint, registration_location

    def add_resource(self, emulated_device_name, lwm2m_mgmt_obj_id,
                     lwm2m_resource_id, param_value,
                     lwm2m_mgmt_obj_inst_id=None, lwm2m_resource_inst_id=None):
        """Add/update one resource instance in the local resource model."""
        self.logger.info("Adding Resources in the Resource Model")
        endpoint = self.lwm2m_resources.return_endpoint_object(
            emulated_device_name)
        resource_change_flag = self.lwm2m_resources.add_object_instance_resource_instance(
            endpoint, lwm2m_mgmt_obj_id, lwm2m_resource_id, param_value,
            object_inst_id=lwm2m_mgmt_obj_inst_id,
            res_inst_id=lwm2m_resource_inst_id)
        return resource_change_flag

    def send_add_resources(self, object_and_resources, endpoint_name,
                           mgmt_obj_id_inst_id):
        """Push resources to the DM server via registration-update, or buffer
        them until the endpoint's registration location is known."""
        if self.ep_location_mapping.has_key(endpoint_name):
            location_address = self.ep_location_mapping[endpoint_name]
        else:
            location_address = None
        if location_address == None:
            self.logger.warning(
                "Location couldn't be fetched !! Saving the Resources")
            self.res_dict[mgmt_obj_id_inst_id] = object_and_resources
            self.total_resources[endpoint_name] = self.res_dict
        else:
            self.logger.info("Sending Updates on the Resources..")
            path = location_address
            query_params = ""
            payload = json.dumps(object_and_resources)
            request = lwm2m_api()
            response = request.client_registration_update(
                self.lwm2m_dm_server_ip, self.lwm2m_dm_server_port, path,
                query_params, payload, client=self.client)

    def send_total_resources(self, ):  #Not used currently
        """Push the whole buffered resource set in one registration update."""
        self.logger.info("Sending Updates on the Resources")
        path = self.location_address
        query_params = ""
        payload = json.dumps(self.total_resources)
        request = lwm2m_api()
        response = request.client_registration_update(
            self.lwm2m_dm_server_ip, self.lwm2m_dm_server_port, path,
            query_params, payload, client_port=self.local_client_port)
        self.total_resources = {}
class BacnetServer(object):
    """UDP BACnet honeypot: decodes incoming APDUs and replies via BACnetApp."""

    def __init__(self, template, template_directory, args):
        # Pull the emulated device's identity out of the XML template.
        self.dom = etree.parse(template)
        info = self.dom.xpath("//bacnet/device_info")[0]

        def field(expr):
            # First text node under device_info for the given relative path.
            return info.xpath(expr)[0]

        self.thisDevice = LocalDeviceObject(
            objectName=field("./device_name/text()"),
            objectIdentifier=int(field("./device_identifier/text()")),
            maxApduLengthAccepted=int(field("./max_apdu_length_accepted/text()")),
            segmentationSupported=field("./segmentation_supported/text()"),
            vendorName=field("./vendor_name/text()"),
            vendorIdentifier=int(field("./vendor_identifier/text()")),
        )
        self.bacnet_app = None
        self.server = None  # created lazily in start()
        logger.info("Conpot Bacnet initialized using the %s template.", template)

    def handle(self, data, address):
        """Per-datagram handler: log the session, decode, and answer."""
        peer_ip, peer_port = address[0], address[1]
        session = conpot_core.get_session(
            "bacnet",
            peer_ip,
            peer_port,
            get_interface_ip(peer_ip),
            self.server.server_port,
        )
        logger.info("New Bacnet connection from %s:%d. (%s)",
                    peer_ip, peer_port, session.id)
        session.add_event({"type": "NEW_CONNECTION"})
        # NOTE: datagrams fragmented above the MTU are not reassembled here.
        if data:
            pdu = PDU()
            pdu.pduData = bytearray(data)
            apdu = APDU()
            try:
                apdu.decode(pdu)
            except DecodingError:
                logger.warning("DecodingError - PDU: {}".format(pdu))
                return
            self.bacnet_app.indication(apdu, address, self.thisDevice)
            # send an appropriate response from BACnet app to the attacker
            self.bacnet_app.response(self.bacnet_app._response, address)
        logger.info("Bacnet client disconnected %s:%d. (%s)",
                    peer_ip, peer_port, session.id)

    def start(self, host, port):
        """Bind the UDP server, wire up BACnetApp, and serve forever."""
        self.server = DatagramServer((host, port), self.handle)
        self.server.start()  # binds the socket so options can be set below
        self.server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.host = self.server.server_host
        self.port = self.server.server_port
        # BACnetApp needs the bound server to reach the socket's sendto;
        # this could be refactored to send on its own (non-bound) socket.
        self.bacnet_app = BACnetApp(self.thisDevice, self.server)
        self.bacnet_app.get_objects_and_properties(self.dom)
        logger.info("Bacnet server started on: %s", (self.host, self.port))
        self.server.serve_forever()

    def stop(self):
        self.server.stop()
class BacnetServer(object):
    """Legacy Bacnet honeypot server (device name resolved via the databus)."""

    def __init__(self, template, template_directory, args):
        self.dom = etree.parse(template)
        databus = conpot_core.get_databus()
        info = self.dom.xpath('//bacnet/device_info')[0]

        def field(expr):
            # First text node under device_info for the given relative path.
            return info.xpath(expr)[0]

        # The device name in the template is a databus key, not a literal.
        name_key = databus.get_value(field('./device_name/text()'))
        # self.local_device_address = dom.xpath('./@*[name()="host" or name()="port"]')
        self.thisDevice = LocalDeviceObject(
            objectName=name_key,
            objectIdentifier=int(field('./device_identifier/text()')),
            maxApduLengthAccepted=int(field('./max_apdu_length_accepted/text()')),
            segmentationSupported=field('./segmentation_supported/text()'),
            vendorName=field('./vendor_name/text()'),
            vendorIdentifier=int(field('./vendor_identifier/text()')))
        self.bacnet_app = None
        logger.info('Conpot Bacnet initialized using the %s template.',
                    template)

    def handle(self, data, address):
        """Per-datagram handler: log the session, decode, and answer."""
        peer_ip, peer_port = address[0], address[1]
        session = conpot_core.get_session('bacnet', peer_ip, peer_port,
                                          self.host, self.port)
        logger.info('New Bacnet connection from %s:%d. (%s)',
                    peer_ip, peer_port, session.id)
        session.add_event({'type': 'NEW_CONNECTION'})
        # NOTE: datagrams fragmented above the MTU are not reassembled here.
        if data:
            pdu = PDU()
            pdu.pduData = data
            apdu = APDU()
            try:
                apdu.decode(pdu)
            except DecodingError as e:
                logger.error("DecodingError: %s", e)
                logger.error("PDU: " + format(pdu))
                return
            self.bacnet_app.indication(apdu, address, self.thisDevice)
            self.bacnet_app.response(self.bacnet_app._response, address)
        logger.info('Bacnet client disconnected %s:%d. (%s)',
                    peer_ip, peer_port, session.id)

    def start(self, host, port):
        """Bind the UDP server, wire up BACnetApp, and serve forever."""
        self.host = host
        self.port = port
        self.server = DatagramServer((host, port), self.handle)
        self.server.start()  # binds the socket so options can be set below
        self.server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # BACnetApp needs the bound server to reach the socket's sendto;
        # this could be refactored to send on its own (non-bound) socket.
        self.bacnet_app = BACnetApp(self.thisDevice, self.server)
        self.bacnet_app.get_objects_and_properties(self.dom)
        logger.info('Bacnet server started on: %s', (host, port))
        self.server.serve_forever()

    def stop(self):
        self.server.stop()
class UDPTransport:
    """ Node communication using the UDP protocol. """

    def __init__(
            self,
            host,
            port,
            socket=None,
            protocol=None,
            throttle_policy=None):
        """Create the transport and bind a gevent DatagramServer.

        Args:
            host (str): Interface to bind when `socket` is not given.
            port (int): Port to bind when `socket` is not given.
            socket: An already-bound socket to reuse instead of (host, port).
            protocol: The protocol object whose `.receive` gets each datagram.
            throttle_policy: Rate-limit policy; defaults to a fresh DummyPolicy.
        """
        self.protocol = protocol

        if socket is not None:
            self.server = DatagramServer(socket, handle=self.receive)
        else:
            self.server = DatagramServer((host, port), handle=self.receive)
        self.host = self.server.server_host
        self.port = self.server.server_port
        # Fix: the previous default `throttle_policy=DummyPolicy()` was
        # evaluated once at definition time, so every transport instance
        # shared the same (potentially stateful) policy object. Use a None
        # sentinel and build a fresh policy per instance instead.
        self.throttle_policy = (
            DummyPolicy() if throttle_policy is None else throttle_policy
        )

    def receive(self, data, host_port):  # pylint: disable=unused-argument
        """Dispatch one datagram to the protocol; swallow decode/shutdown errors."""
        try:
            self.protocol.receive(data)
        except InvalidProtocolMessage as e:
            log.warning("Can't decode: {} (data={}, len={})".format(str(e), data, len(data)))
            return
        except RaidenShuttingDown:  # For a clean shutdown
            return

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.track_recv(self.protocol.raiden, host_port, data)

    def send(self, sender, host_port, bytes_):
        """ Send `bytes_` to `host_port`.

        Args:
            sender (address): The address of the running node.
            host_port (Tuple[(str, int)]): Tuple with the host name and port number.
            bytes_ (bytes): The bytes that are going to be sent through the wire.
        """
        sleep_timeout = self.throttle_policy.consume(1)

        # Don't sleep if timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing it's latency
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        if not hasattr(self.server, 'socket'):
            raise RuntimeError('trying to send a message on a closed server')

        self.server.sendto(bytes_, host_port)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.network.track_send(sender, host_port, bytes_)

    def stop(self):
        """Stop the server and close the underlying OS socket."""
        self.server.stop()
        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()
        except socket.error:
            pass

    def stop_accepting(self):
        self.server.stop_accepting()

    def start(self):
        """(Re)start the server; restores the handle cleared by server.stop()."""
        assert not self.server.started
        # server.stop() clears the handle, since this may be a restart the
        # handle must always be set
        self.server.set_handle(self.receive)
        self.server.start()
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):
    """ Persist the list of known nodes with their reputation """

    name = 'discovery'
    server = None  # bound to a DatagramServer in start()

    default_config = dict(
        discovery=dict(
            listen_port=30303,
            listen_host='0.0.0.0',
            bootstrap_nodes=dict(
                cpp_bootstrap=
                'enode://487611428e6c99a11a9795a6abe7b529e81315ca6aad66e2a2fc76e3adf263faba0d35466c2f8f68d561dbefa8878d4df5f1f2ddb1fbeab7f42ffb8cd328bd4a@5.1.83.226:30303',
                go_bootstrap=
                'enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303',
                go_bootstrap2=
                'enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303',
                py_bootstrap=
                'enode://2676755dd8477ad3beea32b4e5a144fa10444b70dfa3e05effb0fdfa75683ebd4f75709e1f8126cb5317c5a35cae823d503744e790a3a038ae5dd60f51ee9101@144.76.62.101:30303',
            ).values()
        ),
        node=dict(privkey_hex=''))

    def __init__(self, app):
        BaseService.__init__(self, app)
        log.info('NodeDiscovery init')
        # man setsockopt
        self.protocol = DiscoveryProtocol(app=self.app, transport=self)

    @property
    def address(self):
        cfg = self.app.config['discovery']
        return Address(cfg['listen_host'], cfg['listen_port'])

    def send(self, address, message):
        """Best-effort datagram send; log and back off briefly on socket errors."""
        assert isinstance(address, Address)
        log.debug('sending', size=len(message), to=address)
        try:
            self.server.sendto(message, (address.ip, address.udp_port))
        except gevent.socket.error as e:
            log.critical('udp write error', errno=e.errno, reason=e.strerror)
            log.critical('waiting for recovery')
            gevent.sleep(5.)

    def receive(self, address, message):
        assert isinstance(address, Address)
        self.protocol.receive(address, message)

    def _handle_packet(self, message, ip_port):
        # DatagramServer hands us (data, (ip, port)); wrap into an Address.
        log.debug('handling packet', address=ip_port, size=len(message))
        assert len(ip_port) == 2
        ip, udp_port = ip_port
        self.receive(Address(ip=ip, udp_port=udp_port), message)

    def start(self):
        """Bind the listener, start the service, and bootstrap kademlia."""
        log.info('starting discovery')
        cfg = self.app.config['discovery']
        ip, port = cfg['listen_host'], cfg['listen_port']
        log.info('starting listener', port=port, host=ip)
        self.server = DatagramServer((ip, port), handle=self._handle_packet)
        self.server.start()
        super(NodeDiscovery, self).start()
        # bootstrap against the configured seed nodes
        nodes = [Node.from_uri(uri) for uri in cfg['bootstrap_nodes']]
        if nodes:
            self.protocol.kademlia.bootstrap(nodes)

    def _run(self):
        log.debug('_run called')
        # Block forever; all work happens in the server greenlet.
        gevent.event.Event().wait()

    def stop(self):
        log.info('stopping discovery')
        self.server.stop()
        super(NodeDiscovery, self).stop()
class Server(object):
    """Receives pickled logging records over UDP/TCP and relays them to Loggly.

    SECURITY: the wire format is length-prefixed ``pickle`` — unpickling
    network data allows remote code execution. Only bind to trusted
    interfaces (the default is loopback).
    """

    def __init__(self, loggly_token, bind_ip='127.0.0.1', tcp_port=DEFAULT_TCP,
                 udp_port=DEFAULT_UDP, fqdn=True, hostname=None, tags=None):
        self.loggly_token = loggly_token
        self.formatter = formatter.JSONFormatter(tags, hostname, fqdn)
        self.udp_server = DatagramServer('%s:%s' % (bind_ip, udp_port), self.udp_handle)
        self.tcp_server = StreamServer('%s:%s' % (bind_ip, tcp_port), self.tcp_handle)
        self.queue = Queue()
        # Pool of sender greenlets draining the queue concurrently.
        for _ in range(100):
            gevent.spawn(self.sender)
        logging.info('Listening on %s (udp=%s tcp=%s).', bind_ip, udp_port, tcp_port)

    def sender(self):
        """Forward queued records to Loggly, retrying forever on failure."""
        while True:
            obj = self.queue.get()
            qsize = self.queue.qsize()
            if qsize > 100 and qsize % 100 == 0:
                logger.error("Queue has over %d messages", qsize)
            record = logging.makeLogRecord(obj)
            data = self.formatter.format(record, serialize=False)
            tags = data.pop('tags', [])
            # urlopen needs bytes on py3, str on py2.
            if sys.version_info < (3, 0):
                payload = json.dumps(data)
            else:
                payload = bytes(json.dumps(data), 'utf-8')
            log_data = "PLAINTEXT=" + quote(payload)
            url = "http://logs-01.loggly.com/inputs/%s/tag/%s/" % (self.loggly_token,
                                                                   ','.join(tags))
            while True:
                try:
                    urlopen(url, log_data)
                    break
                except Exception as exc:
                    logging.error('Can\'t send message to %s: %s', url, exc)
                    gevent.sleep(5)
                    continue

    def udp_handle(self, data, address):
        """Decode one length-prefixed pickled record and enqueue it.

        Frame format: 4-byte big-endian length followed by a pickled dict.
        """
        # Fix: a datagram shorter than the 4-byte prefix used to raise an
        # uncaught struct.error and kill the handler.
        if len(data) < 4:
            logging.error('UDP: truncated datagram %s', data)
            return
        slen = struct.unpack('>L', data[:4])[0]
        chunk = data[4:slen + 4]
        try:
            # SECURITY: pickle.loads on untrusted network data can execute
            # arbitrary code — see the class docstring.
            # Fix: unpickling bad data raises more than EOFError (e.g.
            # UnpicklingError, ValueError); catch broadly at this boundary.
            obj = pickle.loads(chunk)
        except Exception:
            logging.error('UDP: invalid data to pickle %s', chunk)
            return
        self.queue.put_nowait(obj)

    def tcp_handle(self, socket, address):
        """Read consecutive length-prefixed pickled records from one connection."""
        fileobj = socket.makefile()
        while True:
            chunk = fileobj.read(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack(">L", chunk)[0]
            chunk = fileobj.read(slen)
            while len(chunk) < slen:
                chunk = chunk + fileobj.read(slen - len(chunk))
            fileobj.flush()
            try:
                # SECURITY / Fix: see udp_handle — broadened from EOFError.
                obj = pickle.loads(chunk)
            except Exception:
                logging.error('TCP: invalid data to pickle %s', chunk)
                break
            self.queue.put_nowait(obj)

    def start(self):
        """Start both listeners and block until all greenlets finish."""
        self.udp_server.start()
        self.tcp_server.start()
        gevent.wait()
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):
    """ Persist the list of known nodes with their reputation """

    cpp_bootstrap = 'enode://24f904a876975ab5c7acbedc8ec26e6f7559b527c073c6e822049fee4df78f2e9c74840587355a068f2cdb36942679f7a377a6d8c5713ccf40b1d4b99046bba0@5.1.83.226:30303'
    go_bootstrap = 'enode://6cdd090303f394a1cac34ecc9f7cda18127eafa2a3a06de39f6d920b0e583e062a7362097c7c65ee490a758b442acd5c80c6fce4b148c6a391e946b45131365b@54.169.166.226:30303'
    bootstrap_nodes = [cpp_bootstrap, go_bootstrap]

    name = 'discovery'
    server = None  # bound to a DatagramServer in start()

    default_config = dict(
        discovery=dict(listen_port=30303,
                       listen_host='0.0.0.0',
                       bootstrap_nodes=bootstrap_nodes),
        node=dict(privkey_hex=''))

    def __init__(self, app):
        BaseService.__init__(self, app)
        log.info('NodeDiscovery init')
        # man setsockopt
        self.protocol = DiscoveryProtocol(app=self.app, transport=self)

    @property
    def address(self):
        cfg = self.app.config['discovery']
        return Address(cfg['listen_host'], cfg['listen_port'])

    def send(self, address, message):
        """Send one datagram; on socket errors, log and stop the application."""
        assert isinstance(address, Address)
        log.debug('sending', size=len(message), to=address)
        try:
            self.server.sendto(message, (address.ip, address.port))
        except gevent.socket.error as e:
            log.critical('udp write error', errno=e.errno, reason=e.strerror)
            self.app.stop()

    def receive(self, address, message):
        assert isinstance(address, Address)
        self.protocol.receive(address, message)

    def _handle_packet(self, message, ip_port):
        # DatagramServer hands us (data, (ip, port)); wrap into an Address.
        log.debug('handling packet', address=ip_port, size=len(message))
        assert len(ip_port) == 2
        ip, port = ip_port
        self.receive(Address(ip=ip, port=port), message)

    def start(self):
        """Bind the listener, start the service, and bootstrap kademlia."""
        log.info('starting discovery')
        cfg = self.app.config['discovery']
        ip, port = cfg['listen_host'], cfg['listen_port']
        log.info('starting listener', port=port, host=ip)
        self.server = DatagramServer((ip, port), handle=self._handle_packet)
        self.server.start()
        super(NodeDiscovery, self).start()
        # bootstrap against the configured seed nodes
        nodes = [Node.from_uri(uri) for uri in cfg['bootstrap_nodes']]
        if nodes:
            self.protocol.kademlia.bootstrap(nodes)

    def _run(self):
        log.debug('_run called')
        # Block forever; all work happens in the server greenlet.
        gevent.event.Event().wait()

    def stop(self):
        log.info('stopping discovery')
        self.server.stop()
        super(NodeDiscovery, self).stop()
def put_pid_file():
    """Append this process's pid to the configured pid file.

    NOTE(review): the ``def`` line was lost in the mangled source; the body
    (pid-file write) is reconstructed as ``put_pid_file`` because
    ``serve_forever`` below calls exactly that name.
    """
    pid_file = config.get_string("Server", "pid_file", CUR_PATH + "/../run/gevent.pid")
    # ``with`` guarantees the handle is closed even if the write fails.
    with open(pid_file, 'a') as fd:
        fd.write("%d\n" % os.getpid())


def serve_forever(server):
    """Record our pid, then block serving datagrams forever."""
    put_pid_file()
    server.serve_forever()


# get configuration
server_ip = config.get_string("Server", "ip_addr", "0.0.0.0")
server_port = config.get_int("Server", "port", 23620)
pool_size = config.get_int("Server", "processor_pool_size", 4)

processor = Processor()
# Renamed from ``pool`` to avoid shadowing the imported ``pool`` module.
worker_pool = pool.Pool(pool_size)

server = DatagramServer((server_ip, server_port), processor, spawn=worker_pool)
#server.max_accpet = 10000
server.start()

# Read but currently unused: multi-process serving is disabled below.
process_count = config.get_int("Server", "processor")
#for i in range(process_count - 1):
#    Process(target=serve_forever, args=(server,)).start()
serve_forever(server)
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):

    """
    Persist the list of known nodes with their reputation
    """

    cpp_bootstrap = 'enode://24f904a876975ab5c7acbedc8ec26e6f7559b527c073c6e822049fee4df78f2e9c74840587355a068f2cdb36942679f7a377a6d8c5713ccf40b1d4b99046bba0@5.1.83.226:30303'
    go_bootstrap = 'enode://6cdd090303f394a1cac34ecc9f7cda18127eafa2a3a06de39f6d920b0e583e062a7362097c7c65ee490a758b442acd5c80c6fce4b148c6a391e946b45131365b@54.169.166.226:30303'
    bootstrap_nodes = [cpp_bootstrap, go_bootstrap]
    name = 'discovery'
    server = None  # will be set to DatagramServer by start()
    default_config = dict(discovery=dict(listen_port=30303,
                                         listen_host='0.0.0.0',
                                         bootstrap_nodes=bootstrap_nodes
                                         ),
                          node=dict(privkey_hex=''))

    def __init__(self, app):
        """Create the discovery protocol; the UDP listener starts in start()."""
        BaseService.__init__(self, app)
        log.info('NodeDiscovery init')
        # man setsockopt
        self.protocol = DiscoveryProtocol(app=self.app, transport=self)

    @property
    def address(self):
        """The (host, port) this service listens on, wrapped as Address."""
        ip = self.app.config['discovery']['listen_host']
        port = self.app.config['discovery']['listen_port']
        return Address(ip, port)

    # def _send(self, address, message):
    #     assert isinstance(address, Address)
    #     sock = gevent.socket.socket(type=gevent.socket.SOCK_DGRAM)
    #     sock.bind(('0.0.0.0', self.address.port))  # send from our recv port
    #     sock.connect((address.ip, address.port))
    #     log.debug('sending', size=len(message), to=address)
    #     sock.send(message)

    def send(self, address, message):
        """Write ``message`` to ``address`` over the listening UDP socket.

        A socket-level write error is treated as fatal: the whole app stops.
        """
        assert isinstance(address, Address)
        log.debug('sending', size=len(message), to=address)
        try:
            self.server.sendto(message, (address.ip, address.port))
        except gevent.socket.error as e:
            log.critical('udp write error', errno=e.errno, reason=e.strerror)
            self.app.stop()

    def receive(self, address, message):
        """Hand an incoming packet to the discovery protocol."""
        assert isinstance(address, Address)
        self.protocol.receive(address, message)

    def _handle_packet(self, message, ip_port):
        """DatagramServer callback: wrap the peer (ip, port) and dispatch."""
        log.debug('handling packet', address=ip_port, size=len(message))
        assert len(ip_port) == 2
        address = Address(ip=ip_port[0], port=ip_port[1])
        self.receive(address, message)

    def start(self):
        """Bind the UDP listener, then bootstrap into the kademlia network."""
        log.info('starting discovery')
        # start a listening server
        ip = self.app.config['discovery']['listen_host']
        port = self.app.config['discovery']['listen_port']
        log.info('starting listener', port=port, host=ip)
        self.server = DatagramServer((ip, port), handle=self._handle_packet)
        self.server.start()
        super(NodeDiscovery, self).start()

        # bootstrap
        nodes = [Node.from_uri(x) for x in self.app.config['discovery']['bootstrap_nodes']]
        if nodes:
            self.protocol.kademlia.bootstrap(nodes)

    def _run(self):
        # All real work happens in the DatagramServer callback; this service
        # greenlet just parks forever on a never-set event.
        log.debug('_run called')
        evt = gevent.event.Event()
        evt.wait()

    def stop(self):
        log.info('stopping discovery')
        self.server.stop()
        super(NodeDiscovery, self).stop()
class UDPTransport:
    """UDP transport: per-(recipient, queue_name) ordered send queues with
    retry, healthchecking via Ping/Pong, and delivery confirmation via
    Delivered messages.  Incoming datagrams arrive through ``_receive``.
    """
    UDP_MAX_MESSAGE_SIZE = 1200

    def __init__(self, discovery, udpsocket, throttle_policy, config):
        # these values are initialized by the start method
        self.queueids_to_queues: typing.Dict
        self.raiden: RaidenService

        self.discovery = discovery
        self.config = config

        self.retry_interval = config['retry_interval']
        self.retries_before_backoff = config['retries_before_backoff']
        self.nat_keepalive_retries = config['nat_keepalive_retries']
        self.nat_keepalive_timeout = config['nat_keepalive_timeout']
        self.nat_invitation_timeout = config['nat_invitation_timeout']

        self.event_stop = Event()
        self.greenlets = list()
        self.addresses_events = dict()
        # message id -> AsyncResult set once the Delivered ack arrives
        self.messageids_to_asyncresults = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        # TTL-cached wrapper around discovery.get so each send does not hit
        # the discovery service.
        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)

        self.throttle_policy = throttle_policy
        self.server = DatagramServer(udpsocket, handle=self._receive)

    def start(
            self,
            raiden: RaidenService,
            # annotation corrected: iterated with .items() below, so this is a
            # mapping of queue ids to lists of send events, not a flat list
            queueids_to_queues: typing.Dict,
    ):
        """Re-create the outgoing queues from ``queueids_to_queues`` and start
        listening.  Safe to call after a previous stop (restart)."""
        self.raiden = raiden
        self.queueids_to_queues = dict()

        # server.stop() clears the handle. Since this may be a restart the
        # handle must always be set
        self.server.set_handle(self._receive)

        for (recipient, queue_name), queue in queueids_to_queues.items():
            encoded_queue = list()

            for sendevent in queue:
                # sign and serialize once, up front, so the retry loop only
                # re-sends bytes
                message = message_from_sendevent(sendevent, raiden.address)
                raiden.sign(message)
                encoded = message.encode()
                encoded_queue.append((encoded, sendevent.message_identifier))

            self.init_queue_for(recipient, queue_name, encoded_queue)

        self.server.start()

    def stop_and_wait(self):
        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.server.stop_accepting()

        # Stop processing the outgoing queues
        self.event_stop.set()
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.server.stop()

        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()  # pylint: disable=protected-access
        except socket.error:
            pass

        # Set all the pending results to False
        for async_result in self.messageids_to_asyncresults.values():
            async_result.set(False)

    def get_health_events(self, recipient):
        """ Starts a healthcheck task for `recipient` and returns a
        HealthEvents with locks to react on its current state.
        """
        if recipient not in self.addresses_events:
            self.start_health_check(recipient)

        return self.addresses_events[recipient]

    def start_health_check(self, recipient):
        """ Starts a task for healthchecking `recipient` if there is not
        one yet.
        """
        if recipient not in self.addresses_events:
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                recipient,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = healthcheck.HealthEvents(
                event_healthy=Event(),
                event_unhealthy=Event(),
            )

            self.addresses_events[recipient] = events

            greenlet_healthcheck = gevent.spawn(
                healthcheck.healthcheck,
                self,
                recipient,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            )
            greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}'
            self.greenlets.append(greenlet_healthcheck)

    def init_queue_for(
            self,
            recipient: typing.Address,
            queue_name: bytes,
            items: typing.List[QueueItem_T],
    ) -> Queue_T:
        """ Create the queue identified by the pair `(recipient, queue_name)`
        and initialize it with `items`.
        """
        queueid = (recipient, queue_name)
        queue = self.queueids_to_queues.get(queueid)
        # caller contract: the queue must not exist yet
        assert queue is None

        queue = NotifyingQueue(items=items)
        self.queueids_to_queues[queueid] = queue

        # the queue's sender greenlet only runs while the recipient is healthy
        events = self.get_health_events(recipient)

        greenlet_queue = gevent.spawn(
            single_queue_send,
            self,
            recipient,
            queue,
            self.event_stop,
            events.event_healthy,
            events.event_unhealthy,
            self.retries_before_backoff,
            self.retry_interval,
            self.retry_interval * 10,
        )

        if queue_name == b'global':
            greenlet_queue.name = f'Queue for {pex(recipient)} - global'
        else:
            greenlet_queue.name = f'Queue for {pex(recipient)} - {pex(queue_name)}'

        self.greenlets.append(greenlet_queue)

        # NOTE(review): the key is named ``token`` but carries the queue name
        log.debug(
            'new queue created for',
            node=pex(self.raiden.address),
            token=pex(queue_name),
            to=pex(recipient),
        )

        return queue

    def get_queue_for(
            self,
            recipient: typing.Address,
            queue_name: bytes,
    ) -> Queue_T:
        """ Return the queue identified by the pair `(recipient, queue_name)`.

        If the queue doesn't exist it will be instantiated.
        """
        queueid = (recipient, queue_name)
        queue = self.queueids_to_queues.get(queueid)

        if queue is None:
            items = ()
            queue = self.init_queue_for(recipient, queue_name, items)

        return queue

    def send_async(
            self,
            recipient: typing.Address,
            queue_name: bytes,
            message: 'Message',
    ):
        """ Send a new ordered message to recipient.

        Messages that use the same `queue_name` are ordered.
        """
        if not is_binary_address(recipient):
            raise ValueError('Invalid address {}'.format(pex(recipient)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError('Do not use send for {} messages'.format(message.__class__.__name__))

        messagedata = message.encode()
        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            raise ValueError(
                'message size exceeds the maximum {}'.format(self.UDP_MAX_MESSAGE_SIZE),
            )

        # message identifiers must be unique
        message_id = message.message_identifier

        # ignore duplicates
        if message_id not in self.messageids_to_asyncresults:
            self.messageids_to_asyncresults[message_id] = AsyncResult()

            queue = self.get_queue_for(recipient, queue_name)
            queue.put((messagedata, message_id))

            log.debug(
                'MESSAGE QUEUED',
                node=pex(self.raiden.address),
                queue_name=queue_name,
                to=pex(recipient),
                message=message,
            )

    def maybe_send(self, recipient: typing.Address, message: Message):
        """ Send message to recipient if the transport is running. """

        if not is_binary_address(recipient):
            raise InvalidAddress('Invalid address {}'.format(pex(recipient)))

        messagedata = message.encode()
        host_port = self.get_host_port(recipient)

        self.maybe_sendraw(host_port, messagedata)

    def maybe_sendraw_with_result(
            self,
            recipient: typing.Address,
            messagedata: bytes,
            message_id: typing.MessageID,
    ) -> AsyncResult:
        """ Send message to recipient if the transport is running.

        Returns:
            An AsyncResult that will be set once the message is delivered. As
            long as the message has not been acknowledged with a Delivered
            message the function will return the same AsyncResult.
        """
        async_result = self.messageids_to_asyncresults.get(message_id)
        if async_result is None:
            async_result = AsyncResult()
            self.messageids_to_asyncresults[message_id] = async_result

        host_port = self.get_host_port(recipient)
        self.maybe_sendraw(host_port, messagedata)

        return async_result

    def maybe_sendraw(self, host_port: typing.Tuple[int, int], messagedata: bytes):
        """ Send message to recipient if the transport is running. """

        # Don't sleep if timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing it's latency
        sleep_timeout = self.throttle_policy.consume(1)
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        # Check the udp socket is still available before trying to send the
        # message. There must be *no context-switches after this test*.
        if hasattr(self.server, 'socket'):
            self.server.sendto(
                messagedata,
                host_port,
            )

    def _receive(self, data, host_port):  # pylint: disable=unused-argument
        try:
            self.receive(data)
        except RaidenShuttingDown:  # For a clean shutdown
            return

    def receive(self, messagedata: bytes):
        """ Handle an UDP packet. """
        # pylint: disable=unidiomatic-typecheck

        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            log.error(
                'INVALID MESSAGE: Packet larger than maximum size',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
                length=len(messagedata),
            )
            return

        message = decode(messagedata)

        # transport-level messages are handled here; everything else is a
        # protocol message and goes through receive_message
        if type(message) == Pong:
            self.receive_pong(message)
        elif type(message) == Ping:
            self.receive_ping(message)
        elif type(message) == Delivered:
            self.receive_delivered(message)
        elif message is not None:
            self.receive_message(message)
        else:
            log.error(
                'INVALID MESSAGE: Unknown cmdid',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
            )

    def receive_message(self, message: Message):
        """ Handle a Raiden protocol message.

        The protocol requires durability of the messages. The UDP transport
        relies on the node's WAL for durability. The message will be converted
        to a state change, saved to the WAL, and *processed* before the
        durability is confirmed, which is a stronger property than what is
        required of any transport.
        """
        # pylint: disable=unidiomatic-typecheck

        if on_message(self.raiden, message):

            # Sending Delivered after the message is decoded and *processed*
            # gives a stronger guarantee than what is required from a
            # transport.
            #
            # Alternatives are, from weakest to strongest options:
            # - Just save it on disk and asynchronously process the messages
            # - Decode it, save to the WAL, and asynchronously process the
            #   state change
            # - Decode it, save to the WAL, and process it (the current
            #   implementation)
            delivered_message = Delivered(message.message_identifier)
            self.raiden.sign(delivered_message)

            self.maybe_send(
                message.sender,
                delivered_message,
            )

    def receive_delivered(self, delivered: Delivered):
        """ Handle a Delivered message.

        The Delivered message is how the UDP transport guarantees persistence
        by the partner node. The message itself is not part of the raiden
        protocol, but it's required by this transport to provide the required
        properties.
        """
        processed = ReceiveDelivered(delivered.delivered_message_identifier)
        self.raiden.handle_state_change(processed)

        message_id = delivered.delivered_message_identifier
        async_result = self.raiden.transport.messageids_to_asyncresults.get(message_id)

        # clear the async result, otherwise we have a memory leak
        if async_result is not None:
            del self.messageids_to_asyncresults[message_id]
            async_result.set()

    # Pings and Pongs are used to check the health status of another node. They
    # are /not/ part of the raiden protocol, only part of the UDP transport,
    # therefore these messages are not forwarded to the message handler.
    def receive_ping(self, ping: Ping):
        """ Handle a Ping message by answering with a Pong. """

        log.debug(
            'PING RECEIVED',
            node=pex(self.raiden.address),
            message_id=ping.nonce,
            message=ping,
            sender=pex(ping.sender),
        )

        pong = Pong(ping.nonce)
        self.raiden.sign(pong)

        try:
            self.maybe_send(ping.sender, pong)
        except (InvalidAddress, UnknownAddress) as e:
            log.debug("Couldn't send the `Delivered` message", e=e)

    def receive_pong(self, pong: Pong):
        """ Handles a Pong message. """

        # the Ping side registered under this composite key in healthcheck
        message_id = ('ping', pong.nonce, pong.sender)
        async_result = self.messageids_to_asyncresults.get(message_id)

        if async_result is not None:
            log.debug(
                'PONG RECEIVED',
                node=pex(self.raiden.address),
                sender=pex(pong.sender),
                message_id=pong.nonce,
            )

            async_result.set(True)

    # annotation corrected: the method returns the *encoded* message bytes,
    # not the Ping instance
    def get_ping(self, nonce: int) -> bytes:
        """ Returns a signed, encoded Ping message.

        Note: Ping messages don't have an enforced ordering, so a Ping message
        with a higher nonce may be acknowledged first.
        """
        message = Ping(nonce)
        self.raiden.sign(message)
        message_data = message.encode()

        return message_data

    def set_node_network_state(self, node_address: typing.Address, node_state):
        """Record a node's network reachability as a state change."""
        state_change = ActionChangeNodeNetworkState(node_address, node_state)
        self.raiden.handle_state_change(state_change)
class DMServerCore(OperationRequest):
    """LWM2M Device-Management server core (Python 2 / gevent).

    Listens for CoAP-style requests on a UDP socket and dispatches
    registration, read/write, execute, discovery, and observe/notify
    operations to the corresponding handler objects.
    """

    def __init__(self, server_ip, server_port, client_ip, client_port):
        super(DMServerCore, self).__init__()
        self.lwm2m_dm_server_ip = server_ip
        self.lwm2m_dm_server_port = server_port
        self.local_client_ip_ = client_ip
        self.local_client_port_ = client_port
        # semaphore + counter implement generate_client_port()
        self.sem = Semaphore()
        self.sem_counter = 0
        # shared resource tree handed to every operation handler below
        self.lwm2m_resources = LWM2MResourceTree()
        self.registration = Registration(self.lwm2m_resources)
        self.execution = Execution(self.lwm2m_resources)
        self.discover = Discovery(lwm2m_resources=self.lwm2m_resources)
        self.observation = ObservationNotificationEngine(self.lwm2m_resources)
        self.read = Read(self.lwm2m_resources)
        self.write = Write(self.lwm2m_resources)
        self.create_object_instance = Create(self.lwm2m_resources)
        self.write_attributes = WriteAttributes(self.lwm2m_resources)

    def create_server(self, ):
        """ Creates and starts a LWM2M DM Server using Gevent DatagramServer.
        The server listens at the ip and port specified below. A handler is
        used to entertain the requests coming at that port """
        self.dm_server = DatagramServer((self.lwm2m_dm_server_ip,
                                         self.lwm2m_dm_server_port), self.handle_request)
        self.dm_server.start()

    def stop_server(self, ):
        """ Stops the LWM2M DM Server """
        self.dm_server.stop()

    def handle_request(self, message, remote):
        """ Handles the requests coming at the specified ip and port """
        rx_record = connection.ReceptionRecord(None, message, remote)
        msg = rx_record.message
        uri_query = msg.findOption(options.UriQuery)
        self.process(rx_record, remote, uri_query)

    def handle_lwm2m_put(self, msg, uri_query, remote, rx_record):
        """ It consists of Normal Update, Write Operation, Write Attribute
        Operation. Write Operation is used to update the resource(s) as per
        the request. Write Attributes operation is used to update the
        attributes of the object, object instance or resource. """
        method = None
        try:
            # uri_query entries look like "key=value"; take the value part
            method = uri_query[0].value.split("=")[1]
        except:
            pass
        if method == "write":
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            self.write.write_resource(msg.payload, path, content_type)
            # keep the payload: ``msg`` is rebound to the ACK below
            payload_forward = msg.payload
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CHANGED,
                                     payload="Resource Updated")
            self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
            client_port = self.generate_client_port()
            self.write.forward_write_request(path, payload_forward,
                                             content_type, remote, client_port)
        elif method == "write_attributes":
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            payload = loads(msg.payload)
            self.write_attributes.set_attributes(path, remote, payload)
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CHANGED,
                                     payload="Resource Attributes Updated")
            self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
            client_port = self.generate_client_port()
            self.write_attributes.forward_request(path, remote, payload,
                                                  content_type, client_port)
        else:
            # no recognized method: this is a registration update or a
            # resource add/update against the registered endpoint
            endpoint_location = msg.findOption(URI_PATH_VALUE)[0].value
            if msg.payload == "":
                self.logger.info("Updating the Registration Params")
                endpoint_object = self.lwm2m_resources.return_endpoint_object(
                    endpoint_location=endpoint_location)
                # NOTE(review): assumes uri_query[6] carries the listener ip
                # ("key=value") -- confirm against the client's update request
                endpoint_object.listener_ip = uri_query[6].value.split("=")[1]
                endpoint_object.local_ip = uri_query[6].value.split("=")[1]
                msg = connection.Message(connection.Message.ACK,
                                         code=constants.CHANGED,
                                         payload="Resource Updated")
                self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
            else:
                self.logger.info("Adding/Updating the Resources")
                payload = self.update_resource(
                    loads(msg.payload), endpoint_location=endpoint_location)
                msg = connection.Message(connection.Message.ACK,
                                         code=constants.CHANGED,
                                         payload="Resource Updated")
                self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
                self.logger.info("Forwarding the Notification")
                request = lwm2m_api()
                client_port = self.generate_client_port()
                response = request.send_notification(
                    self.general_observation.listener_ip,
                    self.general_observation.listener_port,
                    self.general_observation.token_id, payload,
                    content_type="application/json", client_port=client_port)

    def update_resource(self, res_payload, endpoint_location=None, endpoint_name=None):
        """Apply ``res_payload`` to the endpoint's objects and return a
        summary dict of the endpoint's resources keyed by endpoint name."""
        total_res_dict = {}
        total_object_info = {}
        payload = res_payload
        endpoint_object = self.registration.handle_put_resource_updates(
            res_payload, endpoint_location=endpoint_location,
            endpoint_name=endpoint_name)
        for item, value in payload.iteritems():
            resources_dict = endpoint_object.objects_dict[item][
                "object"].resources_id_dict
            res_dict = {}
            for item1, value1 in resources_dict.iteritems():
                res_dict.update({item1: value1["object"].res_value})
            total_res_dict.update({item: {"resources": res_dict}})
        total_object_info = {endpoint_object.endpoint_name: total_res_dict}
        return total_object_info

    def process(self, rx_record, remote, uri_query):
        """ Processes various requests like CON (POST, PUT, GET) or NON.
        POST requests : Generally used for Registration and Execution
        PUT requests : Generally used for Updating the resources
        GET requests : Generally used for Discovery, Observation, Cancel
        Observation """
        msg = rx_record.message
        self.uri_query = uri_query
        if msg.transaction_type == connection.Message.CON:
            if constants.POST == msg.code:
                method = None
                try:
                    method = uri_query[0].value.split("=")[1]
                except:
                    pass
                if method == "create":
                    path = msg.findOption(URI_PATH_VALUE)
                    content_type_number = msg.findOption(options.ContentType)
                    if content_type_number is None:
                        content_type = "text/plain"
                    else:
                        content_type = constants.media_types[
                            content_type_number.value]
                    self.create_object_instance.create_instance(
                        path, remote, content_type, loads(msg.payload))
                    resources = loads(msg.payload)
                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CREATED,
                                             payload="Resource Created")
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
                    client_port = self.generate_client_port()
                    self.create_object_instance.forward_request(
                        path, remote, resources, content_type, client_port)
                elif method == "execute":
                    path = msg.findOption(URI_PATH_VALUE)
                    content_type_number = msg.findOption(options.ContentType)
                    if content_type_number is None:
                        content_type = "text/plain"
                    else:
                        content_type = constants.media_types[
                            content_type_number.value]
                    self.execution.execute_resource(path, remote, msg.payload)
                    execute_payload = msg.payload
                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CHANGED,
                                             payload="Resource Executed")
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
                    client_port = self.generate_client_port()
                    self.execution.forward_request(path, remote,
                                                   execute_payload, client_port)
                elif method == "notify":
                    self.logger.info("Notification Received")
                    client_port = self.generate_client_port()
                    for item1, item2 in loads(msg.payload).iteritems():
                        if item1 == "observer_ip":
                            observer_ip = item2
                        elif item1 == "observer_port":
                            observer_port = item2
                        elif item1 != "observer_ip" and item1 != "observer_port":
                            endpoint_name = item1
                            # NOTE(review): the loops below only retain the
                            # *last* item3/item5/item6 (body is ``pass``);
                            # the reconstructed indentation of the original
                            # was ambiguous here -- verify against history.
                            for item3, item4 in item2.iteritems():
                                for item5, item6 in item4[
                                        "resources"].iteritems():
                                    pass
                            res = {
                                item3: {
                                    "resources": {
                                        item5.split("_")[0]: {
                                            "res_value": item6,
                                            "res_inst_id": item5.split("_")[1]
                                        }
                                    }
                                }
                            }
                            payload = {}
                            payload = self.update_resource(res, endpoint_name=endpoint_name)
                            payload["observer_ip"] = observer_ip
                            payload["observer_port"] = observer_port
                            token_id = msg.token
                            observe_value = msg.findOption(options.Observe).value
                            self.logger.info("Forwarding Notification")
                            content_type = "application/json"
                            request = lwm2m_api()
                            response = request.send_notification(
                                self.general_observation.listener_ip,
                                self.general_observation.listener_port,
                                token_id, payload,
                                content_type=content_type,
                                time_elapse=observe_value,
                                client_port=client_port)
                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CREATED,
                                             payload="Notification Received")
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
                else:
                    """ Handles the Client Registration Request """
                    self.logger.info(
                        "Registering Client Endpoint in the LWM2M DM Server")
                    endpoint = self.registration.process_registration(
                        msg, uri_query)
                    response = self.registration.register_client(endpoint)
                    registered_client_location = response
                    if registered_client_location is not None:
                        self.logger.info(
                            "Client Endpoint Registration Successful for Endpoint : %s",
                            endpoint.endpoint_name)
                        self.logger.info("The registered location is %s",
                                         registered_client_location)
                        payload = self.set_general_observation_params()
                    else:
                        self.logger.info("Client Endpoint Registration Failed")
                    msg = connection.Message(
                        connection.Message.ACK, code=constants.CREATED,
                        location=registered_client_location)
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)
                    #Send the General Observation to the Registered Client
                    #self.send_general_observation(registered_client_location)
            elif constants.PUT == msg.code:
                """ It consists of Normal Update, Write Operation, Write
                Attribute Operation. Write Operation is used to update the
                resource(s) as per the request. Write Attributes operation is
                used to update the attributes of the object, object instance
                or resource. """
                self.handle_lwm2m_put(msg, uri_query, remote, rx_record)
            elif constants.GET == msg.code:
                """ Handles Requests like Discovery, Observation """
                try:
                    observe_value = msg.findOption(options.Observe).value
                except:
                    observe_value = ""
                if observe_value == OBSERVE_OPTION_VALUE_OBSERVATION:
                    """ Sets the Observation. Two types of observations.
                    General Observation and Specific Observation. General
                    Observation is used for anything that is not observed and
                    updates are sent as general notifications using a general
                    token. Specific observation is implicitly defined by the
                    observer(as request) and handled as specific notification
                    with a specific token """
                    path = msg.findOption(URI_PATH_VALUE)
                    if len(path) == 1:
                        self.set_m2m_server_adapter_params(rx_record, remote)
                    else:
                        self.logger.info(
                            "Specific Observation Request Received")
                        content_type_number = msg.findOption(
                            options.ContentType)
                        if content_type_number is None:
                            content_type = "text/plain"
                        else:
                            content_type = constants.media_types[
                                content_type_number.value]
                        token_id = msg.token
                        app_ip = loads(msg.payload)["app_ip"]
                        app_port = loads(msg.payload)["app_port"]
                        client_port = self.generate_client_port()
                        response = self.observation.forward_request(
                            path, remote, observe_value,
                            self.lwm2m_dm_server_ip, self.lwm2m_dm_server_port,
                            app_ip, app_port, token_id, client_port)
                        msg = connection.Message(connection.Message.ACK,
                                                 code=constants.CONTENT,
                                                 payload="test")  #todo: payload to be replaced
                        self.dm_server.sendto(
                            msg._pack(rx_record.transaction_id, token_id), remote)
                elif observe_value == OBSERVE_OPTION_VALUE_CANCEL_OBSERVATION:
                    """ Removes the observation from the List """
                    self.logger.info("Cancel Observation Request Received")
                    path = msg.findOption(URI_PATH_VALUE)
                    token_id = msg.token
                    app_ip = loads(msg.payload)["app_ip"]
                    app_port = loads(msg.payload)["app_port"]
                    client_port = self.generate_client_port()
                    response = self.observation.forward_request(
                        path, remote, observe_value,
                        self.lwm2m_dm_server_ip, self.lwm2m_dm_server_port,
                        app_ip, app_port, token_id, client_port)

                    def _handle_response(response):
                        # deferred ACK: sent once the forwarded request resolves
                        msg = connection.Message(connection.Message.ACK,
                                                 code=constants.CONTENT,
                                                 payload=response)
                        self.dm_server.sendto(
                            msg._pack(rx_record.transaction_id), remote)
                    response.then(_handle_response)
                else:
                    method = None
                    try:
                        method = uri_query[0].value.split("=")[1]
                    except:
                        pass
                    if method == "read":
                        path = msg.findOption(URI_PATH_VALUE)
                        self.read.read_resource(path, remote)
                        msg = connection.Message(connection.Message.ACK,
                                                 code=constants.CONTENT,
                                                 payload="info read",
                                                 content_type="text/plain")
                        self.dm_server.sendto(
                            msg._pack(rx_record.transaction_id), remote)
                    elif method == "discover":
                        if msg.payload == "/.well-known/core":
                            payload = dumps(self.discover.get_all_resources())
                        else:
                            path = msg.findOption(URI_PATH_VALUE)
                            client_port = self.generate_client_port()
                            payload = self.discover.forward_request(
                                path, remote, client_port)

                        def _handle_response(payload):
                            msg = connection.Message(connection.Message.ACK,
                                                     code=constants.CONTENT,
                                                     payload=payload,
                                                     content_type="application/json")
                            self.dm_server.sendto(
                                msg._pack(rx_record.transaction_id), remote)
                        # NOTE(review): identity check against the class --
                        # likely intended ``isinstance(payload, Promise)``;
                        # kept as-is to preserve behavior.
                        if payload is Promise:
                            payload.then(_handle_response)
                        else:
                            _handle_response(payload)
        elif msg.transaction_type == connection.Message.NON:
            print "reached msg non"
            payload = msg.payload
            print payload

    def set_general_observation_params(self, ):
        """Return this server's listener address for general observation."""
        return {
            "listener_ip": self.lwm2m_dm_server_ip,
            "listener_port": self.lwm2m_dm_server_port
        }

    def send_general_observation(self, registered_client_location):
        """Ask a just-registered client to start the general observation."""
        if registered_client_location is not None:
            payload = dumps(self.set_general_observation_params())
            endpoint_object = self.lwm2m_resources.return_endpoint_object(
                endpoint_location=registered_client_location)
            client_listener_ip = endpoint_object.listener_ip
            client_listener_port = endpoint_object.listener_port
            request = lwm2m_api()
            response = request.observe_resource(
                client_listener_ip, client_listener_port,
                payload=payload, client_port=self.generate_client_port())

    def set_m2m_server_adapter_params(self, rx_record, remote):
        """Record the M2M server adapter as the general-observation sink."""
        msg = rx_record.message
        #content_type is application/json
        listener_ip = loads(msg.payload)["listener_ip"]
        listener_port = loads(msg.payload)["listener_port"]
        token_id = msg.token
        self.general_observation = GeneralObservationInformation(
            listener_ip, listener_port, token_id)
        response = "Observation Started on the LWM2M Server"
        msg = connection.Message(connection.Message.ACK,
                                 code=constants.CONTENT, payload=response)
        self.dm_server.sendto(msg._pack(rx_record.transaction_id, token_id),
                              remote)

    def generate_client_port(self, ):
        """Return the next outgoing client port (base + rolling counter).

        The counter wraps at 1000; the semaphore guards the increment.
        """
        if self.sem_counter >= 1000:
            self.sem_counter = 0
        self.sem.acquire()
        self.sem_counter += 1
        sem_counter = self.sem_counter
        self.sem.release()
        client_port = self.local_client_port_ + sem_counter
        return client_port
class dmServer(Plugin): count_client = 0 discover_client_paths = [] def _init(self): self._initialized() def _start(self): self.start_observation_nscl = False self.total_clients = {} self.setting_address() self.server = DatagramServer( (self.lwm2m_dm_server_ip, self.lwm2m_dm_server_port), self.handle_request) self.start_server() self._started() def _stop(self): self.stop_server() self._stopped() def setting_address(self, ): self.lwm2m_dm_server_ip = self.config["lwm2m_dm_server_ip"] self.lwm2m_dm_server_port = self.config["lwm2m_dm_server_port"] self.client_ip = self.config["client_ip"] self.client_port = self.config["client_port"] self.nscl_dm_adapter_listener_ip = self.config[ "nscl_dm_adapter_listener_ip"] self.nscl_dm_adapter_listener_port = self.config[ "nscl_dm_adapter_listener_port"] self.nscl_dm_adapter_client_ip = self.config[ "nscl_dm_adapter_client_ip"] self.nscl_dm_adapter_client_port = self.config[ "nscl_dm_adapter_client_port"] def handle_request(self, message, remote): rx_record = connection.ReceptionRecord(None, message, remote) msg = rx_record.message uriQuery = msg.findOption(options.UriQuery) self.process(rx_record, remote, uriQuery) def start_server(self, ): print "LWM2M Server Started" self.server.start() def stop_server(self, ): print "LWM2M Server Stopped" self.server.stop() def process(self, rx_record, remote, uriQ): position_client = 0 msg = rx_record.message self.uriQuery1 = uriQ payload_type = False if msg.transaction_type == connection.Message.CON: if constants.POST == msg.code: check_for_execute = 0 for val1 in uriQ: if str(val1).find("execute") != -1: check_for_execute = 1 if check_for_execute == 1: check_for_execute = 0 msg = connection.Message(connection.Message.ACK, code=constants.CREATED) self.server.sendto(msg._pack(rx_record.transaction_id), remote) self.execute_resource(rx_record) else: notify_list = self.client_registration( rx_record, msg, payload_type, remote) self.send_notifications_nscl_adapter(notify_list) elif 
constants.PUT == msg.code: check_pmax = 0 try: for val1 in uriQ: if str(val1).find("pmax") != -1: check_pmax = 1 except: pass if check_pmax == 1: self.write_attributes(rx_record) check_pmax = 0 else: pars.parse_uri_query(self.uriQuery1) pars.parse_payload(str(msg.payload), payload_type) location_address = str(str( msg.options[1]).split(":")[1]).strip() for val1 in maintain_clients: if (val1["client_ip"] == rx_record.remote[0] and val1["client_port"] == rx_record.remote[1] ) or val1["location"] == location_address: position_client = val1["client_id"] endpoint_name = val1["endPointName"] notify_list = self.total_clients[ position_client].update_mgmt_object( pars.return_parse_uri_query(), pars.return_parse_payload(), endpoint_name, self.start_observation_nscl ) #mayn't be about creating objects:: so can be calling another function if self.start_observation_nscl: self.send_notifications_nscl_adapter( notify_list) break msg = connection.Message(connection.Message.ACK, code=constants.CHANGED, payload="Changed") self.server.sendto(msg._pack(rx_record.transaction_id), remote) elif constants.DELETE == msg.code: self.__storage = "" print 'Deleting value: %s' % (self.__storage, ) msg = connection.Message(connection.Message.ACK, code=constants.DELETED, payload='Deleting value: %s' % (self.__storage, )) #sendto line msising elif constants.GET == msg.code: try: observe_value = rx_record.message.findOption( options.Observe).value except ValueError: observe_value = None #-1 if observe_value == OBSERVE_OPTION_VALUE_OBSERVATION: if rx_record.remote[ 0] == self.nscl_dm_adapter_client_ip and rx_record.remote[ 1] == self.nscl_dm_adapter_client_port: for v in rx_record.message.findOption(URI_PATH_VALUE): self.start_observation_nscl = True self.msg_transaction_id = rx_record.transaction_id self.msg_uri_port = rx_record.message.findOption( URI_PORT_VALUE).value self.msg_uri_host = rx_record.message.findOption( URI_HOST_VALUE).value else: self.resource_observation(rx_record) elif 
observe_value == OBSERVE_OPTION_VALUE_CANCEL_OBSERVATION: self.cancel_observation(rx_record) elif str( rx_record.message.findOption(URI_PATH_VALUE) [0].value).find(".well-known") != -1: print "Discovered Clients .." for val4 in dmServer.discover_client_paths: print ''.join( ["/", val4["path"], "/", val4["endPointName"]]) elif str( rx_record.message.findOption(URI_PATH_VALUE) [0].value).find("rd") != -1: self.resource_discovery(rx_record) msg = connection.Message(connection.Message.ACK, code=constants.CONTENT, payload="Request Received") self.server.sendto(msg._pack(rx_record.transaction_id), remote) elif msg.transaction_type == connection.Message.ACK: self.notifications_display(msg.payload) def myfunc(self, ): self.p = subprocess.Popen(['sh', 'recv_mp4v.sh', '34000']) #, stdout=subprocess.PIPE) self.p.communicate() def execute_resource(self, rx_record): path = [] upath = "" i = 0 for v in rx_record.message.findOption(11): print "inside execute : server:: %s" % v path.append( str(rx_record.message.findOption(URI_PATH_VALUE)[i].value)) upath += path[i] + "/" i += 1 upath = upath[:len(upath) - 1] for val2 in maintain_clients: if val2["location"] == path[1]: c_server_ip = val2["serverIPInClient"] c_server_port = val2["serverPortInClient"] break c_server_ip_port = str(c_server_ip) + ":" + str(c_server_port) #self.p.communicate() print "reached down :: after runtask" client_request.executeResource(c_server_ip_port, rx_record.message.payload, path=upath, client_port=self.client_port) print "after executeresource:: server" #self.p = subprocess.Popen(['sh', 'recv_mp4v.sh', '34000'], stdout=subprocess.PIPE) #self.api.run_task(self.p.communicate) #self.api.run_task(self.myfunc) ### #self.myfunc() print "reached last ..." 
def client_registration(self, rx_record, msg, payload_type, remote): temp = [] endpoint_name = rx_record.message.findOption( URI_QUERY_VALUE)[0].value.split("=")[1] server_ip_in_client = rx_record.message.findOption( URI_QUERY_VALUE)[1].value.split("=")[1] #option 15 is for UriQuery server_port_in_client = rx_record.message.findOption( URI_QUERY_VALUE)[2].value.split("=")[1] temp.append(self.uriQuery1[0]) self.uriQuery1 = temp pars.parse_uri_query(self.uriQuery1) pars.parse_payload(str(msg.payload), payload_type) pars.return_parse_payload() locationAddr = self.locID_generator(10) info_dict = { "client_ip": rx_record.remote[0], "client_port": rx_record.remote[1], "serverIPInClient": server_ip_in_client, "serverPortInClient": server_port_in_client, "client_id": dmServer.count_client, "location": locationAddr, "endPointName": endpoint_name } dmServer.discover_client_paths.append({ "path": "rd", "location": locationAddr, "endPointName": endpoint_name, "objectID": None, "objectInstID": None, "resID": None }) maintain_clients.append(info_dict) position_client = dmServer.count_client self.total_clients[position_client] = ClientCollection() dmServer.count_client += 1 msg = connection.Message(connection.Message.ACK, code=constants.CREATED, location=locationAddr) self.server.sendto(msg._pack(rx_record.transaction_id), remote) return self.total_clients[position_client].create_mgmt_objects( pars.return_parse_uri_query(), pars.return_parse_payload(), endpoint_name, self.start_observation_nscl) def send_notifications_nscl_adapter(self, notify_list): if self.start_observation_nscl: sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) msg_notify = connection.Message(connection.Message.ACK, code=constants.CONTENT, payload=json.dumps(notify_list)) sock.sendto(msg_notify._pack(self.msg_transaction_id), (self.msg_uri_host, int(self.msg_uri_port))) sock.close() def notifications_display(self, notifications): print "NOTIFICATIONS ..." 
storeNotification = json.loads(notifications) for val2 in storeNotification: print "App. IP : %s, App. Port: %s, ObjectID : %s, Instance ID : %s, Resource Name : %s, Resource Value : %s" % ( val2["app_ip"], val2["app_port"], val2["objectID"], val2["objectInstID"], val2["resName"], val2["resValue"]) # Send to the app's ip (and port) def resource_discovery(self, rx_record): app_ip = rx_record.remote[0] app_port = rx_record.remote[1] path = [] upath = "" for v in rx_record.message.findOption(URI_PATH_VALUE): path.append(v.value) upath = "/".join(path) for val2 in maintain_clients: if val2["endPointName"] == path[1] or val2["location"] == path[1]: c_server_ip = val2["serverIPInClient"] c_server_port = val2["serverPortInClient"] count_val = val2["client_id"] break c_server_ip_port = str(c_server_ip) + ":" + str(c_server_port) payload = ''.join( ["app_ip=", str(app_ip), "&app_port=", str(app_port)]) discoveredResources = client_request.discoverResource( c_server_ip_port, path=upath, payload=payload, client_port=self.client_port) print "Discovered Resources .." for val3 in json.loads( discoveredResources.payload ): #it contains requesting application ip and port :: val3["app_ip"] and val3["app_port"] if val3.has_key("resID"): print ''.join([ "/rd/", val3["endPointName"], "/", val3["objectID"], "/", val3["objectInstID"], "/", str(val3["resID"]), " ; pmax : ", str(val3["pmax"]), " , pmin : ", str(val3["pmin"]), " , value : ", str(val3["resValue"]) ]) else: print ''.join([ "/rd/", val3["endPointName"], "/", val3["objectID"], "/", val3["objectInstID"], " ; pmax :", str(val3["pmax"]), " , pmin : ", str(val3["pmin"]) ]) def resource_observation(self, rx_record): print "OBSERVATION STARTED .." 
self.observeCollection = [] tempObserve = {} app_ip = rx_record.remote[0] app_port = rx_record.remote[1] #move below two lines in the init path = [] for v in rx_record.message.findOption(URI_PATH_VALUE): path.append(v.value) upath = "/".join(path) if len(path) == 3: path.append("None") path.append("None") elif len(path) == 4: path.append("None") #here client means app ip and port self.observeCollection.append({ "clientIP": app_ip, "clientPort": app_port, "endPointName": path[1], "objectID": path[2], "objectInstID": path[3], "resID": path[4] }) tempPayload = "clientIP=" + str(app_ip) + "&clientPort=" + str( app_port) for val2 in maintain_clients: if val2["endPointName"] == path[1] or val2["location"] == path[1]: c_server_ip = val2["serverIPInClient"] c_server_port = val2["serverPortInClient"] break c_server_ip_port = str(c_server_ip) + ":" + str(c_server_port) first_notification = client_request.observeResource( c_server_ip_port, path=upath, payload=tempPayload, uri_host=self.lwm2m_dm_server_ip, uri_port=self.lwm2m_dm_server_port, client_port=self.client_port) if first_notification.payload != "null": self.notifications_display(first_notification.payload) else: print "Already Exists" def cancel_observation(self, rx_record): print "CANCEL OBSERVATION .." 
app_ip = rx_record.remote[0] app_port = rx_record.remote[1] path = [] for v in rx_record.message.findOption(URI_PATH_VALUE): path.append( str(rx_record.message.findOption(URI_PATH_VALUE)[i].value)) upath = "/".join(path) tempObserve = {} if len(path) == 3: path.append("None") path.append("None") elif len(path) == 4: path.append("None") #here client means app ip and port tempCancelObs = "clientIP=" + str(app_ip) + "&clientPort=" + str( app_port) for val2 in maintain_clients: if val2["endPointName"] == path[1] or val2["location"] == path[1]: c_server_ip = val2["serverIPInClient"] c_server_port = val2["serverPortInClient"] break c_server_ip_port = str(c_server_ip) + ":" + str(c_server_port) client_request.cancelSubscription(c_server_ip_port, path=upath, payload=tempCancelObs, client_port=self.client_port) def write_attributes(self, rx_record): path = [] for v in rx_record.message.findOption(URI_PATH_VALUE): path.append(v.value) upath = "/".join(path) pmax = rx_record.message.findOption(URI_QUERY_VALUE)[0].value.split( "=")[1] pmin = rx_record.message.findOption(URI_QUERY_VALUE)[1].value.split( "=")[1] for val2 in maintain_clients: if val2["endPointName"] == path[1] or val2["location"] == path[1]: c_server_ip = val2["serverIPInClient"] c_server_port = val2["serverPortInClient"] break c_server_ip_port = str(c_server_ip) + ":" + str(c_server_port) query = ''.join(["pmax=", str(pmax), "&pmin=", str(pmin)]) reply = client_request.writeAttributes(c_server_ip_port, query, path=upath, client_port=self.client_port) print reply #update pmax and pmin in server too def locID_generator(self, str_size, chars=string.ascii_uppercase + string.digits): return ''.join([random.choice(chars) for _ in range(str_size)])
class PeerManager(gevent.Greenlet):
    """Manages the pool of connected peers: accepts incoming hellos,
    bootstraps outgoing connections, and routes/broadcasts packets."""

    default_config = dict(
        p2p=dict(bootstrap_nodes=[],
                 min_peers=1,
                 max_peers=10,
                 num_workers=1,
                 num_queue=10,
                 listen_port=30303,
                 listen_host='0.0.0.0',
                 timeout=10.0,          # tbd
                 pingtime=5.0,
                 discovery_delay=0.1),  # tbd
        node=dict(privkey=None, wif=None))

    def __init__(self, configs=None):
        print('Initializing peerManager....')
        gevent.Greenlet.__init__(self)
        self.configs = configs if configs else self.default_config
        self.address = (self.configs['p2p']['listen_host'],
                        int(self.configs['p2p']['listen_port']))
        self.peers = []
        self.server = Server(self.address, handle=self._new_conn)
        self.configs['node']['pubkey'] = crypto.priv2pub(
            priv=self.configs['node']['privkey'],
            wif=self.configs['node']['wif'])
        self.state = State.STARTING
        self.status_book = dict()  # peerID -> ['working'|'starting'|'disconnected', ...]
        # recv_queues for the ten types of messages
        self.recv_queue = [Queue() for i in range(self.configs['p2p']['num_queue'])]

    # for server
    def _new_conn(self, data, address):
        """Server callback for inbound data: validate the hello and either
        confirm the new peer or send it a disconnect reason."""
        if self.state is State.STARTED:
            try:
                node_info = self.check_hello(data, address)
            except:  # TODO: implement class of errors
                print ("New connection failed!")
                raise
            else:
                disagree = self.approve_connection(node_info['ID'])
                # NOTE(review): the Peer is created even when the connection
                # is refused; it is only told to disconnect afterwards.
                peer = self.create_peer(node_info)
                if disagree:
                    peer.send_disconnect(disagree)
                else:
                    self.status_book[peer.peerID] = ['starting']
                    peer.send_confirm()
        else:
            print ("Incorrect state! ", self.state)

    # Other functions
    def start(self):
        """Start the greenlet and the server, then bootstrap."""
        print ("Starting peerManager...")
        gevent.Greenlet.start(self)
        self.server.start()
        self.state = State.STARTED
        self.bootstrap()
        #self.discovery()
        gevent.sleep(5)

    def bootstrap(self):
        """Attempt an outgoing connection to every configured bootstrap node."""
        print ("Bootstrapping to nodes...")
        if self.state is State.STARTED:
            for node in self.configs['p2p']['bootstrap_nodes']:
                addr, pID = node
                node_info = dict(ID=pID, addr=addr)
                try:
                    self.make_connection(node_info)
                except:
                    print ("Connection to node: %s failed." % pID)

    # TODO
    def discovery(self):
        pass

    def make_connection(self, node_info):
        """Open a connection to ``node_info``; returns True on success."""
        disagree = self.approve_connection(node_info['ID'])
        if not disagree:
            peer = self.create_peer(node_info)
            self.status_book[peer.peerID] = ['starting']
            peer.send_hello()
            return True
        else:
            print("Reason: %s" % disagree)
            return False

    def broadcast(self, packet, num_peers=None, excluded=None):
        """Send ``packet`` to up to ``num_peers`` random peers (default: all),
        skipping the peer ids listed in ``excluded``.

        BUG FIX: ``excluded`` previously used a mutable default ([]), which
        is shared between calls; use None as the sentinel instead.
        """
        if excluded is None:
            excluded = []
        valid_peers = [p for p in self.peers if p.peerID not in excluded]
        num_peers = num_peers if num_peers else len(valid_peers)
        for peer in random.sample(valid_peers,
                                  min(num_peers, len(valid_peers))):
            if self.state is State.STARTED:
                print("Sending broadcast to peer: %s" % peer.peerID)
                peer.send_packet(packet)

    def send(self, packet, peerID):
        """Send ``packet`` to the single peer identified by ``peerID``."""
        if self.state is State.STARTED:
            peer = [p for p in self.peers if p.peerID == peerID]
            if peer:
                try:
                    assert len(peer) == 1, "Duplicate peer in peer list! "
                except AssertionError as e:
                    print (e)
                    return
                print("Sending packet to specific peer: %s" % peer[0].peerID)
                peer[0].send_packet(packet)
            else:
                # TODO: use chord to find next peer
                print ("Target peer not in peer list!")
                return
        else:
            print ("Peermanager not started yet. Try again later.")

    def stop(self):
        """Disconnect all peers, stop the server and kill the greenlet."""
        print ("Stopping peerManager...")
        if self.state is not State.STOPPED:
            self.state = State.STOPPING
            self.server.stop()
            msg = "Peer stopping."
            for peer in self.peers:
                peer.send_disconnect(msg)
            gevent.Greenlet.kill(self)
            self.state = State.STOPPED

    # Helper functions
    def check_hello(self, data, address):
        """Unpickle and verify an inbound hello; return its node info.

        SECURITY NOTE: pickle.loads on network data allows arbitrary code
        execution; the payload should be a safe serialization format.
        NOTE(review): if packet.verify() raises, this falls through and
        implicitly returns None — callers will then KeyError on ['ID'].
        """
        packet = pickle.loads(data)
        assert isinstance(packet, Packet)
        try:
            packet.verify()
        except:
            print ("Uh oh. Problem in hello packet.")
        else:
            try:
                assert packet.ctrl_code == "hello", \
                    "This is not a hello packet! Received %s" % packet.ctrl_code
                assert packet.node['addr'] == address, \
                    "Address mismatch! Expected: %s; Got: %s" % (packet.node['addr'], address)
            except AssertionError as e:
                raise e
            else:
                node_info = packet.node
                return node_info

    def create_peer(self, node_info):
        """Create, link and start a Peer greenlet; track it in self.peers."""
        peer = Peer(self, node_info)
        print("Peer created.")
        peer.link(peer_die)
        peer.start()
        self.peers.append(peer)
        return peer

    def approve_connection(self, peerID):
        """Return a refusal reason string, or None if the peer is acceptable."""
        msg = None
        if len(self.peers) >= self.configs['p2p']['max_peers']:
            msg = "Too many peers."
        if peerID in [p.peerID for p in self.peers]:
            msg = "Duplicate connection."
        return msg

    def log(self, peerID, status, reasons=None):
        if status:
            self.status_book[peerID] = ['working']
        else:
            self.status_book[peerID] = ['disconnected', reasons]

    def check_status(self, peerID):
        return self.status_book[peerID]
class DMClient_CoAP_Server(DeviceClass):
    """ Handles and process the CoAP requests for Registration, Discovery, Observation/Notifications and etc """

    def __init__(self, api, dm_server_ip, dm_server_port, local_server_ip,
                 local_server_port, local_client_ip, local_client_port):
        # subscription_list / sender_info are parallel lists: entry i of
        # sender_info is the CoAP return-address for subscription i.
        super(DMClient_CoAP_Server, self).__init__()
        self.subscription_list = []
        self.sender_info = []
        self.lwm2m_dm_server_ip = dm_server_ip
        self.lwm2m_dm_server_port = dm_server_port
        self.local_server_ip = local_server_ip
        self.local_server_port = local_server_port
        self.local_client_ip = local_client_ip
        self.local_client_port = local_client_port
        #TODO: change datagram server to wrapper of coap server
        self.local_server = DatagramServer(
            (self.local_server_ip, self.local_server_port),
            self.handle_request)
        self.api = api
        # NOTE(review): self.storeDiscoverPaths is appended to in process()
        # but never initialized here — verify it is set by DeviceClass,
        # otherwise the first registration raises AttributeError.

    def handle_request(self, message, remote):
        # DatagramServer callback: decode the CoAP message and dispatch.
        rx_record = connection.ReceptionRecord(None, message, remote)
        msg = rx_record.message
        uriQuery = msg.findOption(options.UriQuery)
        self.process(rx_record, remote, uriQuery)

    def start_server(self, ):
        print "Local Server Started"
        self.local_server.start()

    def stop_server(self, ):
        print "Local Server Stopped"
        self.local_server.stop()

    def process(self, rx_record, remote, uriQ):
        """ Processes the POST, PUT and GET requests """
        msg = rx_record.message
        self.uriQuery1 = uriQ
        self.payload = msg.payload
        global resourceString
        if constants.POST == msg.code:
            #Registration Client
            global server_ip_port, location_address
            q = rx_record.message.findOption(URI_QUERY_VALUE)[0].value
            #TODO: check if coap says somthing about execute messages
            if str(q).find("execute") != -1:
                # Execute request on a local resource.
                print "entered in client:: execute field"
                self.executeResource(rx_record)
                msg = connection.Message(connection.Message.ACK,
                                         code=constants.CHANGED,
                                         payload="execution")
                self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                         remote)
            else:
                # Client registration: create objects locally, ACK, then
                # register with the remote LWM2M DM server.
                server_ip = self.lwm2m_dm_server_ip
                server_port = self.lwm2m_dm_server_port
                server_ip_port = server_ip + ":" + str(server_port)
                payload = msg.payload
                path = "rd?"
                query = rx_record.message.findOption(
                    URI_QUERY_VALUE)[0].value.strip()
                #Creates objects in the local database
                local_lwm2m_client.create_mgmt_objects(query, payload)
                msg = connection.Message(connection.Message.ACK,
                                         code=constants.CREATED,
                                         payload="Client Registered")
                self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                         remote)
                #Registration in the Server
                # NOTE(review): this uses the module-global
                # lwm2m_server_ip_port, not the server_ip_port computed just
                # above — confirm which one is intended.
                res_register = client_request.clientRegistration(
                    lwm2m_server_ip_port, path, query, payload,
                    client_port=5683)
                location_address = res_register.findOption(
                    LOCATION_VALUE)[0].value
                self.storeDiscoverPaths.append({
                    "path": "rd",
                    "location": location_address,
                    "objectID": None,
                    "objectInstID": None,
                    "resID": None})
        elif constants.PUT == msg.code:
            check_pmax = 0
            #pmin and pmax imposed from the dm server, upath related to an object
            for val1 in uriQ:
                if str(val1).find("pmax") != -1:
                    check_pmax = 1
            if check_pmax == 1:
                # Write-attributes request (pmax/pmin) from the DM server.
                path = []
                for v in rx_record.message.findOption(URI_PATH_VALUE):
                    path.append(v.value)
                upath = "/".join(path)
                pmax = str(rx_record.message.findOption(
                    URI_QUERY_VALUE)[0].value).split("=")[1]
                pmin = str(rx_record.message.findOption(
                    URI_QUERY_VALUE)[1].value).split("=")[1]
                check_pmax = 0
                filtered_resources = self.write_attributes(upath, pmax, pmin)
                msg = connection.Message(connection.Message.ACK,
                                         code=constants.CHANGED,
                                         payload=filtered_resources)
                self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                         remote)
            else:
                #TODO: Write: standard exists for writing resources?
                self.resource_update(rx_record, self.uriQuery1, remote)
        elif constants.GET == msg.code:
            try:
                observe_value = rx_record.message.findOption(
                    options.Observe).value
            except ValueError:
                observe_value = None  #-1
            if observe_value == OBSERVE_OPTION_VALUE_OBSERVATION:
                filtered_resources = self.observe_resource(
                    rx_record, self.uriQuery1)
            elif observe_value == OBSERVE_OPTION_VALUE_CANCEL_OBSERVATION:
                filtered_resources = self.cancel_observe_resource(rx_record)
            elif str(rx_record.message.findOption(URI_PATH_VALUE)
                     [0].value).find("rd") != -1:
                filtered_resources = self.handle_discover_request(rx_record)
            # NOTE(review): filtered_resources is unbound if none of the
            # branches above matched.
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CONTENT,
                                     payload=json.dumps(filtered_resources))
            self.local_server.sendto(msg._pack(rx_record.transaction_id),
                                     remote)

    def resource_update(self, rx_record, uriQuery1, remote):
        """ Updates the resources in the local database and LWM2M server """
        store_query = []
        for val in self.uriQuery1:
            splitQuery = str(str(val).split(":")[1]).strip()
            store_query.append({"id": -1,
                                "code": str(splitQuery).split("=")[0],
                                "value": str(splitQuery).split("=")[1]})
        # Payload format: "<objectID>/<objectInstID>".
        split_payload = str(rx_record.message.payload).split("/")
        local_lwm2m_client.update_mgmt_object(split_payload[0],
                                              split_payload[1], store_query)
        payloadForServer = rx_record.message.payload
        msg = connection.Message(connection.Message.ACK,
                                 code=constants.CHANGED,
                                 payload="Resource Updated")
        self.local_server.sendto(msg._pack(rx_record.transaction_id), remote)
        # Updates the LWM2M Server
        # NOTE(review): location_address and lwm2m_server_ip_port are module
        # globals set during registration; splitQuery is the LAST loop value
        # above, so only the final query option is forwarded — confirm.
        path = "rd/" + location_address + "?"
        query = str(splitQuery).split("=")[0] + "=" + str(splitQuery).split("=")[1]
        payload = payloadForServer
        client_request.clientUpdate(lwm2m_server_ip_port,
                                    query,
                                    payload,
                                    path=path,
                                    client_port=self.local_client_port)

    def myfunc(self, ):
        # Helper kept for the (currently disabled) webcam-stream experiment.
        self.p1 = subprocess.Popen(['sh', 'webcamstart.sh', '127.0.0.1',
                                    '34000'])  #, stdout = subprocess.PIPE)
        self.p1.communicate()

    def executeResource(self, rx_record):
        # Execute a resource: URI path is /rd/<loc>/<obj>/<inst>/<res>.
        for path_element in rx_record.message.findOption(URI_PATH_VALUE):
            print "client: elements %s" % path_element
        objectID = rx_record.message.findOption(URI_PATH_VALUE)[2].value
        objectInstID = rx_record.message.findOption(URI_PATH_VALUE)[3].value
        resID = rx_record.message.findOption(URI_PATH_VALUE)[4].value
        print objectID, objectInstID, resID
        print rx_record.message.payload
        storeQuery = []
        storeQuery.append({"id": int(resID),
                           "code": "Null",
                           "value": int(rx_record.message.payload)})
        local_lwm2m_client.update_mgmt_object(objectID, objectInstID,
                                              storeQuery)
        # Launch the demo application asynchronously via the plugin API.
        self.api.run_task(call, ["cheese"])
        #call(["cheese"])

    def handle_discover_request(self, rx_record):
        # Map URI path depth to discover granularity:
        # /rd/<ep>[/<obj>[/<inst>[/<res>]]]
        path = []
        for v in rx_record.message.findOption(URI_PATH_VALUE):
            path.append(v.value)
        # Payload carries "app_ip=...&app_port=..." of the requesting app.
        pload = str(rx_record.message.payload).split("&")
        app_ip = str(pload[0]).split("=")[1]
        app_port = str(pload[1]).split("=")[1]
        if len(path) == 2:
            filtered_resources = self.discover_resource(
                app_ip, app_port, endPoint=path[1], objectID=None,
                objectInstID=None, resID=None)
        elif len(path) == 3:
            filtered_resources = self.discover_resource(
                app_ip, app_port, endPoint=path[1], objectID=path[2],
                objectInstID=None, resID=None)
        elif len(path) == 4:
            filtered_resources = self.discover_resource(
                app_ip, app_port, endPoint=path[1], objectID=path[2],
                objectInstID=path[3], resID=None)
        elif len(path) == 5:
            filtered_resources = self.discover_resource(
                app_ip, app_port, endPoint=path[1], objectID=path[2],
                objectInstID=path[3], resID=path[4])
        return filtered_resources

    def discover_resource(self, app_ip, app_port, endPoint, objectID=None,
                          objectInstID=None, resID=None):
        # Return object-level entries when no objectID is given, or
        # resource-level entries for a specific object instance.
        # NOTE(review): the (objectID set, objectInstID None) combination
        # falls through and returns an empty list — confirm intended.
        total_result = []
        if objectID is None:
            answer = local_lwm2m_client.return_maintain_objects()
            for ans in answer:
                total_result.append({"app_ip": app_ip,
                                     "app_port": app_port,
                                     "endPointName": endPoint,
                                     "objectID": ans["objectID"],
                                     "objectInstID": ans["objectInstID"],
                                     "pmax": ans["pmax"],
                                     "pmin": ans["pmin"]})
        elif objectID != None and objectInstID != None and resID == None:
            answer = local_lwm2m_client.return_resources(objectID,
                                                         objectInstID)
            for ans in answer:
                total_result.append({"app_ip": app_ip,
                                     "app_port": app_port,
                                     "endPointName": endPoint,
                                     "objectID": objectID,
                                     "objectInstID": objectInstID,
                                     "pmax": ans["pmax"],
                                     "pmin": ans["pmin"],
                                     "resID": ans["resID"],
                                     "resValue": ans["resValue"]})
        return total_result

    def observe_resource(self, rx_record, query):
        # Register a new observation and return the first notification
        # (None if the subscription already existed / nothing was added).
        first_notification = None
        uri_port = rx_record.message.findOption(URI_PORT_VALUE).value
        uri_host = rx_record.message.findOption(URI_HOST_VALUE).value
        s_list = {}
        sender_details = {}
        path = []
        for v in rx_record.message.findOption(URI_PATH_VALUE):
            path.append(v.value)
        pload = str(rx_record.message.payload).split("&")
        app_ip = str(pload[0]).split("=")[1]
        app_port = str(pload[1]).split("=")[1]
        #check if the path includes the object instance id or just the object id
        if len(path) == 3:  #observation to object id
            path.append("None")
            path.append("None")
        elif len(path) == 4:  #observation to resource id
            path.append("None")
        objectID = path[2]
        objectInstID = path[3]
        resID = path[4]
        s_list = {"app_ip": app_ip,
                  "app_port": app_port,
                  "objectID": objectID,
                  "objectInstID": objectInstID,
                  "resID": resID}
        sender_details = {"transaction_id": rx_record.transaction_id,
                          "uri_host": uri_host,
                          "uri_port": uri_port}
        #store the new observation
        if len(self.subscription_list) == 0:
            self.subscription_list.append(s_list)
            self.sender_info.append(sender_details)
            first_notification = local_lwm2m_client.observe_res(
                self.subscription_list, s_list, self.sender_info)
        else:
            # NOTE(review): this appends to subscription_list while
            # iterating it, and can append duplicates (once per existing
            # non-equal entry) — confirm before relying on it.
            for test in self.subscription_list:
                if test != s_list:
                    self.subscription_list.append(s_list)
                    self.sender_info.append(sender_details)
                    first_notification = local_lwm2m_client.observe_res(
                        self.subscription_list, s_list, self.sender_info)
        return first_notification

    def cancel_observe_resource(self, rx_record):
        # Remove a stored observation matching the app's ip/port/objectID.
        counter = 0
        path = []
        for v in rx_record.message.findOption(URI_PATH_VALUE):
            path.append(v.value)
        pload = str(rx_record.message.payload).split("&")
        app_ip = str(pload[0]).split("=")[1]
        app_port = str(pload[1]).split("=")[1]
        if len(path) == 3:
            path.append("None")
            path.append("None")
        elif len(path) == 4:
            path.append("None")
        objectID = path[2]
        objectInstID = path[3]
        resID = path[4]
        try:
            # NOTE(review): removing from subscription_list while iterating
            # it can skip entries; counter tracks the parallel sender_info
            # index.
            for val in self.subscription_list:
                if val["app_ip"] == app_ip and val["app_port"] == app_port and val["objectID"] == objectID:
                    self.subscription_list.remove({"app_ip": app_ip,
                                                   "app_port": app_port,
                                                   "objectID": objectID,
                                                   "objectInstID": objectInstID,
                                                   "resID": resID})
                    self.sender_info.pop(counter)
                counter += 1
            local_lwm2m_client.cancel_obs_res(objectID,
                                              self.subscription_list)
        except:
            print "The Object ID %s is not present" % objectID
        return "Unsubscribed Successful"

    def write_attributes(self, upath, pmax, pmin):
        # Store pmax/pmin attributes for the object named in upath
        # ("rd/<loc>/<objectID>/...": splitPath[2] is the objectID).
        splitPath = str(upath).split("/")
        create_str = []
        create_str.append({"attCode": "pmax", "attValue": pmax})
        create_str.append({"attCode": "pmin", "attValue": pmin})
        return local_lwm2m_client.write_attr(create_str, splitPath[2])
class BacnetServer(object):
    """UDP BACnet honeypot endpoint.

    Builds a local BACnet device from the XML template, then serves
    incoming APDUs through a gevent ``DatagramServer``.
    """

    def __init__(self, template, template_directory, args):
        self.dom = etree.parse(template)
        databus = conpot_core.get_databus()
        info = self.dom.xpath('//bacnet/device_info')[0]

        def first_text(expr):
            # Single text node under the device_info element.
            return info.xpath(expr)[0]

        device_name = databus.get_value(first_text('./device_name/text()'))
        device_id = first_text('./device_identifier/text()')
        vendor_name = first_text('./vendor_name/text()')
        vendor_id = first_text('./vendor_identifier/text()')
        max_apdu_len = first_text('./max_apdu_length_accepted/text()')
        segmentation = first_text('./segmentation_supported/text()')
        # self.local_device_address = dom.xpath('./@*[name()="host" or name()="port"]')
        self.thisDevice = LocalDeviceObject(
            objectName=device_name,
            objectIdentifier=int(device_id),
            maxApduLengthAccepted=int(max_apdu_len),
            segmentationSupported=segmentation,
            vendorName=vendor_name,
            vendorIdentifier=int(vendor_id)
        )
        self.bacnet_app = None  # created in start(), needs the live socket
        logger.info('Conpot Bacnet initialized using the %s template.', template)

    def handle(self, data, address):
        """Datagram handler: decode one APDU and let the app answer it."""
        session = conpot_core.get_session('bacnet', address[0], address[1])
        logger.info('New Bacnet connection from %s:%d. (%s)',
                    address[0], address[1], session.id)
        session.add_event({'type': 'NEW_CONNECTION'})

        # I'm not sure if gevent DatagramServer handles issues where the
        # received data is over the MTU -> fragmentation
        if data:
            pdu = PDU()
            pdu.pduData = data
            apdu = APDU()
            try:
                apdu.decode(pdu)
            except DecodingError as e:
                logger.error("DecodingError: %s", e)
                logger.error("PDU: " + format(pdu))
                return
            self.bacnet_app.indication(apdu, address, self.thisDevice)
            self.bacnet_app.response(self.bacnet_app._response, address)

        logger.info('Bacnet client disconnected %s:%d. (%s)',
                    address[0], address[1], session.id)

    def start(self, host, port):
        """Bind the UDP server, wire up the BACnet app and serve forever."""
        connection = (host, port)
        self.server = DatagramServer(connection, self.handle)
        # start to init the socket
        self.server.start()
        self.server.socket.setsockopt(socket.SOL_SOCKET,
                                      socket.SO_BROADCAST, 1)
        # create application instance
        # not too beautifull, but the BACnetApp needs access to the socket's sendto method
        # this could properly be refactored in a way such that sending operates on it's own
        # (non-bound) socket.
        self.bacnet_app = BACnetApp(self.thisDevice, self.server)
        # get object_list and properties
        self.bacnet_app.get_objects_and_properties(self.dom)
        logger.info('Bacnet server started on: %s', connection)
        self.server.serve_forever()

    def stop(self):
        self.server.stop()
class NodeDiscovery(BaseService, DiscoveryProtocolTransport):

    """
    Persist the list of known nodes with their reputation
    """

    cpp_bootstrap = 'enode://24f904a876975ab5c7acbedc8ec26e6f7559b527c073c6e822049fee4df78f2e9c74840587355a068f2cdb36942679f7a377a6d8c5713ccf40b1d4b99046bba0@5.1.83.226:30303'
    go_bootstrap = 'enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303'
    go_bootstrap2 = 'enode://7f25d3eab333a6b98a8b5ed68d962bb22c876ffcd5561fca54e3c2ef27f754df6f7fd7c9b74cc919067abac154fb8e1f8385505954f161ae440abc355855e034@54.207.93.166:30303'
    py_bootstrap = 'enode://f6ba1f1d9241d48138136ccf5baa6c2c8b008435a1c2bd009ca52fb8edbbc991eba36376beaee9d45f16d5dcbf2ed0bc23006c505d57ffcf70921bd94aa7a172@144.76.62.101:30303'
    bootstrap_nodes = [cpp_bootstrap, go_bootstrap, go_bootstrap2, py_bootstrap]

    name = 'discovery'
    server = None  # set in start() to the listening DatagramServer

    default_config = dict(
        discovery=dict(
            listen_port=30303,
            listen_host='0.0.0.0',
            bootstrap_nodes=bootstrap_nodes,
        ),
        node=dict(privkey_hex=''))

    def __init__(self, app):
        """Create the service and its discovery wire protocol."""
        BaseService.__init__(self, app)
        log.info('NodeDiscovery init')
        # man setsockopt
        self.protocol = DiscoveryProtocol(app=self.app, transport=self)

    @property
    def address(self):
        """The local (host, port) this service listens on, as an Address."""
        cfg = self.app.config['discovery']
        return Address(cfg['listen_host'], cfg['listen_port'])

    def send(self, address, message):
        """Send one datagram to ``address``; on a socket error, log and
        sleep 5s hoping the interface recovers."""
        assert isinstance(address, Address)
        log.debug('sending', size=len(message), to=address)
        try:
            self.server.sendto(message, (address.ip, address.udp_port))
        except gevent.socket.error as exc:
            log.critical('udp write error', errno=exc.errno, reason=exc.strerror)
            log.critical('waiting for recovery')
            gevent.sleep(5.)

    def receive(self, address, message):
        """Hand an inbound datagram to the discovery protocol."""
        assert isinstance(address, Address)
        self.protocol.receive(address, message)

    def _handle_packet(self, message, ip_port):
        """DatagramServer callback: wrap the UDP peer address and dispatch."""
        log.debug('handling packet', address=ip_port, size=len(message))
        assert len(ip_port) == 2
        self.receive(Address(ip=ip_port[0], udp_port=ip_port[1]), message)

    def start(self):
        """Open the UDP listener, then bootstrap to the configured nodes."""
        log.info('starting discovery')
        # start a listening server
        cfg = self.app.config['discovery']
        host = cfg['listen_host']
        port = cfg['listen_port']
        log.info('starting listener', port=port, host=host)
        self.server = DatagramServer((host, port), handle=self._handle_packet)
        self.server.start()
        super(NodeDiscovery, self).start()

        # bootstap
        boot_nodes = [Node.from_uri(uri) for uri in cfg['bootstrap_nodes']]
        if boot_nodes:
            self.protocol.kademlia.bootstrap(boot_nodes)

    def _run(self):
        """Greenlet body: nothing to do actively, just stay alive."""
        log.debug('_run called')
        gevent.event.Event().wait()

    def stop(self):
        log.info('stopping discovery')
        self.server.stop()
        super(NodeDiscovery, self).stop()
class UDPTransport:
    """ Node communication using the UDP protocol. """

    # NOTE(review): `throttle_policy=DummyPolicy()` is a mutable default
    # argument, so every instance created without an explicit policy shares
    # the single DummyPolicy built at class-definition time — confirm this is
    # intended (it is harmless only if DummyPolicy is stateless).
    def __init__(
            self,
            host,
            port,
            socket=None,
            protocol=None,
            throttle_policy=DummyPolicy()):

        self.protocol = protocol
        # An already-bound socket takes precedence over (host, port).
        if socket is not None:
            self.server = DatagramServer(socket, handle=self.receive)
        else:
            self.server = DatagramServer((host, port), handle=self.receive)
        # Record the effective bind address as resolved by the server.
        self.host = self.server.server_host
        self.port = self.server.server_port
        self.throttle_policy = throttle_policy

    def receive(self, data, host_port):  # pylint: disable=unused-argument
        """ DatagramServer handler: hand the raw datagram to the protocol.

        Undecodable packets are logged and dropped; RaidenShuttingDown is
        swallowed so in-flight packets don't error out during shutdown.
        """
        try:
            self.protocol.receive(data)
        except InvalidProtocolMessage as e:
            if log.isEnabledFor(logging.WARNING):
                log.warning("Can't decode: {} (data={}, len={})".format(str(e), data, len(data)))
            return
        except RaidenShuttingDown:  # For a clean shutdown
            return

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.track_recv(self.protocol.raiden, host_port, data)

    def send(self, sender, host_port, bytes_):
        """ Send `bytes_` to `host_port`.

        Args:
            sender (address): The address of the running node.
            host_port (Tuple[(str, int)]): Tuple with the host name and port number.
            bytes_ (bytes): The bytes that are going to be sent through the wire.

        Raises:
            RuntimeError: If the underlying server socket was already closed.
        """
        # Rate-limit sends; consume() returns how long to wait, 0 for "go".
        sleep_timeout = self.throttle_policy.consume(1)

        # Don't sleep if timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing its latency
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        # gevent's BaseServer deletes the `socket` attribute on close, so its
        # absence means the transport was stopped.
        if not hasattr(self.server, 'socket'):
            raise RuntimeError('trying to send a message on a closed server')

        self.server.sendto(bytes_, host_port)

        # enable debugging using the DummyNetwork callbacks
        DummyTransport.network.track_send(sender, host_port, bytes_)

    def stop(self):
        """ Stop the server and force-close the underlying OS socket. """
        self.server.stop()
        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()
        except socket.error:
            pass

    def stop_accepting(self):
        """ Stop handling incoming packets without closing the socket. """
        self.server.stop_accepting()

    def start(self):
        """ (Re)start the server; safe only when it is not already running. """
        assert not self.server.started
        # server.stop() clears the handle, since this may be a restart the
        # handle must always be set
        self.server.set_handle(self.receive)
        self.server.start()
class UDPTransport:
    """ Raiden UDP transport: ordered per-recipient send queues, health
    checking via Ping/Pong, and delivery confirmation via Delivered messages.
    """

    def __init__(self, discovery, udpsocket, throttle_policy, config):
        """ Build the transport around an already-created UDP socket.

        Args:
            discovery: Endpoint registry; `discovery.get(address)` resolves a
                node address to a (host, port) pair (memoized below).
            udpsocket: Bound socket handed to gevent's DatagramServer.
            throttle_policy: Object whose `consume(tokens)` returns a sleep
                timeout used to rate-limit outgoing packets.
            config (dict): Retry/keepalive timing parameters (see keys below).
        """
        # these values are initialized by the start method
        self.queueids_to_queues: typing.Dict
        self.raiden: 'RaidenService'

        self.discovery = discovery
        self.config = config

        self.retry_interval = config['retry_interval']
        self.retries_before_backoff = config['retries_before_backoff']
        self.nat_keepalive_retries = config['nat_keepalive_retries']
        self.nat_keepalive_timeout = config['nat_keepalive_timeout']
        self.nat_invitation_timeout = config['nat_invitation_timeout']

        # Set on shutdown; all queue/healthcheck greenlets watch this event.
        self.event_stop = Event()

        # Greenlets spawned for healthchecks and queue senders; joined in
        # stop_and_wait().
        self.greenlets = list()
        # recipient address -> HealthEvents
        self.addresses_events = dict()

        # Maps the message_id to a SentMessageState
        self.messageids_to_asyncresults = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        # Memoize discovery lookups for CACHE_TTL seconds so every send does
        # not hit the endpoint registry.
        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)

        self.throttle_policy = throttle_policy
        self.server = DatagramServer(udpsocket, handle=self._receive)

    def start(self, raiden, queueids_to_queues):
        """ Start serving: restore the given outgoing queues (copied) and
        begin accepting datagrams.
        """
        self.raiden = raiden
        self.queueids_to_queues = dict()

        # server.stop() clears the handle. Since this may be a restart the
        # handle must always be set
        self.server.set_handle(self._receive)

        for (recipient, queue_name), queue in queueids_to_queues.items():
            queue_copy = list(queue)
            self.init_queue_for(recipient, queue_name, queue_copy)

        self.server.start()

    def stop_and_wait(self):
        """ Orderly shutdown: stop receiving, drain/stop the outgoing
        greenlets, close the socket, and fail all pending send results.
        """
        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.server.stop_accepting()

        # Stop processing the outgoing queues
        self.event_stop.set()
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.server.stop()

        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()  # pylint: disable=protected-access
        except socket.error:
            pass

        # Set all the pending results to False
        for async_result in self.messageids_to_asyncresults.values():
            async_result.set(False)

    def get_health_events(self, recipient):
        """ Starts a healthcheck task for `recipient` and returns a
        HealthEvents with locks to react on its current state.
        """
        if recipient not in self.addresses_events:
            self.start_health_check(recipient)

        return self.addresses_events[recipient]

    def start_health_check(self, recipient):
        """ Starts a task for healthchecking `recipient` if there is not
        one yet.
        """
        if recipient not in self.addresses_events:
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                recipient,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = HealthEvents(
                event_healthy=Event(),
                event_unhealthy=Event(),
            )

            self.addresses_events[recipient] = events

            self.greenlets.append(gevent.spawn(
                healthcheck,
                self,
                recipient,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            ))

    def init_queue_for(self, recipient, queue_name, items):
        """ Create the queue identified by the pair `(recipient, queue_name)`
        and initialize it with `items`.

        Also spawns the single_queue_send greenlet that drains the queue and
        (via get_health_events) a healthcheck task for the recipient.
        """
        queueid = (recipient, queue_name)
        queue = self.queueids_to_queues.get(queueid)
        # Creating the same queue twice would orphan a sender greenlet.
        assert queue is None

        queue = NotifyingQueue(items=items)
        self.queueids_to_queues[queueid] = queue

        events = self.get_health_events(recipient)

        self.greenlets.append(gevent.spawn(
            single_queue_send,
            self,
            recipient,
            queue,
            self.event_stop,
            events.event_healthy,
            events.event_unhealthy,
            self.retries_before_backoff,
            self.retry_interval,
            self.retry_interval * 10,
        ))

        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'new queue created for',
                node=pex(self.raiden.address),
                token=pex(queue_name),
                to=pex(recipient),
            )

        return queue

    def get_queue_for(self, recipient, queue_name):
        """ Return the queue identified by the pair `(recipient, queue_name)`.

        If the queue doesn't exist it will be instantiated.
        """
        queueid = (recipient, queue_name)
        queue = self.queueids_to_queues.get(queueid)

        if queue is None:
            items = ()
            queue = self.init_queue_for(recipient, queue_name, items)

        return queue

    def send_async(self, queue_name, recipient, message):
        """ Send a new ordered message to recipient.

        Messages that use the same `queue_name` are ordered.

        Raises:
            ValueError: If `recipient` is not a valid address, the message is
                a transport-internal type (Delivered/Ping/Pong), or the
                encoded payload exceeds UDP_MAX_MESSAGE_SIZE.
        """
        if not isaddress(recipient):
            raise ValueError('Invalid address {}'.format(pex(recipient)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError('Do not use send for {} messages'.format(
                message.__class__.__name__))

        messagedata = message.encode()
        if len(messagedata) > UDP_MAX_MESSAGE_SIZE:
            raise ValueError('message size exceeds the maximum {}'.format(
                UDP_MAX_MESSAGE_SIZE))

        # message identifiers must be unique
        message_id = message.message_identifier

        # ignore duplicates
        if message_id not in self.messageids_to_asyncresults:
            self.messageids_to_asyncresults[message_id] = AsyncResult()

            queue = self.get_queue_for(recipient, queue_name)
            queue.put((messagedata, message_id))

            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'MESSAGE QUEUED',
                    node=pex(self.raiden.address),
                    queue_name=queue_name,
                    to=pex(recipient),
                    message=message,
                )

    def maybe_send(self, recipient, message):
        """ Send message to recipient if the transport is running. """
        if not isaddress(recipient):
            raise InvalidAddress('Invalid address {}'.format(pex(recipient)))

        messagedata = message.encode()
        host_port = self.get_host_port(recipient)

        self.maybe_sendraw(host_port, messagedata)

    def maybe_sendraw_with_result(self, recipient, messagedata, message_id):
        """ Send message to recipient if the transport is running.

        Returns:
            An AsyncResult that will be set once the message is delivered. As
            long as the message has not been acknowledged with a Delivered
            message the function will return the same AsyncResult.
        """
        async_result = self.messageids_to_asyncresults.get(message_id)
        if async_result is None:
            async_result = AsyncResult()
            self.messageids_to_asyncresults[message_id] = async_result

        host_port = self.get_host_port(recipient)
        self.maybe_sendraw(host_port, messagedata)

        return async_result

    def maybe_sendraw(self, host_port, messagedata):
        """ Send message to recipient if the transport is running. """
        # Don't sleep if timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing its latency
        sleep_timeout = self.throttle_policy.consume(1)
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        # Check the udp socket is still available before trying to send the
        # message. There must be *no context-switches after this test*.
        if hasattr(self.server, 'socket'):
            self.server.sendto(
                messagedata,
                host_port,
            )

    def _receive(self, data, host_port):  # pylint: disable=unused-argument
        # DatagramServer handler; the sender's (host, port) is ignored — the
        # message itself carries the authenticated sender.
        try:
            self.receive(data)
        except RaidenShuttingDown:  # For a clean shutdown
            return

    def receive(self, messagedata):
        """ Handle an UDP packet. """
        # pylint: disable=unidiomatic-typecheck

        if len(messagedata) > UDP_MAX_MESSAGE_SIZE:
            log.error(
                'INVALID MESSAGE: Packet larger than maximum size',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
                length=len(messagedata),
            )
            return

        message = decode(messagedata)

        # Exact type checks: Ping/Pong/Delivered are transport-internal and
        # must not reach the protocol message handler.
        if type(message) == Pong:
            self.receive_pong(message)
        elif type(message) == Ping:
            self.receive_ping(message)
        elif type(message) == Delivered:
            self.receive_delivered(message)
        elif message is not None:
            self.receive_message(message)
        elif log.isEnabledFor(logging.ERROR):
            log.error(
                'INVALID MESSAGE: Unknown cmdid',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
            )

    def receive_message(self, message):
        """ Handle a Raiden protocol message.

        The protocol requires durability of the messages. The UDP transport
        relies on the node's WAL for durability. The message will be converted
        to a state change, saved to the WAL, and *processed* before the
        durability is confirmed, which is a stronger property than what is
        required of any transport.
        """
        # pylint: disable=unidiomatic-typecheck

        if on_udp_message(self.raiden, message):
            # Sending Delivered after the message is decoded and *processed*
            # gives a stronger guarantee than what is required from a
            # transport.
            #
            # Alternatives are, from weakest to strongest options:
            # - Just save it on disk and asynchronously process the messages
            # - Decode it, save to the WAL, and asynchronously process the
            #   state change
            # - Decode it, save to the WAL, and process it (the current
            #   implementation)
            delivered_message = Delivered(message.message_identifier)
            self.raiden.sign(delivered_message)

            self.maybe_send(
                message.sender,
                delivered_message,
            )

    def receive_delivered(self, delivered: Delivered):
        """ Handle a Delivered message.

        The Delivered message is how the UDP transport guarantees persistence
        by the partner node. The message itself is not part of the raiden
        protocol, but it's required by this transport to provide the required
        properties.
        """
        processed = ReceiveDelivered(delivered.delivered_message_identifier)
        self.raiden.handle_state_change(processed)

        message_id = delivered.delivered_message_identifier
        # NOTE(review): the lookup goes through
        # self.raiden.protocol.messageids_to_asyncresults while the delete
        # below targets self.messageids_to_asyncresults — presumably these are
        # the same dict (this transport being raiden.protocol); verify.
        async_result = self.raiden.protocol.messageids_to_asyncresults.get(
            message_id)

        # clear the async result, otherwise we have a memory leak
        if async_result is not None:
            del self.messageids_to_asyncresults[message_id]
            async_result.set()

    # Pings and Pongs are used to check the health status of another node. They
    # are /not/ part of the raiden protocol, only part of the UDP transport,
    # therefore these messages are not forwarded to the message handler.
    def receive_ping(self, ping):
        """ Handle a Ping message by answering with a Pong. """
        if ping_log.isEnabledFor(logging.DEBUG):
            ping_log.debug(
                'PING RECEIVED',
                node=pex(self.raiden.address),
                message_id=ping.nonce,
                message=ping,
                sender=pex(ping.sender),
            )

        pong = Pong(ping.nonce)
        self.raiden.sign(pong)

        try:
            self.maybe_send(ping.sender, pong)
        except (InvalidAddress, UnknownAddress) as e:
            # Best effort: an unresolvable sender just means no Pong goes out.
            log.debug("Couldn't send the `Delivered` message", e=e)

    def receive_pong(self, pong):
        """ Handles a Pong message.

        Sets the pending ('ping', nonce, sender) AsyncResult (if any) so the
        healthcheck task knows the peer is alive.
        """
        message_id = ('ping', pong.nonce, pong.sender)
        async_result = self.messageids_to_asyncresults.get(message_id)

        if async_result is not None:
            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'PONG RECEIVED',
                    node=pex(self.raiden.address),
                    message_id=pong.nonce,
                )

            async_result.set(True)

    def get_ping(self, nonce):
        """ Returns a signed Ping message.

        Note: Ping messages don't have an enforced ordering, so a Ping message
        with a higher nonce may be acknowledged first.
        """
        message = Ping(nonce)
        self.raiden.sign(message)
        message_data = message.encode()

        return message_data

    def set_node_network_state(self, node_address, node_state):
        """ Record a node's reachability as a state change on the node WAL. """
        state_change = ActionChangeNodeNetworkState(node_address, node_state)
        self.raiden.handle_state_change(state_change)