Example 1
class Device:
    def __init__(self):
        deviceControllerAddr = 'localhost'

        deviceControllerPort = '5555'
        clientPort = ''

        print("Device Starting\n")

        self.context = zmq.Context()   # get context

        self.loop = IOLoop.instance()

        self.clientSetup = ClientSetup(self.context)  # instantiate the ClientSetup object
        # serverSetup = ServerSetup(context)  # instantiate the ServerSetup object

        # set up separate server and client sockets
        self.clientSocket = self.clientSetup.createClientSocket()  # get a client socket

        # NOTE: setIdentity() MUST BE CALLED BEFORE clientConnect or the identity will
        # not take effect
        self.clientSetup.setIdentity(MasterId().getDevId(), self.clientSocket) # get the device id

        self.clientSetup.clientConnect(deviceControllerAddr, deviceControllerPort, self.clientSocket) # connect to server using clientSocket

        self.clientSocket = ZMQStream(self.clientSocket)
        self.clientSocket.on_recv(self.onClientRecv)
        self.messages = Messages() # instantiate a Messages object

    def onClientRecv(self,msg):
        print("on_recv, msg=", msg)
        self.cmdFrmServer = msg[0]
        self.data = msg[1]
        print("Received from DeviceController: cmd=", self.cmdFrmServer, "data=", self.data)

        self.dataList = self.messages.bufferToDict(self.data)  # convert the buffer to a dict

        print("internal list, devType={}, cmd={}, data={}, returnList={}".format(
            self.dataList['devType'], self.dataList['cmd'],
            self.dataList['data'], self.dataList['returnList']))


        self.clientDevId = MasterId().getDevId()
        print("Device's id=", self.clientDevId)

    def start(self):
        # self.periodic.start()
        cmdToServer = "001".encode()
        outDict = self.messages.createMessageDict('00', '001', 'Hello HostServer', [])
        print("initial state, sending this dictionary:", outDict)
        dataToServer = self.messages.dictToBuffer(outDict).encode()
        print('sending this output message:\n', dataToServer)
        self.clientSocket.send_multipart([cmdToServer, dataToServer])
        try:
            self.loop.start()

        except KeyboardInterrupt:
            pass
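
A minimal run sketch, assuming the project-specific ClientSetup, MasterId and Messages helpers are importable from the same module; the imports below are the standard pyzmq/tornado names the class relies on.

import zmq
from tornado.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream

if __name__ == '__main__':
    device = Device()
    device.start()  # sends the initial message, then blocks in the IOLoop
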
Example 2
class LRUQueue(object):
    def __init__(self, backend_socket, frontend_socket, clients, workers):
        self.available_workers = 0
        self.workers = []
        self.worker_num = workers
        self.client_num = clients
        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        self.backend.on_recv(self.handle_backend)
        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        worker_addr, empty, client_addr = msg[:3]
        assert self.available_workers < self.worker_num
        self.available_workers += 1
        self.workers.append(worker_addr)
        assert empty == b''
        if client_addr != b'READY':
            empty, reply = msg[3:]
            assert empty == b''
            self.frontend.send_multipart([client_addr, b'', reply])
            self.client_num -= 1
            if self.client_num == 0:
                self.loop.add_timeout(time.time() + 1, self.loop.stop)
        if self.available_workers == 1:
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        client_addr, empty, request = msg
        assert empty == b''
        self.available_workers -= 1
        worker_id = self.workers.pop()
        self.backend.send_multipart([worker_id, b'', client_addr, b'', request])
        if self.available_workers == 0:
            self.frontend.stop_on_recv()
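
A wiring sketch for this queue; the endpoint strings and the client/worker counts are illustrative, not taken from the original project.

import zmq
from tornado.ioloop import IOLoop

context = zmq.Context()
frontend = context.socket(zmq.ROUTER)   # clients connect here
frontend.bind("tcp://*:5555")
backend = context.socket(zmq.ROUTER)    # workers connect here
backend.bind("tcp://*:5556")

queue = LRUQueue(backend, frontend, clients=10, workers=3)
IOLoop.instance().start()               # runs until the queue stops the loop
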
Example 3
class Publisher(object):

    def __init__(self, context, pub_endpoint):

        self.context = context
        self.pub_endpoint = pub_endpoint

        socket = self.context.socket(zmq.PUB)
        ioloop = IOLoop.instance()
        self.publisher = ZMQStream(socket, ioloop)
        self.publisher.socket.setsockopt(zmq.LINGER, 0)
        self.publisher.bind(self.pub_endpoint)

        return

    def shutdown(self):

        self.publisher.socket.unbind(self.pub_endpoint)
        self.publisher.socket.close()
        self.publisher.close()
        self.publisher = None

        return

    def send(self, msg):

        logger.debug("Publisher sending: {0}".format(msg))
        self.publisher.send_multipart(msg)

        return
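
Usage sketch; the endpoint is illustrative, and the module-level logger the class uses is defined here as an assumption.

import logging
import zmq
from tornado.ioloop import IOLoop

logger = logging.getLogger(__name__)

context = zmq.Context()
pub = Publisher(context, "tcp://127.0.0.1:5560")
pub.send([b"status", b"ready"])   # first frame doubles as the SUB topic
IOLoop.instance().start()         # queued frames are flushed once the loop runs
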
Example 4
class ZBus(object):
    def __init__(self):
        self._context = zmq.Context()
        self._callback = {}
        self._zstream = None

    @staticmethod
    def instance():
        if not hasattr(ZBus, '_instance'):
            ZBus._instance = ZBus()
        return ZBus._instance

    @staticmethod
    def initialized():
        return hasattr(ZBus, '_instance')

    def connect(self, dist):
        if self._zstream:
            self._zstream.close()
        self._zsock = self._context.socket(zmq.DEALER)  # XREQ is the deprecated alias of DEALER
        self._zsock.connect('tcp://{dist}'.format(dist=dist))
        self._zstream = ZMQStream(self._zsock)
        self._zstream.on_recv(self.on_recv)

    def send(self, request, callback):
        self._callback[request.seed_id] = callback
        self._zstream.send_multipart(request.box())

    def on_recv(self, frame):
        response = ZResponse(frame)
        callback = self._callback.pop(response.seed_id, None)
        if callback and callable(callback):
            callback(response)
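
Usage sketch; DemoRequest is a hypothetical stand-in for the project's request type (anything exposing seed_id and box()), ZResponse comes from the surrounding module, and the endpoint is illustrative.

from tornado.ioloop import IOLoop

class DemoRequest:
    seed_id = b'req-1'               # hypothetical request id
    def box(self):
        return [self.seed_id, b'ping']

bus = ZBus.instance()
bus.connect('127.0.0.1:7000')        # expands to tcp://127.0.0.1:7000
bus.send(DemoRequest(), lambda response: print('reply:', response))
IOLoop.instance().start()
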
Example 5
class WebSocketZMQBridgeHandler(websocket.WebSocketHandler):
    def open(self, *args, **kwargs):
        self.currentMessage = []
        self.__endpoint = 'tcp://localhost:224'
        socket = zmq.Context().socket(zmq.DEALER)
        self.__stream = ZMQStream(socket, IOLoop.current())
        self.__stream.on_recv(self.__onReceive)
        self.__stream.socket.setsockopt(zmq.LINGER, 0)
        self.__stream.connect(self.__endpoint)

    def on_close(self, *args, **kwargs):
        self.__stream.close()

    def on_message(self, message):
        hasMore = message[0]
        self.currentMessage.append(message[1:])
        if not hasMore:
            sendingMessage = self.currentMessage
            self.currentMessage = []
            self.__stream.send_multipart(sendingMessage)

    def __onReceive(self, msg):
        for frame in msg[:-1]:
            self.write_message(b'\x01' + frame, binary=True)
        self.write_message(b'\x00' + msg[-1], binary=True)
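
Wiring sketch for the bridge; the route and listen port are illustrative.

from tornado.ioloop import IOLoop
from tornado.web import Application

app = Application([(r"/zmq", WebSocketZMQBridgeHandler)])
app.listen(8888)
IOLoop.current().start()
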
Example 6
class ZBus(object):
    def __init__(self):
        self._context = zmq.Context()
        self._callback = {}
        self._zstream = None

    @staticmethod
    def instance():
        if not hasattr(ZBus, '_instance'):
            ZBus._instance = ZBus()
        return ZBus._instance

    @staticmethod
    def initialized():
        return hasattr(ZBus, '_instance')

    def connect(self, dist):
        if self._zstream:
            self._zstream.close()
        self._zsock = self._context.socket(zmq.DEALER)  # XREQ is the deprecated alias of DEALER
        self._zsock.connect('tcp://{dist}'.format(dist=dist))
        self._zstream = ZMQStream(self._zsock)
        self._zstream.on_recv(self.on_recv)

    def send(self, request, callback):
        self._callback[request.seed_id] = callback
        self._zstream.send_multipart(request.box())

    def on_recv(self, frame):
        response = ZResponse(frame)
        callback = self._callback.pop(response.seed_id, None)
        if callback and callable(callback):
            callback(response)
Example 7
class LRUQueue(object):
    def __init__(self, backend_socket, frontend_socket, clients, workers):
        self.available_workers = 0
        self.workers = []
        self.worker_num = workers
        self.client_num = clients
        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        self.backend.on_recv(self.handle_backend)
        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        worker_addr, empty, client_addr = msg[:3]
        assert self.available_workers < self.worker_num
        self.available_workers += 1
        self.workers.append(worker_addr)
        assert empty == b""
        if client_addr != b"READY":
            empty, reply = msg[3:]
            assert empty == b""
            self.frontend.send_multipart([client_addr, b"", reply])
            self.client_num -= 1
            if self.client_num == 0:
                self.loop.add_timeout(time.time() + 1, self.loop.stop)
        if self.available_workers == 1:
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        client_addr, empty, request = msg
        assert empty == b""
        self.available_workers -= 1
        worker_id = self.workers.pop()
        self.backend.send_multipart([worker_id, b"", client_addr, b"", request])
        if self.available_workers == 0:
            self.frontend.stop_on_recv()
Example 8
class DeviceServicePublisher(object):

    def __init__(self, context, publisher_endpoint):
        self.context = context
        self.publisher_endpoint = publisher_endpoint

        socket = self.context.socket(zmq.PUB)
        ioloop = IOLoop.instance()
        self.publisher = ZMQStream(socket, ioloop)
        self.publisher.socket.setsockopt(zmq.LINGER, 0)
        self.publisher.bind(self.publisher_endpoint)

        return

    def shutdown(self):
        self.publisher.socket.close()
        self.publisher.close()
        self.publisher = None

        return

    def send(self, msg, topic=b'all'):

        # topic frame first, then the payload; all frames must be bytes
        self.publisher.send_multipart([topic, msg])
        return
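
A matching subscriber sketch; the endpoint is illustrative. SUB sockets filter on the first frame, so subscribing to b'all' receives what send() publishes by default.

import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:5561")
sub.setsockopt(zmq.SUBSCRIBE, b'all')
topic, payload = sub.recv_multipart()
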
Example 9
File: w.py Project: themoo/Jelly
class TheWorker(object):
    def __init__(self, context):
        #context = zmq.Context(1)
        self.frontend = ZMQStream(worker_socket(context))
        self.frontend.on_recv(self.handle_frontend)

        self.liveness = HEARTBEAT_LIVENESS
        self.heartbeat = HEARTBEAT_INTERVAL
        self.interval = INTERVAL_INIT
        self.loop = IOLoop.instance()

        self.time = self.interval * self.heartbeat
        self.heartbeat_at = time.time() + self.heartbeat * HEARTBEAT_LIVENESS        
        self.callback = None
        self.timed_out = False

        self.start()

    def start(self):
        self.loop.add_timeout(time.time()+self.heartbeat, self.send_heartbeat)
        # try:
        #     IOLoop.instance().start()
        # except KeyboardInterrupt:
        #     times_str('ctrlc')


    def send_heartbeat(self):
        if time.time() > self.heartbeat_at:
            self.time *= 2 if self.time < INTERVAL_MAX else 1
            times_str('W: Timed out.. Retrying in {} seconds..'.format(self.time))
            self.callback = self.loop.add_timeout(time.time() + self.time, self.send_heartbeat)
            self.timed_out = True
            return
        
        times_str('W: Sending Heartbeat..')
        self.frontend.send(PPP_HEARTBEAT)
        self.loop.add_timeout(time.time()+self.heartbeat, self.send_heartbeat)

    def handle_frontend(self,msg):
        m = msg[:]
        if len(m) == 1:
            times_str('W: Received Heartbeat')
            if self.timed_out:
                self.loop.add_timeout(time.time()+self.heartbeat, self.send_heartbeat)
                self.timed_out = False
                self.loop.remove_timeout(self.callback)
                self.time = self.interval * self.heartbeat
        elif len(m) == 3:
            times_str('Received: '+str(m))
            time.sleep(10)
            times_str('Sending it back..')
            self.frontend.send_multipart(m)
        self.heartbeat_at = time.time() + self.heartbeat * HEARTBEAT_LIVENESS
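
Run sketch for the worker; the constants and the worker_socket/times_str helpers come from the surrounding project, so the definitions below are illustrative stand-ins loosely following the Paranoid Pirate pattern.

import time
import zmq
from tornado.ioloop import IOLoop

HEARTBEAT_LIVENESS = 3      # missed heartbeats before the peer is considered dead
HEARTBEAT_INTERVAL = 1.0    # seconds
INTERVAL_INIT = 1
INTERVAL_MAX = 32
PPP_HEARTBEAT = b'\x02'

def times_str(text):
    # stand-in for the project's timestamped logger
    print(time.strftime('%H:%M:%S'), text)

def worker_socket(context):
    # hypothetical stand-in: a DEALER socket connected to the queue backend
    sock = context.socket(zmq.DEALER)
    sock.connect('tcp://127.0.0.1:5556')
    return sock

worker = TheWorker(zmq.Context(1))
IOLoop.instance().start()
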
Example 10

    def test_that_creating_mgmt_works(self):

        ctx = zmq.Context()
        io_loop = IOLoop.instance()

        def stop_looping(_msg):
            io_loop.stop()

        settings = Settings()
        settings.ZEROMQ_MASTER_PUSH = 'inproc://spyder-zmq-master-push'
        settings.ZEROMQ_WORKER_PROC_FETCHER_PULL = \
            settings.ZEROMQ_MASTER_PUSH
        settings.ZEROMQ_MASTER_SUB = 'inproc://spyder-zmq-master-sub'
        settings.ZEROMQ_WORKER_PROC_EXTRACTOR_PUB = \
            settings.ZEROMQ_MASTER_SUB

        settings.ZEROMQ_MGMT_MASTER = 'inproc://spyder-zmq-mgmt-master'
        settings.ZEROMQ_MGMT_WORKER = 'inproc://spyder-zmq-mgmt-worker'

        pubsocket = ctx.socket(zmq.PUB)
        pubsocket.bind(settings.ZEROMQ_MGMT_MASTER)
        pub_stream = ZMQStream(pubsocket, io_loop)

        subsocket = ctx.socket(zmq.SUB)
        subsocket.setsockopt(zmq.SUBSCRIBE, b"")
        subsocket.bind(settings.ZEROMQ_MGMT_WORKER)
        sub_stream = ZMQStream(subsocket, io_loop)

        mgmt = workerprocess.create_worker_management(settings, ctx, io_loop)
        mgmt.add_callback(ZMQ_SPYDER_MGMT_WORKER, stop_looping)
        mgmt.start()

        def assert_quit_message(msg):
            self.assertEqual(ZMQ_SPYDER_MGMT_WORKER_QUIT_ACK, msg.data)

        sub_stream.on_recv(assert_quit_message)

        death = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER,
                data=ZMQ_SPYDER_MGMT_WORKER_QUIT)
        pub_stream.send_multipart(death.serialize())

        io_loop.start()

        mgmt._out_stream.close()
        mgmt._in_stream.close()
        mgmt._publisher.close()
        mgmt._subscriber.close()
        pub_stream.close()
        pubsocket.close()
        sub_stream.close()
        subsocket.close()
        ctx.term()
Example 12
class _Messenger(object):

    def __init__(self, in_sock, out_sock, context, io_loop=None):
        self._context = context
        self._io_loop = io_loop or IOLoop.instance()

        self._create_socket(in_sock, out_sock)
        self._in_stream = ZMQStream(self._in_socket, self._io_loop)
        self._out_stream = ZMQStream(self._out_socket, self._io_loop)

        self._callbacks = defaultdict(list)

    def _create_socket(self, in_sock, out_sock):
        raise NotImplementedError()

    def start(self):
        self._in_stream.on_recv(self._on_receive)

    def stop(self):
        self._in_stream.stop_on_recv()
#        self._publish(CTRL_MSG_WORKER, None, CTRL_MSG_WORKER_QUIT_ACK)
#
    def close(self):
        self._in_stream.close()
        self._in_socket.close()
        self._out_stream.close()
        self._out_socket.close()

    def _on_receive(self, zmq_msg):
        msg = CtrlMessage.deserialize(zmq_msg)

        if msg.topic in self._callbacks:
            for callback in self._callbacks[msg.topic]:
                callback(msg)

#        if msg.data == CTRL_MSG_WORKER_QUIT:
#            self.stop()

    def add_callback(self, topic, callback):
        self._callbacks[topic].append(callback)

    def remove_callback(self, topic, callback):
        if topic in self._callbacks and callback in self._callbacks[topic]:
            self._callbacks[topic].remove(callback)

    def publish(self, topic, identity, data):
        msg = CtrlMessage(topic, identity, data)
        self._out_stream.send_multipart(msg.serialize())
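
Sketch of a concrete subclass; the socket types, endpoints and empty SUB filter are assumptions for illustration, while CtrlMessage and the real subclasses live in the surrounding project.

import zmq

class PubSubMessenger(_Messenger):

    def _create_socket(self, in_sock, out_sock):
        # in_sock/out_sock are endpoint strings in this sketch
        self._in_socket = self._context.socket(zmq.SUB)
        self._in_socket.setsockopt(zmq.SUBSCRIBE, b"")
        self._in_socket.connect(in_sock)
        self._out_socket = self._context.socket(zmq.PUB)
        self._out_socket.connect(out_sock)

messenger = PubSubMessenger("tcp://127.0.0.1:5570", "tcp://127.0.0.1:5571", zmq.Context())
messenger.start()
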
Example 13

class LRUQueue(object):
    """LRUQueue class using ZMQStream/IOLoop for event dispatching"""
    def __init__(self, backend_socket, frontend_socket):
        self.available_workers = 0
        self.workers = []
        self.client_nbr = NBR_CLIENTS

        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        self.backend.on_recv(self.handle_backend)

        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        # Queue worker address for LRU routing
        worker_addr, empty, client_addr = msg[:3]

        assert self.available_workers < NBR_WORKERS

        self.available_workers += 1
        self.workers.append(worker_addr)

        assert empty == b""

        if client_addr != b"READY":
            empty, reply = msg[3:]

            assert empty == b""
            self.frontend.send_multipart([client_addr, b'', reply])
            self.client_nbr -= 1
            if self.client_nbr == 0:
                self.loop.add_timeout(time.time() + 1, self.loop.stop)

        if self.available_workers == 1:
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        client_addr, empty, request = msg
        assert empty == b""

        self.available_workers -= 1
        worker_id = self.workers.pop()

        self.backend.send_multipart([worker_id, b'', client_addr, b'', request])

        if self.available_workers == 0:
            self.frontend.stop_on_recv()
Example 14

class ZMQBaseSocket(object):
    """
    Base socket class for zmq sockets.
    """
    def __init__(self, socket, endpoint):
        """
        :param socket: ZeroMQ socket.
        :type socket: zmq.Context.socket
        :param endpoint: Endpoint to bind or connect the socket to.
        :type endpoint: str
        """
        self._socket = socket
        self._endpoint = endpoint

    def run(self):
        """
        Base method that connects or binds a socket to self._endpoint.
        Sub classes must override this method.
        """
        raise NotImplementedError(self.run)

    def wrap_zmqstream(self):
        """
        Wraps self._socket into ZMQStream socket.
        """
        self._socket = ZMQStream(self._socket)

    def register_handler(self, method, callback, *args, **kwargs):
        """
        Registers a callback for a particular method.

        :param method: The method to which the callback will be attached.
        :type method: str
        :param callback: The callback method.
        :type callback: function
        """
        if isinstance(self._socket, ZMQStream):
            getattr(self._socket, method)(callback)
        else:
            raise TypeError("Not a ZMQStream socket.")

    def send(self, msg):
        """
        Wrapper over "socket.send_multipart()".

        :param msg: Message to be sent.
        :type msg: list

        :return: Return Value of send_multipart()
        :rtype: None or MessageTracker
        """
        # Performing type checking as it is fairly easy to just send a
        # str in param msg.
        if not isinstance(msg, list):
            raise TypeError("param msg expected of <type 'list'>. Found %s."
                            % (type(msg)))
        return self._socket.send_multipart(msg)

    def recv(self):
        """
        Wrapper over "socket.recv_multipart()".

        :return: Received message.
        :rtype: list
        """
        return self._socket.recv_multipart()

    def close(self):
        """
        Wrapper to close the socket.
        """
        self._socket.close()
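
A concrete-subclass sketch; the class name, endpoint and callback are illustrative.

import zmq
from tornado.ioloop import IOLoop

class ZMQPullSocket(ZMQBaseSocket):
    """PULL socket that binds its endpoint and wraps itself in a ZMQStream."""
    def run(self):
        self._socket.bind(self._endpoint)
        self.wrap_zmqstream()

ctx = zmq.Context()
pull = ZMQPullSocket(ctx.socket(zmq.PULL), "tcp://*:5565")
pull.run()
pull.register_handler("on_recv", lambda frames: print(frames))
IOLoop.current().start()
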
Example 15
class MNWorker(MN_object):
    """Class for the MN worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param endpoint:   endpoint to connect to.
    :type endpoint:    str
    :param service:    the name of the service we support.
    :type service:     byte-string
    """

    _proto_version = b'MNPW01'  # worker protocol version

    def __init__(self, context, endpoint, service, worker_type, address,
                 protocols):
        """Initialize the MNWorker.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.type = worker_type
        self.address = address
        self.protocols = protocols
        self.envelope = None
        self.HB_RETRIES = HB_RETRIES
        self.HB_INTERVAL = HB_INTERVAL
        self._data = {}
        self.stream = None
        self._tmo = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        _LOG.info("Worker initialized and can be found at '%s'" % endpoint)
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.
        """
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        _LOG.debug("Informing broker I am ready")
        ready_msg = [
            b'', WORKER_PROTO, MSG_READY, self.service, self.type,
            self.address, self.protocols
        ]
        if self.stream.closed():
            self.shutdown()
            return
        self.stream.send_multipart(ready_msg)
        self.curr_retries = self.HB_RETRIES
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.
        """
        self.curr_retries -= 1
        self.send_hb()
        if self.curr_retries >= 0:
            return
        # connection seems to be dead
        self.shutdown()
        # try to recreate it
        # self._delayed_cb = IOLoop.call_later(self._create_stream, 5000)
        # self._delayed_cb = IOLoop.add_timeout(self._create_stream, 5000)
        self._delayed_cb = DelayedCallback(self._create_stream,
                                           self.HB_INTERVAL)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        _LOG.debug("Sending heartbeat")
        msg = [b'', WORKER_PROTO, MSG_HEARTBEAT]
        if self.stream.closed():
            self.shutdown()
            return
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        :param msg:    full message to send.
        :type msg:     can either be a byte-string or a list of byte-strings
        """
        if self.need_handshake:
            raise ConnectionNotReadyError()
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        if self.stream.closed():
            self.shutdown()
            return
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:    a list w/ the message parts
        :type msg:     a list of byte-strings
        """
        _LOG.debug("Received: %s." % msg)
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        proto = msg.pop(0)
        if proto != WORKER_PROTO:
            # ignore messages from an unsupported protocol
            return
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the retries counter
        self.need_handshake = False
        self.curr_retries = self.HB_RETRIES
        if msg_type == MSG_DISCONNECT:  # disconnect
            _LOG.info("Broker wants us to disconnect.")
            self.curr_retries = 0  # reconnect will be triggered by hb timer
        elif msg_type == MSG_QUERY:  # request
            # remaining parts are the user message
            _LOG.debug("Received new request: %s." % msg)
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', WORKER_PROTO, MSG_REPLY] + envelope  # reply
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message
            # ignored
            _LOG.debug('ignoring message with invalid id')
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        :param msg:    a list w/ the message parts
        :type msg:     a list of byte-strings

        Must be overloaded to provide support for various services!
        """
        pass
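
Sketch of a concrete worker; the service name, endpoint, address and echo behaviour are illustrative, and WORKER_PROTO plus the MSG_* constants are assumed to come from the surrounding module.

import zmq
from tornado.ioloop import IOLoop

class EchoWorker(MNWorker):
    def on_request(self, msg):
        # msg is the list of request frames; send them straight back
        self.reply(msg)

context = zmq.Context()
worker = EchoWorker(context, "tcp://127.0.0.1:5555", b"echo",
                    b"worker", b"tcp://127.0.0.1:6000", b"tcp")
IOLoop.instance().start()
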
Example 16
class Broker(object):
    """This is the implementation of the broker.

    You don't need to override any methods in this class; it works out of the box.
    Just call the start_listening() method.

    :type context:    Context
    :param context:   instance of zmq.Context
    :param endpoint:  listening address
    :type endpoint:   str
    """

    def __init__(self, context, endpoint):
        socket = context.socket(zmq.ROUTER)
        socket.bind(endpoint)
        self.stream = ZMQStream(socket)
        self.stream.on_recv(self.on_message)

        # services, workers and multicast groups
        self._workers = {}
        self._services = {}
        self._multicasts = {}
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        return

    def start_listening(self):
        """Start listening to new messages
        """
        IOLoop.instance().start()

    def stop_listening(self):
        """Stop listening
        """
        IOLoop.instance().stop()

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        return_addresses, msg = self.split_address(msg)
        # dispatch on first frame after path
        method_to_call = None
        try:
            t = msg.pop(0)
            if t.startswith(b'MDPW'):
                method_to_call = self.on_worker
            elif t.startswith(b'MDPC'):
                method_to_call = self.on_client
            else:
                # Unknown protocol
                pass
        except (AttributeError, IndexError):
            # Wrong incoming msg format
            pass
        if method_to_call is not None:
            method_to_call(return_addresses, msg)
        return

    def on_client(self, return_addresses, message):
        """Method called on client message.

        Frame 0 of msg is the command id
        Frame 1 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        If the service name starts with `multicast.`, the message is sent
        to all workers in that group.

        :param return_addresses:    return address stack
        :type return_addresses:     list of str
        :param message:   message parts
        :type message:    list of str

        :rtype: None
        """

        cmd = message.pop(0)  # always 0x01
        service = message.pop(0)

        # mmi requests
        if service.startswith(b'mmi.'):
            self.on_client_mmi(return_addresses, service, message)
            return

        # multicast requests
        if service.startswith(b'multicast.'):
            self.on_client_multicast(return_addresses, service, message)
            return

        # worker requests
        try:
            available_workers = self._services[service]
            random_worker = choice(available_workers)  # TODO: loadbalancing
            to_send = [random_worker.id, b'', MDP_WORKER_VERSION, b'\x02']
            to_send.extend(return_addresses)
            to_send.append(b'')
            to_send.extend(message)
            self.stream.send_multipart(to_send)

        except KeyError:
            # unknown service
            self.client_response(return_addresses, b'broker', b'No worker available', error=True)
        return

    def on_client_multicast(self, return_addresses, service, message):
        """Handling multicast messages from client

        :param return_addresses:    return address stack
        :type return_addresses:     list of str
        :param service:   name of mmi service
        :type service:    str
        :param message:   message parts
        :type message:    list of str
        """
        target = service[10:]  # remove 'multicast.'
        try:
            # first, prepare list of workers in target multicast
            grouped_by_names = {}
            for worker in self._multicasts[target]:
                if worker.service in grouped_by_names:
                    grouped_by_names[worker.service].append(worker)
                else:
                    grouped_by_names[worker.service] = [worker]

            # send message to one worker per service
            sent_messages = []
            for name, workers in grouped_by_names.items():
                random_worker = choice(workers)  # TODO: loadbalancing
                to_send = [random_worker.id, b'', MDP_WORKER_VERSION, b'\x02']
                to_send.extend(return_addresses)
                to_send.append(b'')
                to_send.extend(message)
                self.stream.send_multipart(to_send)
                sent_messages.append(random_worker.service)

            # notify client with list of services in multicast group
            client_msg = return_addresses[:]
            client_msg.extend([b'', MDP_WORKER_VERSION, b'\x05'])
            client_msg.extend(sent_messages)
            self.stream.send_multipart(client_msg)
        except KeyError:
            # unknown service
            self.client_response(return_addresses, b'broker', b'No services available in this multicast', error=True)
        return

    def on_client_mmi(self, return_addresses, service, message):
        """Handling MMI messages from client

        :param return_addresses:    return address stack
        :type return_addresses:     list of str
        :param service:   name of mmi service
        :type service:    str
        :param message:   message parts
        :type message:    list of str
        """
        if service == b'mmi.service':
            return self.on_client_mmi_service(return_addresses, service, message)
        elif service == b'mmi.services':
            return self.on_client_mmi_services(return_addresses, service, message)
        elif service == b'mmi.workers':
            return self.on_client_mmi_workers(return_addresses, service, message)
        elif service == b'mmi.multicasts':
            return self.on_client_mmi_multicasts(return_addresses, service, message)
        else:
            # unknown mmi service - notify client
            self.client_response(return_addresses, b'broker', b'Service not found', error=True)

    def on_client_mmi_service(self, return_addresses, service, message):
        """Check if services exists
        """
        return self.client_response_pack(return_addresses, b'broker', message[0] in self._services.keys())

    def on_client_mmi_services(self, return_addresses, service, message):
        """List of all services
        """
        return self.client_response_pack(return_addresses, b'broker', [k for k in self._services])

    def on_client_mmi_workers(self, return_addresses, service, message):
        """Number of workers per service
        """
        s = {}
        for se in self._services:
            s[se] = len(self._services[se])
        return self.client_response_pack(return_addresses, b'broker', s)

    def on_client_mmi_multicasts(self, return_addresses, service, message):
        """List of available multicast groups
        """
        m = {}
        for se in self._multicasts:
            m[se] = [s.service for s in self._multicasts[se]]
        return self.client_response_pack(return_addresses, b'broker', m)

    def on_worker(self, return_addresses, message):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        cmd = message.pop(0)

        worker_cmds = {
            b'\x01': self.on_worker_ready,
            b'\x03': self.on_worker_partial_reply,
            b'\x04': self.on_worker_final_reply,
            b'\x05': self.on_worker_heartbeat,
            b'\x06': self.on_worker_disconnect,
            b'\x07': self.on_worker_multicast_add,  # this is not part of the Majordomo Protocol 0.2 !
            b'\x08': self.on_worker_exception,  # this is not part of the Majordomo Protocol 0.2 !
            b'\x09': self.on_worker_error,  # this is not part of the Majordomo Protocol 0.2 !
        }
        if cmd in worker_cmds:
            fnc = worker_cmds[cmd]
            fnc(return_addresses, message)
        else:
            # ignore unknown command
            pass
        return

    def on_worker_ready(self, return_addresses, message):
        """Called when new worker is ready to receive messages.

        Register worker to list of available services.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        # Frame 0 of msg is a service.
        service = message.pop(0)
        wid = return_addresses[0]
        return self.register_worker(wid, service)

    def on_worker_partial_reply(self, return_addresses, message):
        """Process worker PARTIAL REPLY command.

        Route the `message` to the client given by the address(es) in front of `message`.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg, partial=True)
        return

    def on_worker_final_reply(self, return_addresses, message):
        """Process worker FINAL REPLY command.

        Route the `message` to the client given by the address(es) in front of `message`.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg)
        return

    def on_worker_heartbeat(self, return_addresses, message):
        """Process worker HEARTBEAT command.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_worker_disconnect(self, return_addresses, message):
        """Process worker DISCONNECT command.

        Remove worker from list of services.

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        wid = return_addresses[0]
        return self.unregister_worker(wid)

    def on_worker_multicast_add(self, return_addresses, message):
        """Process worker MULTICAST ADD command.

        Add worker to list of multicasts
        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        multicast_name = message.pop(0)
        wid = return_addresses[0]
        return self.register_multicast(wid, multicast_name)

    def on_worker_exception(self, return_addresses, message):
        """Process worker EXCEPTION command.

        Route the `message` to the client given by the address(es) in front of `message`.
        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg, exception=True)
        return

    def on_worker_error(self, return_addresses, message):
        """Process worker ERROR command.

        Route the `message` to the client given by the address(es) in front of `message`.
        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses:  return address stack
        :type return_addresses:   list of str
        :param message: message parts
        :type message:  list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message

        return_addresses, msg = self.split_address(message)

        self.client_response(return_addresses, wrep.service, msg, error=True)
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        for wrep in self._workers.values():
            if not wrep.is_alive():
                self.on_log_event('worker.connection_timeout', "Worker connection timeout for service '%s'." % wrep.service)
                self.unregister_worker(wrep.id)
        return

    def client_response(self, return_addresses, service, msg, partial=False, exception=False, error=False):
        """Package and send reply to client.

        :param return_addresses:       return address stack
        :type return_addresses:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str | str

        :rtype: None
        """
        to_send = return_addresses[:]
        if error:
            t = b'\x04'
        elif exception:
            t = b'\x06'
        elif partial:
            t = b'\x02'
        else:
            t = b'\x03'
        to_send.extend([b'', MDP_WORKER_VERSION, t, service])
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def client_response_pack(self, return_addresses, service, msg, partial=False):
        """Send message to client and pack it (msg) in msgpack format

        Exception and error messages are not allowed here.

        :param return_addresses:       return address stack
        :type return_addresses:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message to pack and send
        :type msg:       mixed
        :param partial:  if message is partial of final, default False
        :type partial:   bool

        :rtype: None
        """
        packed = msgpack.Packer().pack(msg)
        self.client_response(return_addresses, service, packed, partial=partial)

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        wrep.shutdown()
        service = wrep.service

        # remove worker from service list (avoid mutating the list while iterating)
        if service in self._services:
            worker_list = [w for w in self._services[service] if w.id != wid]
            if worker_list:
                self._services[service] = worker_list
            else:
                del self._services[service]

        # remove worker from multicasts
        for m_name in self._multicasts:
            mw = self._multicasts[m_name]
            for w in [w for w in mw if w.id == wid]:
                mw.remove(w)

        # delete empty rows
        empty_keys = [k for k, v in self._multicasts.items() if len(v) == 0]
        for k in empty_keys:
            del self._multicasts[k]

        del self._workers[wid]
        self.on_log_event('worker.unregister', "Worker for service '%s' disconnected." % service)
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        if wid in self._workers:
            return
        worker = WorkerRep(wid, service, self.stream)
        self._workers[wid] = worker

        if service in self._services:
            s = self._services[service]
            s.append(worker)
        else:
            self._services[service] = [worker]
        self.on_log_event('worker.register', "Worker for service '%s' is connected." % service)
        return

    def register_multicast(self, wid, multicast_name):
        """Add worker to multicast group

        :type wid:       str
        :param wid:      the worker id.
        :type multicast_name:  str
        :param multicast_name: group name
        """
        if wid not in self._workers:
            return
        worker = self._workers[wid]
        if multicast_name in self._multicasts:
            m = self._multicasts[multicast_name]
            m.append(worker)
        else:
            self._multicasts[multicast_name] = [worker]
        self.on_log_event('worker.register_multicast', "Service '%s' added to multicast group '%s'." % (worker.service, multicast_name))
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        self.stream.on_recv(None)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None

        self._workers = {}
        self._services = {}
        self._multicasts = {}
        return

    def on_log_event(self, event, message):
        """Override this method if you want to log events from broker

        :type event:    str
        :param event:   event type - used for filtering
        :type message:  str
        :param message: log message

        :rtype: None
        """
        pass

    # helpers:
    def split_address(self, msg):
        """Function to split return Id and message received by ROUTER socket.

        Returns 2-tuple with return Id and remaining message parts.
        Empty frames after the Id are stripped.
        """
        ret_ids = []
        for i, p in enumerate(msg):
            if p:
                ret_ids.append(p)
            else:
                break
        return ret_ids, msg[i + 1:]
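
Run sketch; the endpoint is illustrative, and HB_INTERVAL, MDP_WORKER_VERSION, WorkerRep and msgpack are assumed to come from the surrounding module.

import zmq

context = zmq.Context()
broker = Broker(context, "tcp://*:5555")
broker.start_listening()   # blocks in the IOLoop until stop_listening() is called
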
Example 17
class TornadoConnection(YakDBConnectionBase):
    """
    A tornado IOLoop-based connection variant that uses a DEALER connection
    with YakDB request IDs to support parallel requests without having to wait.
    
    This class is not documented separately; see YakDB.Connection for details
    on the function parameters, so that the documentation is not duplicated.
    In addition to the YakDB.Connection arguments, simply supply a callback function.
    It will be called with the result of the operation (or with no argument if the
    operation has no result).

    Also note that exception-based error reporting in this class
    does not allow backtracing the caller, because the receipt handler
    is called from the IO loop. This might be fixed in the future.
    """
    def __init__(self, endpoints, context=None):
        YakDBConnectionBase.__init__(self, context=context)
        self.useDealerMode()
        self.connect(endpoints)
        self.requests = {} #Maps request ID to callback
        self.nextRequestId = 0
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.__recvCallback)
    def scan(self, tableNo, callback, startKey=None, endKey=None, limit=None, keyFilter=None, valueFilter=None, skip=0, invert=False, mapData=False):
        requestId = self.__newRequest(callback, {"mapData": mapData})
        msgParts = [""] + YakDBConnectionBase.buildScanRequest(self, tableNo, startKey, endKey, limit, keyFilter, valueFilter, skip, invert, requestId=requestId)
        self.stream.send_multipart(msgParts)
    def read(self, tableNo, keys, callback, mapKeys=False):
        requestId = self.__newRequest(callback, {"keys": (keys if mapKeys else None), "mapKeys": mapKeys})
        msgParts = [""] + YakDBConnectionBase.buildReadRequest(self, tableNo, keys, requestId)
        self.stream.send_multipart(msgParts)
    def __newRequest(self, callback, params={}):
        """Setup mapping for a new request. Returns the new request ID."""
        self.nextRequestId += 1
        self.requests[self.nextRequestId] = (callback, params)
        return struct.pack("<I", self.nextRequestId)
    def __recvCallback(self, msg):
        #DEALER response contains empty delimiter!
        if len(msg[0]) != 0:
            print >>sys.stderr, "Received malformed message: ", msg
            return
        msg = msg[1:]
        #Currently we don't check the response type
        YakDBConnectionBase._checkHeaderFrame(msg)
        #Struct unpack yields 1-element tuple!
        headerFrame = msg[0]
        assert(len(headerFrame) == 8) #4 bytes response + 4 bytes request ID
        requestId = struct.unpack("<I", YakDBConnectionBase._extractRequestId(headerFrame))[0]
        callback, params = self.requests[requestId]
        #Postprocess, depending on request type.
        responseType = headerFrame[2]
        dataFrames = msg[1:]
        if responseType == "\x13": #Scan
            if params["mapData"]:
                data = YakDBConnectionBase(dataFrames)
            else:
                data = YakDBConnectionBase._mapScanToTupleList(dataFrames)
        elif responseType == "\x10": #Read
            if params["mapKeys"]:
                data = YakDBConnectionBase._mapReadKeyValues(params["keys"], dataFrames)
            else:
                data = dataFrames
        else:
            raise YakDBProtocolException("Received correct response, but cannot handle response code %d" % ord(responseType))
        #Cleanup
        del self.requests[requestId]
        #Call original callback
        callback(data)
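
Usage sketch; the endpoint and table number are illustrative.

from tornado.ioloop import IOLoop

def on_read(values):
    print(values)

conn = TornadoConnection("tcp://127.0.0.1:7100")
conn.read(1, ["somekey"], on_read)
IOLoop.instance().start()
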
Example 18
class Master(object):
    """
        Broker for asynchronous interaction,
        but I would like to call it Master!!!
    """
    def __init__(self, url_worker, url_client, batch_size,
                 estimator_update_callable):

        context = zmq.Context()
        frontend = context.socket(zmq.ROUTER)
        frontend.bind(url_client)
        backend = context.socket(zmq.ROUTER)
        backend.bind(url_worker)

        self.available_workers = 0
        self.workers = []

        self.batch_size = batch_size
        self.estimator_update = estimator_update_callable

        self.backend = ZMQStream(backend)
        self.frontend = ZMQStream(frontend)
        self.backend.on_recv(self.handle_backend)

        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        # Queue worker address for LRU routing
        worker_addr, empty, client_addr = msg[:3]

        # add worker back to the list of workers
        self.available_workers += 1
        self.workers.append(worker_addr)

        # Third frame is READY or else a client reply address
        # If client reply, send rest back to frontend
        if client_addr != b"READY":
            empty, reply = msg[3:]
            self.frontend.send_multipart([client_addr, b'', reply])

        if self.available_workers == 1:
            # on first recv, start accepting frontend messages
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        # Now get next client request, route to LRU worker
        # Client request is [address][empty][request]
        client_addr, empty, request = msg
        request = msgpack.loads(request)
        if request[0] == 'reset':
            state = request[1]
            msg = [b'', client_addr, b'', msgpack.dumps([state])]
            self.worker_send(msg)
        elif request[0] == 'step':
            t = Transition(*request[1:])
            self.update(t)

            if t.done:
                self.frontend.send_multipart([client_addr, b'', b'reset'])
            else:
                msg = [b'', client_addr, b'', msgpack.dumps([t.next_state])]
                self.worker_send(msg)

    def worker_send(self, msg):
        #  Dequeue and drop the next worker address
        self.available_workers -= 1
        worker_id = self.workers.pop(0)

        self.backend.send_multipart([worker_id] + msg)
        if self.available_workers == 0:
            # stop receiving until workers become available again
            self.frontend.stop_on_recv()
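
Run sketch; the endpoints and batch size are illustrative, and Transition plus msgpack come from the surrounding module.

from tornado.ioloop import IOLoop

def update_estimator(transition):
    pass   # placeholder for the real learner update

master = Master("tcp://*:5556", "tcp://*:5555",
                batch_size=32, estimator_update_callable=update_estimator)
IOLoop.instance().start()
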
Example 19
class GlinAppZmqPublisher:
    """Publishes state changes via a ZeroMQ PUB socket"""
    def __init__(self, app, ctx, port=6606):
        self.app = app
        self.ctx = ctx
        self.publisher = self.ctx.socket(zmq.PUB)
        self.publisher.bind("tcp://*:" + str(port))
        self.snapshot = ctx.socket(zmq.ROUTER)
        self.snapshot.bind("tcp://*:" + str(port+2))
        self.snapshot = ZMQStream(self.snapshot)
        self.snapshot.on_recv(self.handle_snapshot)
        self.sequence_number = 0

    def publish_brightness(self, brightness):
        """publish changed brightness"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.brightness(self.sequence_number, brightness))
        return self.sequence_number
    def publish_mainswitch_state(self, state):
        """publish changed mainswitch state"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.mainswitch_state(self.sequence_number, state))
        return self.sequence_number
    def publish_active_scene(self, scene_id):
        """publish changed active scene"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.scene_active(self.sequence_number, scene_id))
        return self.sequence_number
    def publish_scene_add(self, scene_id, animation_id, name, color, velocity, config):
        """publish added scene"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.scene_add(self.sequence_number, scene_id, animation_id, name, color, velocity, config))
        return self.sequence_number
    def publish_scene_remove(self, scene_id):
        """publish the removal of a scene"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.scene_remove(self.sequence_number, scene_id))
        return self.sequence_number
    def publish_scene_name(self, scene_id, name):
        """publish a changed scene name"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.scene_name(self.sequence_number, scene_id, name))
        return self.sequence_number
    def publish_scene_config(self, scene_id, config):
        """publish a changed scene configuration"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.scene_config(self.sequence_number, scene_id, config))
        return self.sequence_number
    def publish_scene_color(self, scene_id, color):
        """publish a changed scene color"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.scene_color(self.sequence_number, scene_id, color))
        return self.sequence_number
    def publish_scene_velocity(self, scene_id, velocity):
        """publish a changed scene velocity"""
        self.sequence_number += 1
        self.publisher.send_multipart(msgs.MessageBuilder.scene_velocity(self.sequence_number, scene_id, velocity))
        return self.sequence_number

    def handle_snapshot(self, msg):
        """Handles a snapshot request"""
        logging.debug("Received snapshot request, sending state snapshot")
        identity = msg[0]
        self.snapshot.send_multipart([identity] + msgs.MessageBuilder.mainswitch_state(self.sequence_number, self.app.state.mainswitch))
        self.snapshot.send_multipart([identity] + msgs.MessageBuilder.brightness(self.sequence_number, self.app.state.brightness))
        for animation_id, anim in enumerate(self.app.state.animationClasses):
            self.snapshot.send_multipart([identity] + msgs.MessageBuilder.animation_add(self.sequence_number, animation_id, anim.name))
        for scene_id, scene in self.app.state.scenes.items():
            self.snapshot.send_multipart([identity] + msgs.MessageBuilder.scene_add(
                self.sequence_number, scene_id, scene.animation_id, scene.name, scene.color, scene.velocity, scene.config))
        self.snapshot.send_multipart([identity] + msgs.MessageBuilder.scene_active(
            self.sequence_number, 0 if self.app.state.activeSceneId is None else self.app.state.activeSceneId))
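
A minimal subscriber sketch for the sockets bound above (the host, the plain-bytes request frame and the port values are assumptions): the SUB socket receives live updates, while any message sent to the ROUTER snapshot socket triggers handle_snapshot and yields the current state as a series of multipart messages.

import zmq

ctx = zmq.Context.instance()

# live state updates from the PUB socket (default port 6606 above)
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://localhost:6606")
sub.setsockopt(zmq.SUBSCRIBE, b"")      # subscribe to all topics

# state snapshot from the ROUTER socket bound at port + 2
snap = ctx.socket(zmq.DEALER)
snap.connect("tcp://localhost:6608")
snap.send(b"snapshot?")                 # any incoming frame triggers handle_snapshot

print("first snapshot message:", snap.recv_multipart())
print("first live update:", sub.recv_multipart())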
Esempio n. 20
0
class Client(object):

    def __init__(self, context, endpoint, service):
        """Initialize the Client.
        """
        socket = context.socket(zmq.DEALER)
        self.ioloop = IOLoop.instance()
        self.service = service.encode('utf-8')
        self.endpoint = endpoint
        self.stream = ZMQStream(socket, self.ioloop)
        self.stream.on_recv(self._on_message)
        self.can_send = True
        self._proto_prefix = [MDP_CLIENT_VERSION, b'\x01', service.encode('utf-8')]
        self._tmo = None
        self.timed_out = False
        self.multicast_services = None
        self.multicast_already_received = []
        socket.connect(endpoint)
        return

    def shutdown(self):
        """Method to deactivate the client connection completely.

        Will delete the stream and the underlying socket.

        .. warning:: The instance MUST NOT be used after :func:`shutdown` has been called.

        :rtype: None
        """
        if not self.stream:
            return
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        return

    def request(self, msg, timeout=None):
        """Send the given message.

        :param msg:     message parts to send.
        :type msg:      list of str
        :param timeout: time to wait in milliseconds.
        :type timeout:  int | float

        :rtype None:
        """
        if not self.can_send:
            raise InvalidStateError()
        try:
            if type(msg) in (bytes, unicode):  # python 2
                msg = [msg]
        except NameError:
            if type(msg) == bytes:  # python 3
                msg = [msg]
        # prepare full message
        to_send = [b'']
        to_send.extend(self._proto_prefix[:])
        to_send.extend(msg)
        self.stream.send_multipart(to_send)
        self.can_send = False
        self.multicast_services = None
        if timeout:
            self._start_timeout(timeout)
        return

    def rpc(self, method, args=None, kwargs=None, timeout=None):
        """Call RPC method

        :param method:  name of method
        :type method:   str
        :param args:    list of args
        :type args:     list
        :param kwargs:  dict of keyword arguments
        :type kwargs:   dict
        :param timeout: time to wait in milliseconds.
        :type timeout:  int | float

        :rtype None:
        """
        data = [method.encode('utf-8'), msgpack.packb([] if args is None else args), msgpack.packb({} if kwargs is None else kwargs)]
        return self.request(data, timeout)

    def _on_message(self, message):
        """Helper method called on message receive.

        :param message:   list of message parts.
        :type message:    list of str
        """
        message.pop(0) # remove empty string
        protocol_version = message.pop(0)
        if protocol_version != MDP_WORKER_VERSION:  # version check, ignore old versions
            return
        message_type = message.pop(0)

        if message_type == b'\x02':  # partial message
            worker = message.pop(0)
            try:
                msg = msgpack.unpackb(message[0])
            except Exception:
                msg = message[0]

            self.on_partial_message(worker, msg)
        elif message_type == b'\x03':  # final message
            worker = message.pop(0)
            try:
                msg = msgpack.unpackb(message[0])
            except Exception:
                msg = message[0]
            self.on_message(worker, msg)

            if worker not in self.multicast_already_received:
                self.multicast_already_received.append(worker)
            if self.multicast_services is None or len(self.multicast_already_received) == len(self.multicast_services):
                self.stop_waiting()
        elif message_type == b'\x04':  # error message
            worker = message.pop(0)
            self.on_error_message(worker, message[0])
            self.stop_waiting()
        elif message_type == b'\x05':  # multicast start, load list of multicast services
            self.multicast_services = message
            self.multicast_already_received = []
            self.on_multicast_start(message)
        elif message_type == b'\x06':  # exception message
            worker = message.pop(0)
            try:
                msg = msgpack.unpackb(message[0])
            except Exception:
                msg = {b'class': 'Exception', b'message': 'parsing failed', b'traceback': message[0]}
            self.on_exception_message(worker, msg[b'class'], msg[b'message'], msg[b'traceback'])

            if worker not in self.multicast_already_received:
                self.multicast_already_received.append(worker)
            if self.multicast_services is None or len(self.multicast_already_received) == len(self.multicast_services):
                self.stop_waiting()
        #else: unknown type - do nothing
        return

    def wait_for_reply(self):
        """Start waiting for replies
        """
        self.ioloop.start()

    def stop_waiting(self):
        """Stop waiting for replies and cancel timeout
        """
        self.ioloop.stop()
        self.can_send = True
        self._stop_timeout()

    def _on_timeout(self):
        """Helper called after timeout.
        """
        self.timed_out = True
        self._tmo = None
        self.ioloop.stop()
        self.on_timeout()
        return

    def _start_timeout(self, timeout):
        """Helper for starting the timeout.

        :param timeout:  the time to wait in milliseconds.
        :type timeout:   int
        """
        self._tmo = DelayedCallback(self._on_timeout, timeout)
        self._tmo.start()
        return

    def _stop_timeout(self):
        """Helper for stopping timeout event
        """
        if self._tmo:
            self._tmo.stop()
            self._tmo = None

    def on_message(self, service, msg):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!

        :param service:  name of the worker
        :type service:   str
        :param msg:      message
        :type msg:       mixed
        """
        pass

    def on_partial_message(self, service, msg):
        """Public method called when a partial message arrived.

        .. note:: Does nothing. Should be overloaded!

        :param service:  name of the worker
        :type service:   str
        :param msg:      message
        :type msg:       mixed
        """
        pass

    def on_error_message(self, worker, msg):
        """Public method called when an error message arrived.

        .. note:: Does nothing. Should be overloaded!

        :param worker:   name of the worker
        :type worker:    str
        :param msg:      message
        :type msg:       str
        """
        pass

    def on_exception_message(self, service, cls, message, traceback):
        """Public method called when an exception arrived.

        .. note:: Does nothing. Should be overloaded!

        :param service:  name of the worker
        :type service:   str
        :param cls:      exception class name
        :type cls:       str
        :param message:  error message
        :type message:   str
        :param traceback: traceback
        :type traceback:  str
        """
        pass

    def on_timeout(self):
        """Public method called when a timeout occured.

        .. note:: Does nothing. Should be overloaded!
        """
        pass

    def on_multicast_start(self, services):
        """Public method called when multicast request started

        .. note:: Does nothing. Should be overloaded!

        :param services:  list of services in multicast group
        :type services:   list
        """
        pass
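
A hypothetical usage sketch for the Client class above (the endpoint and service name are invented; the MDP constants come from the surrounding module): subclass it, override the callbacks of interest, issue an RPC and block in the IOLoop until the reply, error, or timeout callback fires.

class EchoClient(Client):
    def on_message(self, service, msg):
        print("final reply from", service, ":", msg)

    def on_error_message(self, worker, msg):
        print("error from", worker, ":", msg)

    def on_timeout(self):
        print("request timed out")

if __name__ == '__main__':
    import zmq
    client = EchoClient(zmq.Context.instance(), "tcp://127.0.0.1:5555", "echo")
    client.rpc("ping", timeout=2000)    # timeout in milliseconds
    client.wait_for_reply()             # runs the IOLoop until stop_waiting()
    client.shutdown()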
Esempio n. 21
0
class ZMQApplicationProxy(object):
    """A proxy for a ZeroMQ based ZMQApplication that is using ZMQHTTPRequest.

    This class is a proxy for a backend that is running a
    ZMQApplication and MUST be used with the ZMQHTTPRequest class. This
    version sends the reply parts (each generated by RequestHandler.flush) as
    a single multipart message for low latency replies. See
    ZMQStreamingApplicationProxy, for a version that has higher latency, but
    which sends each reply part as a separate zmq message.
    """

    def __init__(self, loop=None, context=None):
        self.loop = loop if loop is not None else IOLoop.instance()
        self.context = context if context is not None else zmq.Context.instance()
        self._callbacks = {}
        self.socket = self.context.socket(zmq.DEALER)
        self.stream = ZMQStream(self.socket, self.loop)
        self.stream.on_recv(self._handle_reply)
        self.urls = []

    def connect(self, url):
        """Connect the service client to the proto://ip:port given in the url."""
        self.urls.append(url)
        self.socket.connect(url)

    def bind(self, url):
        """Bind the service client to the proto://ip:port given in the url."""
        self.urls.append(url)
        self.socket.bind(url)

    def send_request(self, request, args, kwargs, handler, timeout):
        """Send a request to the service."""
        req = {}
        req['method'] = request.method
        req['uri'] = request.uri
        req['version'] = request.version
        req['headers'] = dict(request.headers)
        body = request.body
        req['remote_ip'] = request.remote_ip
        req['protocol'] = request.protocol
        req['host'] = request.host
        req['files'] = request.files
        req['arguments'] = request.arguments
        req['args'] = args
        req['kwargs'] = kwargs

        msg_id = str(uuid.uuid4()).encode('utf-8')  # bytes id, works on both Python 2 and 3
        msg_list = [b'|', msg_id, jsonapi.dumps(req)]
        if body:
            msg_list.append(body)
        logging.debug('Sending request: %r', msg_list)
        self.stream.send_multipart(msg_list)

        if timeout > 0:
            def _handle_timeout():
                handler.send_error(504) # Gateway timeout
                try:
                    self._callbacks.pop(msg_id)
                except KeyError:
                    logging.error('Unexpected error removing callbacks')
            dc = DelayedCallback(_handle_timeout, timeout, self.loop)
            dc.start()
        else:
            dc = None
        self._callbacks[msg_id] = (handler, dc)
        return msg_id

    def _handle_reply(self, msg_list):
        logging.debug('Handling reply: %r', msg_list)
        len_msg_list = len(msg_list)
        if len_msg_list < 3 or not msg_list[0] == b'|':
            logging.error('Unexpected reply in ZMQApplicationProxy._handle_reply')
            return
        msg_id = msg_list[1]
        replies = msg_list[2:]
        cb = self._callbacks.pop(msg_id, None)
        if cb is not None:
            handler, dc = cb
            if dc is not None:
                dc.stop()
            try:
                for reply in replies:
                    handler.write(reply)
                # The backend has already processed the headers and they are
                # included in the above write calls, so we manually tell the
                # handler that the headers are already written.
                handler._headers_written = True
                # We set transforms to an empty list because the backend
                # has already applied all of the transforms.
                handler._transforms = []
                handler.finish()
            except Exception:
                logging.error('Unexpected error in ZMQApplicationProxy._handle_reply', exc_info=True)
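
A rough wiring sketch for the proxy above (the endpoint, URL pattern and handler name are assumptions; @web.asynchronous implies an older Tornado release, consistent with the DelayedCallback/IOLoop.instance usage in this class): the proxy is created once and connected to the backend, and each frontend handler forwards its HTTP request with send_request(); _handle_reply later writes and finishes the response, or a 504 is sent on timeout.

from tornado import web

proxy = ZMQApplicationProxy()
proxy.connect('tcp://127.0.0.1:5556')    # assumed backend endpoint

class ForwardingHandler(web.RequestHandler):
    @web.asynchronous
    def get(self, *args, **kwargs):
        # the reply (or a 504 after 5000 ms) is written back asynchronously
        proxy.send_request(self.request, args, kwargs, self, timeout=5000)

if __name__ == '__main__':
    web.Application([(r"/.*", ForwardingHandler)]).listen(8888)
    proxy.loop.start()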
Esempio n. 22
0
class PubSub(BasePubSub):
    """
    This class manages application PUB/SUB logic.
    """
    NAME = 'ZeroMQ'

    def __init__(self, application):
        super(PubSub, self).__init__(application)
        self.sub_stream = None
        self.pub_stream = None
        self.zmq_context = None
        self.zmq_pub_sub_proxy = None
        self.zmq_xpub = None
        self.zmq_xsub = None
        self.zmq_pub_port = None
        self.zmq_sub_address = None

    def initialize(self):

        self.zmq_context = zmq.Context()
        options = self.application.settings['options']

        self.zmq_pub_sub_proxy = options.zmq_pub_sub_proxy

        # create PUB socket to publish instance events into it
        publish_socket = self.zmq_context.socket(zmq.PUB)

        # do not try to send messages after closing
        publish_socket.setsockopt(zmq.LINGER, 0)

        if self.zmq_pub_sub_proxy:
            # application started with XPUB/XSUB proxy
            self.zmq_xsub = options.zmq_xsub
            publish_socket.connect(self.zmq_xsub)
        else:

            # application started without XPUB/XSUB proxy
            if options.zmq_pub_port_shift:
                # calculate zmq pub port number
                zmq_pub_port = options.port - options.zmq_pub_port_shift
            else:
                zmq_pub_port = options.zmq_pub_port

            self.zmq_pub_port = zmq_pub_port

            publish_socket.bind(
                "tcp://%s:%s" %
                (options.zmq_pub_listen, str(self.zmq_pub_port)))

        # wrap pub socket into ZeroMQ stream
        self.pub_stream = ZMQStream(publish_socket)

        # create SUB socket listening to all events from all app instances
        subscribe_socket = self.zmq_context.socket(zmq.SUB)

        if self.zmq_pub_sub_proxy:
            # application started with XPUB/XSUB proxy
            self.zmq_xpub = options.zmq_xpub
            subscribe_socket.connect(self.zmq_xpub)
        else:
            # application started without XPUB/XSUB proxy
            self.zmq_sub_address = options.zmq_sub_address
            for address in self.zmq_sub_address:
                subscribe_socket.connect(address)

        subscribe_socket.setsockopt_string(zmq.SUBSCRIBE,
                                           six.u(CONTROL_CHANNEL))

        subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, six.u(ADMIN_CHANNEL))

        def listen_socket():
            # wrap sub socket into ZeroMQ stream and set its on_recv callback
            self.sub_stream = ZMQStream(subscribe_socket)
            self.sub_stream.on_recv(self.dispatch_published_message)

        tornado.ioloop.IOLoop.instance().add_callback(listen_socket)

        if self.zmq_pub_sub_proxy:
            logger.info("ZeroMQ XPUB: {0}, XSUB: {1}".format(
                self.zmq_xpub, self.zmq_xsub))
        else:
            logger.info("ZeroMQ PUB - {0}; subscribed to {1}".format(
                self.zmq_pub_port, self.zmq_sub_address))

    def publish(self, channel, message, method=None):
        """
        Publish message into channel of stream.
        """
        method = method or self.DEFAULT_PUBLISH_METHOD
        message["message_type"] = method
        message = json_encode(message)
        to_publish = [utf8(channel), utf8(message)]
        self.pub_stream.send_multipart(to_publish)

    @coroutine
    def dispatch_published_message(self, multipart_message):
        """
        Got message, decide what is it and dispatch into right
        application handler.
        """
        channel = multipart_message[0]
        if six.PY3:
            channel = channel.decode()

        message_data = json_decode(multipart_message[1])

        if channel == CONTROL_CHANNEL:
            yield self.handle_control_message(message_data)
        elif channel == ADMIN_CHANNEL:
            yield self.handle_admin_message(message_data)
        else:
            yield self.handle_channel_message(channel, message_data)

    def subscribe_key(self, subscription_key):
        self.sub_stream.setsockopt_string(zmq.SUBSCRIBE,
                                          six.u(subscription_key))

    def unsubscribe_key(self, subscription_key):
        self.sub_stream.setsockopt_string(zmq.UNSUBSCRIBE,
                                          six.u(subscription_key))

    def clean(self):
        """
        Properly close ZeroMQ sockets.
        """
        if hasattr(self, 'pub_stream') and self.pub_stream:
            self.pub_stream.close()
        if hasattr(self, 'sub_stream') and self.sub_stream:
            self.sub_stream.stop_on_recv()
            self.sub_stream.close()
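
For the zmq_pub_sub_proxy mode referenced above, a minimal XPUB/XSUB forwarder sketch (the endpoints are assumptions): each application instance connects its PUB socket to the XSUB side and its SUB socket to the XPUB side, and zmq.proxy forwards both published messages and subscriptions between the two.

import zmq

def run_pub_sub_proxy(xsub_endpoint="tcp://*:6000", xpub_endpoint="tcp://*:6001"):
    ctx = zmq.Context.instance()
    xsub = ctx.socket(zmq.XSUB)
    xsub.bind(xsub_endpoint)    # application PUB sockets connect here (options.zmq_xsub)
    xpub = ctx.socket(zmq.XPUB)
    xpub.bind(xpub_endpoint)    # application SUB sockets connect here (options.zmq_xpub)
    zmq.proxy(xsub, xpub)       # blocks, forwarding messages and subscriptions

if __name__ == "__main__":
    run_pub_sub_proxy()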
Esempio n. 23
0
class UniClient(object):
    """
    Implementation of "simple" ZeroMQ Paranoid Pirate communication scheme.  This class is the ROUTER, and performs the
    "request" in RPC calls.  By design, only supports one remote worker (DEALER) in order to keep example simple.
    Supports a very basic RPC interface, using MessagePack for encoding/decoding.
    """

    __metaclass__ = ABCMeta

    def __init__(self, endpoint, context=None):
        # type: (str, zmq.Context) -> None
        """
        :param endpoint: ZeroMQ endpoint to bind to.
        :param context: ZeroMQ Context.
        """
        self._q_received_messages = Queue()  # type: Queue[Any]
        self._q_sub_messages = Queue()  # type: Queue[Any]
        self._lock = Lock()

        context = context or zmq.Context.instance()
        socket = context.socket(zmq.ROUTER)
        socket.bind(endpoint)

        self._stream = ZMQStream(socket, IOLoop())
        self._stream.on_recv(self._on_message)

        self._worker_rep = None  # type: Optional[WorkerRep]
        self._connected_event = Event()
        self._hb_check_timer = PeriodicCallback(self._heartbeat, HB_INTERVAL)
        self._hb_check_timer.start()
        self._keep_running = True
        # reply-timeout bookkeeping used by _start_reply_timeout/_stop_reply_timeout
        self.timed_out = False
        self._timeout_handle = None

    def run(self):
        # type: () -> None
        """
        Start the IOLoop, a blocking call to send/recv ZMQ messages until the IOLoop is stopped.
        Note: The name of this function needs to stay the same so UniClientThread's run() is overridden with this function.
        """
        # Handle situation where stop() is called before run() activates
        # This blocks until stop is called
        if self._keep_running:
            self._stream.io_loop.start()

    def stop(self):
        # type: () -> None
        """
        Stop the IOLoop.  Note: this overrides the base class (Thread)'s implementation of stop, so don't rename
        this function.
        """
        with self._lock:
            self._keep_running = False
            if self._stream is not None:
                self._stream.io_loop.stop()

    def shutdown(self):
        # type: () -> None
        """
        Shutdown the uniclient, stopping all timers and unbinding from the ZMQ socket.
        """
        with self._lock:
            if self._stream is not None:
                self._stream.on_recv(None)
                self._stream.socket.setsockopt(zmq.LINGER, 0)
                self._stream.close()
                self._stream = None
                self._worker_rep = None

    def wait_for_worker(self, timeout):
        # type: (float) -> None
        """
        Wait for the client to establish a connection with the remote worker.
        Will return immediately if already connected.
        :param timeout: Max time, in seconds, to wait for the connection to establish.
        """
        event_status = self._connected_event.wait(timeout)
        if not event_status:
            raise LostRemoteError("No worker is connected.")

    def is_connected(self):
        # type: () -> bool
        """
        Returns whether client is connected to a worker.
        :return: A boolean flag to indicate whether a connection to the worker is established.
        """
        return self._worker_rep is not None

    def rpc(self, method, args=None, kwargs=None, timeout=RPC_TIMEOUT):
        # type: (str, List[Any], Optional[Dict[str,Any]], Optional[float]) -> Any
        """
        Call RPC 'method' on remote worker.
        :param method: String indicating which remote method to call.
        :param args: Arguments to provide to remote method.
        :param kwargs: Key arguments to provide to remote method.
        :param timeout: RPC call timeout, in seconds.  Use None for no timeout.
        """
        data = [
            method.encode('utf-8'),
            msgpack.packb([] if args is None else args,
                          default=XeroSerializer.encoder),
            msgpack.packb({} if kwargs is None else kwargs,
                          default=XeroSerializer.encoder)
        ]
        self._request(data)
        try:
            ret = self._q_received_messages.get(timeout=timeout)
        except Empty:
            self._unregister_worker()
            raise LostRemoteError(
                "Worker failed to reply to RPC call in time.")
        return ret

    def get_sub_message(self, timeout=None):
        # type: (float) -> Any
        return self._q_sub_messages.get(timeout=timeout)

    def get_sub_messages(self, timeout=None):
        # type: (float) -> List[Any]
        ret = []
        try:
            while True:
                ret.append(self._q_sub_messages.get(timeout=timeout))
        except Empty:
            # Out of entries.
            pass
        return ret

    def _on_reply_timeout(self):
        # type: () -> None
        """
        This gets called when a ZeroMQ message is sent and no reply comes within the specified timeout.
        """
        self.timed_out = True
        self._timeout_handle = None
        self._stream.io_loop.stop()
        self.on_timeout()

    def _request(self, msg):
        # type: (List[bytes]) -> None
        """
        Send msgpack encoded message via ZeroMQ.
        :param msg: msgpack encoded message.
        """
        # prepare full message
        with self._lock:
            if self._worker_rep is not None:
                to_send = [self._worker_rep.id]
                to_send.extend([UNI_CLIENT_HEADER, WORKER_REQUEST])
                to_send.extend(msg)

                # Sending via io_loop.add_callback is important here, so be careful
                # about modifying it.  All other ZMQ sends happen on this object's
                # IOLoop thread, but _request() is typically called from whatever
                # thread issues the RPC call.  If the message were not sent from
                # within the IOLoop callback, it would only go out the next time
                # the IOLoop wakes up, which looks like very slow ZeroMQ sends.
                self._stream.io_loop.add_callback(
                    lambda x: self._stream.send_multipart(x), to_send)
            else:
                raise LostRemoteError("No worker is connected.")

    def on_log_event(self, event, message):
        # type: (str, str) -> None
        """
        Logs events, designed to be overridden if helpful.
        :param event: The uniclient event type.
        :param message: The message.
        """
        logger.debug(event + message)

    def _on_message(self, message):
        # type: (List[bytes]) -> None
        """
        Processes a received ZeroMQ message.
        :param message: List of strings in the format:
            [ ZMQ Worker ID, Message Header, StrMessagePart1, StrMessagePart2...]
        """
        return_address = message.pop(0)
        cmd = message.pop(0)

        worker_cmds = {
            WORKER_READY: self._on_worker_ready,
            WORKER_PARTIAL_REPLY: self._on_worker_partial_reply,
            WORKER_FINAL_REPLY: self._on_worker_final_reply,
            WORKER_EXCEPTION: self._on_worker_final_reply,
            WORKER_EMIT: self._on_worker_emit,
            WORKER_HEARTBEAT: self._on_worker_heartbeat,
            WORKER_DISCONNECT: self._on_worker_disconnect,
        }
        if cmd in worker_cmds:
            fnc = worker_cmds[cmd]
            fnc(return_address, message)
        else:
            logger.error(
                "Received worker message with unrecognized header: {}.".format(
                    cmd))

    def _on_worker_ready(self, return_address, message):
        # type: (bytes, List[bytes]) -> None
        """
        This gets called when a worker tells us it's ready to receive messages.  This should be the first message we receive
        from a new worker.
        :param return_address: List of return addresses/Worker IDs.
        :param message: ZeroMQ message.
        :return:
        """
        worker_id = return_address
        self._register_worker(worker_id)

    def _on_worker_partial_reply(self, return_address, message):
        # type: (bytes, List[bytes]) -> None
        """
        Process a received worker's ZMQ partial reply.  It will be forwarded to the requesting client.
        :param return_address: Worker ZMQ ID.
        :param message: The worker's reply message.
        """
        message.pop(0)
        try:
            msg = msgpack.unpackb(message[0], raw=False)
        except (msgpack.OutOfData, msgpack.ExtraData):
            msg = message[0]
        self.on_partial_message(msg)

    def _on_worker_final_reply(self, return_address, message):
        # type: (bytes, List[bytes]) -> None
        """
        Process a received worker's ZMQ final reply.  It will be forwarded to the requesting client.
        :param return_address: Worker ZMQ ID.
        :param message: The worker's reply message.
        """

        with self._lock:
            if self._worker_rep is None or return_address != self._worker_rep.id:
                logger.info("Got final reply from unknown worker, discarding")
                return

            # the guard above guarantees this is the registered worker
            self._worker_rep.on_heartbeat()

        message.pop(0)
        try:
            msg = msgpack.unpackb(message[0],
                                  object_hook=XeroSerializer.decoder,
                                  raw=False)
        except (msgpack.OutOfData, msgpack.ExtraData):
            msg = message[0]
        self._q_received_messages.put_nowait(msg)

    def _on_worker_emit(self, return_address, message):
        # type: (bytes, List[bytes]) -> None

        with self._lock:
            if self._worker_rep is not None:
                ret_id = return_address
                if ret_id == self._worker_rep.id:
                    self._worker_rep.on_heartbeat()
                else:
                    logger.error(
                        "Received heartbeat message from unknown worker.")

        message.pop(0)
        try:
            msg = msgpack.unpackb(message[0],
                                  object_hook=XeroSerializer.decoder,
                                  raw=False)
        except (msgpack.OutOfData, msgpack.ExtraData):
            msg = message[0]
        self._q_sub_messages.put(msg)

    def _on_worker_heartbeat(self, return_address, message):
        # type: (bytes, List[bytes]) -> None
        """
        Process worker ZMQ heartbeat message.
        :param return_address: Worker ZMQ ID.
        :param message: Heartbeat message is only the header, so unused.
        """
        with self._lock:
            if self._worker_rep is not None:
                ret_id = return_address
                if ret_id == self._worker_rep.id:
                    self._worker_rep.on_heartbeat()
                else:
                    logger.error(
                        "Received heartbeat message from unknown worker.")

    def _on_worker_disconnect(self, return_address, message):
        # type: (bytes, List[bytes]) -> None
        """
        Process worker ZMQ disconnect message.
        :param return_address: Worker ZMQ ID.
        :param message: Disconnect message is only the header, so unused.
        """
        worker_id = return_address
        self._unregister_worker(worker_id)

    def _heartbeat(self):
        # type: () -> None
        """
        This gets called periodically.  Check for dead worker and remove.
        """
        with self._lock:
            if self._worker_rep is not None:
                self._worker_rep.curr_liveness -= 1
                if not self._worker_rep.is_alive():
                    self._worker_rep = None
                    self.on_log_event("worker.unregister",
                                      "Worker disconnected.")
                else:
                    msg = [
                        self._worker_rep.id, UNI_CLIENT_HEADER,
                        WORKER_HEARTBEAT
                    ]
                    logger.debug("Client Sending heartbeat")
                    self._stream.send_multipart(msg)

    def _register_worker(self, worker_id):
        # type: (bytes) -> None
        """
        Register a worker and associate with a service.
        :param worker_id: The ID of the worker to register.
        """
        logger.info("_register_worker")
        if self._worker_rep is None:
            self._worker_rep = WorkerRep(worker_id)
            self._connected_event.set()
            self.on_log_event(
                "worker.register",
                "Worker for '{}' is connected.".format(worker_id))
        else:
            logger.warning(
                "Received a worker registration message when another worker is already registered."
            )

    def _unregister_worker(self, worker_id=None):
        # type: (Optional[bytes]) -> None
        """
        Unregister a worker.
        """
        with self._lock:
            if worker_id is None or self._worker_rep is not None and worker_id == self._worker_rep.id:
                self._worker_rep = None
                self._connected_event.clear()

        self.on_log_event("worker.unregister", "Worker disconnected.")

    def _start_reply_timeout(self, timeout):
        # type: (float) -> None
        """
        Start a timer that will fire if we don't receive a ZeroMQ reply within the specified timeout.
        :param timeout: Period of time, in seconds, when timeout should happen if no reply is received.
        """
        self._timeout_handle = self._stream.io_loop.call_later(
            timeout, self._on_reply_timeout)

    def _stop_reply_timeout(self):
        # type: () -> None
        """
        Stop/delete the reply timeout.
        """
        if self._timeout_handle:
            self._stream.io_loop.remove_timeout(self._timeout_handle)
            self._timeout_handle = None

    @abstractmethod
    def on_message(self, msg):
        # type: (Any) -> None
        """
        This gets called when a message arrives.  It should be overloaded.

        :param msg: Any serializable datatype, or a list of serializable datatypes.
        """
        raise NotImplementedError()

    @abstractmethod
    def on_partial_message(self, msg):
        # type: (Any) -> None
        """
        This gets called if/when a worker gives a partial reply to a message.  It should be overloaded.

        :param msg: Any serializable datatype, or a list of serializable datatypes.
        """
        raise NotImplementedError()

    @abstractmethod
    def on_timeout(self):
        # type: () -> None
        """
        This gets called when a reply timeout occurs.  Override it.
        """
        raise NotImplementedError()
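
A hypothetical concrete subclass of UniClient (the endpoint, method name and thread wiring are assumptions; constants such as RPC_TIMEOUT come from the surrounding module): the abstract callbacks are filled in, the IOLoop runs in a background thread, and rpc() is issued from the caller's thread once a worker has connected.

import threading

class SimpleUniClient(UniClient):
    def on_message(self, msg):
        print("reply:", msg)

    def on_partial_message(self, msg):
        print("partial reply:", msg)

    def on_timeout(self):
        print("reply timed out")

if __name__ == '__main__':
    client = SimpleUniClient("tcp://127.0.0.1:5557")      # assumed endpoint
    io_thread = threading.Thread(target=client.run)       # run the IOLoop in the background
    io_thread.start()
    try:
        client.wait_for_worker(timeout=5.0)               # raises LostRemoteError if no worker connects
        print(client.rpc("status"))                       # blocks until the worker replies or times out
    finally:
        client.stop()
        io_thread.join()
        client.shutdown()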
Esempio n. 24
0
class ZmqWorker(LoggingMixin, object):
    """
    This is the ZMQ worker implementation.

    The worker will register a :class:`ZMQStream` with the configured
    :class:`zmq.Socket` and :class:`zmq.eventloop.ioloop.IOLoop` instance.

    Upon `ZMQStream.on_recv` the configured `processors` will be executed
    with the deserialized context and the result will be published through the
    configured `zmq.socket`.
    """

    def __init__(self, insocket, outsocket, mgmt, processing, log_handler,
            log_level, io_loop=None):
        """
        Initialize the `ZMQStream` with the `insocket` and `io_loop` and store
        the `outsocket`.

        `insocket` should be of the type `zmq.socket.PULL` `outsocket` should
        be of the type `zmq.socket.PUB`

        `mgmt` is an instance of `spyder.core.mgmt.ZmqMgmt` that handles
        communication between master and worker processes.
        """
        LoggingMixin.__init__(self, log_handler, log_level)

        self._insocket = insocket
        self._io_loop = io_loop or IOLoop.instance()
        self._outsocket = outsocket

        self._processing = processing
        self._mgmt = mgmt
        self._in_stream = ZMQStream(self._insocket, self._io_loop)
        self._out_stream = ZMQStream(self._outsocket, self._io_loop)

    def _quit(self, msg):
        """
        The worker is quitting, stop receiving messages.
        """
        if ZMQ_SPYDER_MGMT_WORKER_QUIT == msg.data:
            self.stop()

    def _receive(self, msg):
        """
        We have a message!

        `msg` is a serialized version of a `DataMessage`.
        """
        message = DataMessage(msg)

        try:
            # this is the real work we want to do
            curi = self._processing(message.curi)
            message.curi = curi
        except Exception:
            # catch any uncaught exception and only log it as CRITICAL
            self._logger.critical(
                    "worker::Uncaught exception executing the worker for URL %s!" %
                    (message.curi.url,))
            self._logger.critical("worker::%s" % (traceback.format_exc(),))

        # finished, now send the result back to the master
        self._out_stream.send_multipart(message.serialize())

    def start(self):
        """
        Start the worker.
        """
        self._mgmt.add_callback(ZMQ_SPYDER_MGMT_WORKER, self._quit)
        self._in_stream.on_recv(self._receive)

    def stop(self):
        """
        Stop the worker.
        """
        # stop receiving
        self._in_stream.stop_on_recv()
        self._mgmt.remove_callback(ZMQ_SPYDER_MGMT_WORKER, self._quit)
        # but work on anything we might already have
        self._in_stream.flush()
        self._out_stream.flush()

    def close(self):
        """
        Close all open sockets.
        """
        self._in_stream.close()
        self._insocket.close()
        self._out_stream.close()
        self._outsocket.close()
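
A sketch of a processing callable that could be passed to ZmqWorker above (purely illustrative): it receives the deserialized curi, may inspect or enrich it, and must return it so the worker can publish the result on its out-stream.

def passthrough_processing(curi):
    # place the real crawl/extract/store logic here; ZmqWorker publishes
    # whatever comes back through its PUB out-stream
    return curi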
Esempio n. 25
0
class Master(object):

    def __init__(self, frontier,
            data_in_sock='ipc:///tmp/robot-data-w2m.sock',
            data_out_sock='ipc:///tmp/robot-data-m2w.sock',
            msg_in_sock='ipc:///tmp/robot-msg-w2m.sock',
            msg_out_sock='ipc:///tmp/robot-msg-m2w.sock',
            io_loop=None):
        self.identity = 'master:%s:%s' % (socket.gethostname(), os.getpid())

        context = zmq.Context()

        self._io_loop = io_loop or IOLoop.instance()

        self._in_socket = context.socket(zmq.SUB)
        self._in_socket.setsockopt(zmq.SUBSCRIBE, b'')
        self._in_socket.bind(data_in_sock)
        self._in_stream = ZMQStream(self._in_socket, io_loop)

        self._out_socket = context.socket(zmq.PUSH)
        self._out_socket.bind(data_out_sock)
        self._out_stream = ZMQStream(self._out_socket, io_loop)

        self._online_workers = set()
        self._running = False

        self._updater = PeriodicCallback(self._send_next, 100, io_loop=io_loop)
        self._reloader = PeriodicCallback(self.reload, 1000, io_loop=io_loop)

        self.frontier = frontier
        self.messenger = ServerMessenger(msg_in_sock, msg_out_sock,
                context, io_loop)

    def start(self):
        logging.info('[%s] starting', self.identity)
        self.messenger.add_callback(CTRL_MSG_WORKER, self._on_worker_msg)
        self.messenger.start()

        self._in_stream.on_recv(self._on_receive_processed)
        self._updater.start()
        self._reloader.start()
        self._running = True

    def stop(self):
        self._running = False
        self._reloader.stop()
        self._updater.stop()
        self.messenger.stop()
#        self.messenger.publish(CTRL_MSG_WORKER, self.identity,
#                CTRL_MSG_WORKER_QUIT)

    def close(self):
        self._in_stream.close()
        self._in_socket.close()
        self._out_stream.close()
        self._out_socket.close()
        self.messenger.close()

    def reload(self):
        pass

    def _on_worker_msg(self, msg):
        if msg.data == CTRL_MSG_WORKER_ONLINE:
            self._online_workers.add(msg.identity)
            logging.info('[%s] append [%s]', self.identity, msg.identity)
            self._send_next()

#        if msg.data == CTRL_MSG_WORKER_QUIT_ACK:
#            if msg.identity in self._online_workers:
#                self._online_workers.remove(msg.identity)

    def _send_next(self):
        if not self._running:
            return

        worker_num = len(self._online_workers)

        if self._running and worker_num > 0:
            while self._out_stream._send_queue.qsize() < worker_num * 4:
                request = self.frontier.get_next_request()
                if not request:
                    break

                msg = RequestMessage(self.identity, request)
                self._out_stream.send_multipart(msg.serialize())
                logging.debug('[%s] send request(%s)',
                        self.identity, request.url)

                self.frontier.reload_request(request)

    def _on_receive_processed(self, zmq_msg):
        msg = ResponseMessage.deserialize(zmq_msg)
        request = msg.response.request
        logging.debug('[%s] receive response(%s)', self.identity, request.url)
        self._send_next()
Esempio n. 26
0
class keyserver(object):
    config_file = None
    mainloop = None
    sqlite_connection = None
    sqlite_cursor = None
    db_file = None

    def __init__(self, config_file, mainloop):
        self.config_file = config_file
        self.mainloop = mainloop
        self.reload()

    def hook_signals(self):
        """Hooks POSIX signals to correct callbacks, call only from the main thread!"""
        import signal as posixsignal
        posixsignal.signal(posixsignal.SIGTERM, self.quit)
        posixsignal.signal(posixsignal.SIGQUIT, self.quit)
        posixsignal.signal(posixsignal.SIGHUP, self.reload)

    def reload(self, *args):
        if self.sqlite_connection:
            self.sqlite_connection.close()
        with open(self.config_file) as f:
            self.config = yaml.safe_load(f)
        if os.path.exists(self.config['keydb']):
            self.db_file = os.path.realpath(self.config['keydb'])
        else:
            self.db_file = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(self.config_file)), self.config['keydb']))
        if not os.path.exists(self.db_file):
            raise RuntimeError("DB file %s does not exist!" % self.db_file)
        self.sqlite_connection = sqlite3.connect(self.db_file, detect_types=sqlite3.PARSE_DECLTYPES)
        self.sqlite_cursor = self.sqlite_connection.cursor()
        
        self.zmq_context = zmq.Context()
        self.zmq_socket = self.zmq_context.socket(zmq.REP)
        self.zmq_socket.bind(self.config['rep_socket'])
        self.zmq_stream = ZMQStream(self.zmq_socket)
        self.zmq_stream.on_recv(self._on_recv)

        print("Config (re-)loaded")

    def _on_recv(self, data, *args, **kwargs):
        #print("_on_recv: data=%s" % repr(data))
        if len(data) != 1:
            self.zmq_stream.send_multipart(["INV", "0x0"]) # Invalid request
            return

        uid = data[0].lower()

        self.sqlite_cursor.execute("SELECT rowid FROM revoked WHERE uid=?;", (uid, ) )
        revokeddata = self.sqlite_cursor.fetchone()
        if revokeddata:
            # PONDER: Do we log this here or let the gatekeeper program publish the final result?
            self.zmq_stream.send_multipart([b"REV", b"0x0"]) # Revoked card
            return

        self.sqlite_cursor.execute("SELECT acl FROM keys WHERE uid=?;", (uid, ) )
        acldata = self.sqlite_cursor.fetchone()
        if not acldata:
            # PONDER: Do we log this here or let the gatekeeper program publish the final result?
            self.zmq_stream.send_multipart([b"NF", b"0x0"]) # Not found
            return

        # PONDER: About logging: note that we do not yet know if this ACL is accepted at the gatekeeper end
        self.zmq_stream.send_multipart(["OK", "0x%x" % acldata[0]])
        

    def quit(self, *args):
        # This will close the sockets too
        self.zmq_context.destroy()
        self.mainloop.stop()

    def run(self):
        print("Starting mainloop")
        self.mainloop.start()
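
A minimal querying sketch for the keyserver above (the rep_socket endpoint and the example UID are assumptions): a REQ client sends a single frame containing the lower-case card UID and receives a two-frame status/ACL reply.

import zmq

ctx = zmq.Context.instance()
req = ctx.socket(zmq.REQ)
req.connect("ipc:///tmp/keyserver_rep.sock")   # assumed value of config['rep_socket']

req.send(b"04a224b9c63a80")                    # card UID
status, acl = req.recv_multipart()             # e.g. (b"OK", b"0x3"), (b"NF", b"0x0") or (b"REV", b"0x0")
print(status, acl)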
Esempio n. 27
0
class TornadoRPCClient(RPCClientBase):  #{
    """An asynchronous service proxy (based on Tornado IOLoop)"""

    def __init__(self, context=None, ioloop=None, **kwargs):  #{
        """
        Parameters
        ==========
        ioloop : IOLoop
            An existing IOLoop instance, if not passed, zmq.IOLoop.instance()
            will be used.
        context : Context
            An existing Context instance, if not passed, zmq.Context.instance()
            will be used.
        serializer : Serializer
            An instance of a Serializer subclass that will be used to serialize
            and deserialize args, kwargs and the result.
        """
        Context, _ = get_zmq_classes()

        if context is None:
            self.context = Context.instance()
        else:
            assert isinstance(context, Context)
            self.context = context

        self.ioloop   = IOLoop.instance() if ioloop is None else ioloop
        self._futures = {}  # {<req_id> : <Future>}

        super(TornadoRPCClient, self).__init__(**kwargs)
    #}
    def _create_socket(self):  #{
        super(TornadoRPCClient, self)._create_socket()
        self.socket = ZMQStream(self.socket, self.ioloop)
        self.socket.on_recv(self._handle_reply)
    #}
    def _handle_reply(self, msg_list):  #{
        self.logger.debug('received: %r' % msg_list)
        reply = self._parse_reply(msg_list)

        if reply is None:
            return

        req_id   = reply['req_id']
        msg_type = reply['type']
        result   = reply['result']

        if msg_type == b'ACK':
            return

        future_tout = self._futures.pop(req_id, None)

        if future_tout is None:
            return

        future, tout_cb = future_tout

        # stop the timeout if there is one
        if tout_cb is not None:
            tout_cb.stop()

        if msg_type == b'OK':
            future.set_result(result)
        else:
            future.set_exception(result)
    #}

    #-------------------------------------------------------------------------
    # Public API
    #-------------------------------------------------------------------------

    def __getattr__(self, name):  #{
        return AsyncRemoteMethod(self, name)
    #}
    def call(self, proc_name, args=[], kwargs={}, ignore=False, timeout=None):  #{
        """
        Call the remote method with *args and **kwargs.

        Parameters
        ----------
        proc_name : <str>   name of the remote procedure to call
        args      : <tuple> positional arguments of the procedure
        kwargs    : <dict>  keyword arguments of the procedure
        ignore    : <bool>  whether to ignore result or wait for it
        timeout   : <float> | None
            Number of seconds to wait for a reply.
            RPCTimeoutError is set as the future result in case of timeout.
            Set to None, 0 or a negative number to disable.

        Returns None or a <Future> representing a remote call result
        """
        if not (timeout is None or isinstance(timeout, (int, float))):
            raise TypeError("timeout param: <float> or None expected, got %r" % timeout)

        req_id, msg_list = self._build_request(proc_name, args, kwargs, ignore)
        self.socket.send_multipart(msg_list)

        if ignore:
            return None

        # The following logic assumes that the reply won't come back too
        # quickly, otherwise the callbacks won't be in place in time. That
        # should be fine, as this code runs very fast. This approach improves
        # latency because we send the request as soon as possible.
        def _abort_request():
            future_tout = self._futures.pop(req_id, None)
            if future_tout:
                future, _ = future_tout
                tout_msg  = "Request %s timed out after %s sec" % (req_id, timeout)
                self.logger.debug(tout_msg)
                future.set_exception(RPCTimeoutError(tout_msg))

        timeout = timeout or 0

        if timeout > 0:
            tout_cb = DelayedCallback(_abort_request, int(timeout*1000), self.ioloop)
            tout_cb.start()
        else:
            tout_cb = None

        future = Future()
        self._futures[req_id] = (future, tout_cb)

        return future
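
A hedged usage sketch for TornadoRPCClient (the no-argument construction and the connect() call are assumed to be provided by RPCClientBase; endpoint and procedure name are invented): call() returns a Future, so the result is consumed from a callback, or a coroutine, running on the same IOLoop.

client = TornadoRPCClient()
client.connect('tcp://127.0.0.1:5555')        # assumed RPCClientBase method

def on_done(future):
    try:
        print("result:", future.result())     # raises RPCTimeoutError on timeout
    finally:
        client.ioloop.stop()

future = client.call('echo', args=('hello',), timeout=2.0)
client.ioloop.add_future(future, on_done)
client.ioloop.start()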
Esempio n. 28
0
class TestMNWorker(TestCase):

    endpoint = b'tcp://127.0.0.1:5555'
    service = b'test'

    def setUp(self):
        if _do_print:
            print('Setting up...')
        sys.stdout.flush()
        self.context = zmq.Context()
        self.broker = None
        self._msgs = []
        return

    def tearDown(self):
        if _do_print:
            print('Tearing down...')
        sys.stdout.flush()
        if self.broker:
            self._stop_broker()
        self.broker = None
        self.context = None
        return

    def _on_msg(self, msg):
        if _do_print:
            print('Broker received:', msg)
        self.target = msg.pop(0)
        marker_frame = msg.pop(0)
        if msg[1] == b'\x01':  # ready
            if _do_print:
                print('READY received')
            return
        if msg[1] == b'\x04':  # heartbeat
            if _do_print:
                print('HB received')
            return
        if msg[1] == b'\x03':  # reply
            IOLoop.instance().stop()
            return
        return

    def _start_broker(self, do_reply=False):
        """Helper activating a fake broker in the ioloop.
        """
        socket = self.context.socket(zmq.ROUTER)
        self.broker = ZMQStream(socket)
        self.broker.socket.setsockopt(zmq.LINGER, 0)
        self.broker.bind(self.endpoint)
        self.broker.on_recv(self._on_msg)
        self.broker.do_reply = do_reply
        self.broker.ticker = PeriodicCallback(self._tick,
                                              WorkerRunner.HB_INTERVAL)
        self.broker.ticker.start()
        self.target = None
        if _do_print:
            print("Broker started")
        return

    def _stop_broker(self):
        if self.broker:
            self.broker.ticker.stop()
            self.broker.ticker = None
            self.broker.socket.close()
            self.broker.close()
            self.broker = None
        if _do_print:
            print("Broker stopped")
        return

    def _tick(self):
        if self.broker and self.target:
            msg = [self.target, b'', b'MNPW01', b'\x04']
            self.broker.send_multipart(msg)
            if _do_print:
                print("Tick sent:", msg)
        return

    def send_req(self):
        data = [b'AA', b'bb']
        msg = [self.target, b'', b'MNPW01', b'\x02', self.target, b''] + data
        self.broker.send_multipart(msg)
        if _do_print:
            print('broker sent:', msg)
        return

    @staticmethod
    def stop_loop():
        IOLoop.instance().stop()
        return

    # Tests follow

    def test_simple_worker(self):
        """Test MNWorker simple req/reply.
        """
        self._start_broker()
        time.sleep(0.2)
        worker = WorkerRunner(self.context, self.endpoint, self.service)
        sender = DelayedCallback(self.send_req, 500)
        stopper = DelayedCallback(self.stop_loop, 2500)
        sender.start()
        stopper.start()
        IOLoop.instance().start()
        worker.shutdown()
        self._stop_broker()
        return
Esempio n. 29
0
class MqAsyncReq(object):

    """Class for the MDP client side.

    Thin asynchronous encapsulation of a zmq.REQ socket.
    Provides a :func:`request` method with optional timeout.

    Objects of this class are meant to be integrated into the
    asynchronous IOLoop of pyzmq.

    :param context:  the ZeroMQ context to create the socket in.
    :type context:   zmq.Context
    :param service:  the service the client should use
    :type service:   str

    The endpoint to connect to is resolved from the MQ configuration
    (Domogik config loader or the Parameter objects) in :func:`__init__`.
    """

    _proto_version = b'MDPC01'

    def __init__(self, context, service):
        """Initialize the MDPClient.
        """
        if ("domogik.common.configloader" in sys.modules):
            cfg = Loader('mq').load()
            config = dict(cfg[1])
            self.endpoint = "tcp://{0}:{1}".format(config['ip'], config['req_rep_port'])
        else:
            ip = Parameter.objects.get(key='mq-ip')
            port = Parameter.objects.get(key='mq-req_rep_port')
            self.endpoint = "tcp://{0}:{1}".format(ip.value, port.value)
        socket = ZmqSocket(context, zmq.REQ)
        ioloop = IOLoop.instance()
        self.service = service
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.can_send = True
        self._proto_prefix = [ PROTO_VERSION, service]
        self._tmo = None
        self.timed_out = False
        socket.connect(self.endpoint)
        return

    def shutdown(self):
        """Method to deactivate the client connection completely.

        Will delete the stream and the underlying socket.

        .. warning:: The instance MUST NOT be used after :func:`shutdown` has been called.

        :rtype: None
        """
        if not self.stream:
            return
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        return

    def request(self, msg, timeout=None):
        """Send the given message.

        :param msg:     message parts to send.
        :type msg:      list of str
        :param timeout: time to wait in milliseconds.
        :type timeout:  int
        
        :rtype None:
        """
        if not self.can_send:
            raise InvalidStateError()
        if type(msg) in (bytes, str):
            msg = [msg]
        # prepare full message
        to_send = self._proto_prefix[:]
        to_send.extend(msg)
        self.stream.send_multipart(to_send)
        self.can_send = False
        if timeout:
            self._start_timeout(timeout)
        return

    def _on_timeout(self):
        """Helper called after timeout.
        """
        self.timed_out = True
        self._tmo = None
        self.on_timeout()
        return

    def _start_timeout(self, timeout):
        """Helper for starting the timeout.

        :param timeout:  the time to wait in milliseconds.
        :type timeout:   int
        """
        self._tmo = DelayedCallback(self._on_timeout, timeout)
        self._tmo.start()
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:   list of message parts.
        :type msg:    list of str
        """
        if self._tmo:
            # disable timeout
            self._tmo.stop()
            self._tmo = None
        # setting state before invoking on_message, so we can request from there
        self.can_send = True
        self.on_message(msg)
        return

    def on_message(self, msg):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """
        pass

    def on_timeout(self):
        """Public method called when a timeout occured.

        .. note:: Does nothing. Should be overloaded!
        """
        pass
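
A brief, hypothetical usage sketch for MqAsyncReq (the service name and request payload are invented; the endpoint is resolved from the MQ configuration in __init__): override on_message / on_timeout, send a request and run the IOLoop until a reply or the timeout arrives.

class StatusClient(MqAsyncReq):
    def on_message(self, msg):
        print("reply:", msg)
        IOLoop.instance().stop()

    def on_timeout(self):
        print("no reply within the timeout")
        IOLoop.instance().stop()

if __name__ == '__main__':
    client = StatusClient(zmq.Context(), b'manager')   # assumed service name
    client.request([b'status.get'], timeout=2000)      # timeout in milliseconds
    IOLoop.instance().start()
    client.shutdown()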
Esempio n. 30
0
class Test_MDPWorker(unittest.TestCase):

    endpoint = b'tcp://127.0.0.1:7777'
    service = b'test'

    def setUp(self):
        print 'set up'
        sys.stdout.flush()
        self.context = zmq.Context()
        self.broker = None
        self._msgs = []
        return

    def tearDown(self):
        print 'tear down'
        sys.stdout.flush()
        if self.broker:
            self._stop_broker()
        self.broker = None
        ##         self.context.term()
        self.context = None
        return

    def _on_msg(self, msg):
        if _do_print:
            print 'broker received:',
            pprint(msg)
        self.target = msg.pop(0)
        if msg[1] == chr(1):  # ready
            print 'READY'
            self.target = msg[0]
            return
        if msg[1] == chr(4):  # heartbeat
            print 'HB'
            return
        if msg[1] == chr(3):  # reply
            IOLoop.instance().stop()
            return
        return

    def _start_broker(self, do_reply=False):
        """Helper activating a fake broker in the ioloop.
        """
        socket = self.context.socket(zmq.XREP)
        self.broker = ZMQStream(socket)
        self.broker.socket.setsockopt(zmq.LINGER, 0)
        self.broker.bind(self.endpoint)
        self.broker.on_recv(self._on_msg)
        self.broker.do_reply = do_reply
        self.broker.ticker = PeriodicCallback(self._tick, MyWorker.HB_INTERVAL)
        self.broker.ticker.start()
        self.target = None
        return

    def _stop_broker(self):
        if self.broker:
            self.broker.ticker.stop()
            self.broker.ticker = None
            self.broker.socket.close()
            self.broker.close()
            self.broker = None
        return

    def _tick(self):
        if self.broker and self.target:
            msg = [self.target, b'MPDW01', chr(4)]
            self.broker.send_multipart(msg)
        return

    def send_req(self):
        data = ['AA', 'bb']
        msg = [self.target, b'MPDW01', chr(2), self.target, b''] + data
        print 'broker sending:',
        pprint(msg)
        self.broker.send_multipart(msg)
        return

    # tests follow

    def test_01_simple_01(self):
        """Test MDPWorker simple req/reply.
        """
        self._start_broker()
        time.sleep(0.2)
        worker = MyWorker(self.context, self.endpoint, self.service)
        sender = DelayedCallback(self.send_req, 1000)
        sender.start()
        IOLoop.instance().start()
        worker.shutdown()
        self._stop_broker()
        return
Esempio n. 31
0
class LRUQueue(object):
    """LRUQueue class using ZMQStream/IOLoop for event dispatching"""

    def __init__(self, backend_socket, frontend_socket):
        self.available_workers = 0
        self.workers = []
        self.client_nbr = NBR_CLIENTS

        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        self.backend.on_recv(self.handle_backend)

        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        # Queue worker address for LRU routing
        worker_addr, empty, client_addr = msg[:3]

        assert self.available_workers < NBR_WORKERS

        # add worker back to the list of workers
        self.available_workers += 1
        self.workers.append(worker_addr)

        #   Second frame is empty
        assert empty == b""

        # Third frame is READY or else a client reply address
        # If client reply, send rest back to frontend
        if client_addr != b"READY":
            empty, reply = msg[3:]

            # Following frame is empty
            assert empty == b""

            self.frontend.send_multipart([client_addr, b'', reply])

            self.client_nbr -= 1

            if self.client_nbr == 0:
                # Exit after N messages
                self.loop.add_timeout(time.time()+1, self.loop.stop)

        if self.available_workers == 1:
            # on first recv, start accepting frontend messages
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        # Now get next client request, route to LRU worker
        # Client request is [address][empty][request]
        client_addr, empty, request = msg

        assert empty == b""

        #  Dequeue and drop the next worker address
        self.available_workers -= 1
        worker_id = self.workers.pop()

        self.backend.send_multipart([worker_id, b'', client_addr, b'', request])
        if self.available_workers == 0:
            # stop receiving until workers become available again
            self.frontend.stop_on_recv()
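A minimal wiring sketch for the queue above (the tcp endpoints are illustrative, and NBR_CLIENTS/NBR_WORKERS are assumed to be defined at module level as in the original example):

import zmq
from zmq.eventloop.ioloop import IOLoop

context = zmq.Context()
frontend = context.socket(zmq.ROUTER)   # clients connect here
frontend.bind("tcp://*:5559")
backend = context.socket(zmq.ROUTER)    # workers connect here
backend.bind("tcp://*:5560")

queue = LRUQueue(backend, frontend)
IOLoop.instance().start()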
Esempio n. 32
0
class MQRep(object):

    """Class for the MDP worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3    # HBs to miss before connection counts as dead

    def __init__(self, context, service):
        """Initialize the MDPWorker.

        context is the zmq context to create the socket from.
        service is a byte-string with the service name.
        """
        if DEBUG:
            print("MQRep > __init__")
        cfg = Loader('mq').load()
        config = dict(cfg[1])
        if config['ip'].strip() == "*":
            config['ip'] = get_ip()
        self.endpoint = "tcp://{0}:{1}".format(config['ip'], config['req_rep_port'])
        self.context = context
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()

        ### patch fritz
        self._reconnect_in_progress = False
        ### end patch fritz
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.
        """
        if DEBUG:
            print("MQRep > _create_stream")
        socket = ZmqSocket(self.context, zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_mpd_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        if self.ticker != None:
            if DEBUG:
                print("MQRep > _create_stream - stop ticker")
            self.ticker.stop()
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        if DEBUG:
            print("MQREP > _send_ready")
        ready_msg = [ b'', self._proto_version, b'\x01', self.service ]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        if DEBUG:
            print("MQREP > _send_ready > curr_liveness <= {0}".format(self.HB_LIVENESS))
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.
        """
        if DEBUG:
            print("MQREP > _tick")
        self.curr_liveness -= 1
        if DEBUG:
            print('MQREP > _tick - {0} tick = {1}'.format(time.time(), self.curr_liveness))
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        if DEBUG:
            print('MQREP > _tick - {0} lost connection'.format(time.time()))
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, self.HB_INTERVAL)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        msg = [ b'', self._proto_version, b'\x04' ]
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.
        """
##         if self.need_handshake:
##             raise ConnectionNotReadyError()
        # prepare full message
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_mpd_message(self, msg):
        """Helper method called on message receive.

        msg is a list w/ the message parts
        """
        if DEBUG:
            print("MQRep > _on_mpd_message : {0} - {1}".format(time.strftime("%H:%M:%S"), msg))
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if DEBUG:
            print("MQREP > _on_mpd_message > curr_liveness <= {0}".format(self.HB_LIVENESS))
        if msg_type == b'\x05': # disconnect
            if DEBUG:
                print("MQREP > _on_mpd_message > type x05 : disconnect")
            self.curr_liveness = 0 # reconnect will be triggered by hb timer
        elif msg_type == b'\x02': # request
            if DEBUG:
                print("MQREP > _on_mpd_message > type x02 : request")
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [ b'', self._proto_version, b'\x03'] + envelope # REPLY
            self.envelope = envelope
            mes = MQMessage()
            mes.set(msg)
            #print("MQRep > before self.on_mdp_request")
            #print(self.on_mdp_request)
            #print(mes)
            try:
                self.on_mdp_request(mes)
            except:
                print("ERROR {0}".format(traceback.format_exc()))
        else:
            if DEBUG:
                print("MQREP > _on_mpd_message > type ??? : invalid or hbeat")
            # invalid message
            # ignored
            # if \x04, this is a hbeat message
            pass
        return

    def on_mdp_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
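A hypothetical subclass sketch for MQRep: the service name is illustrative, and the 'mq' configuration read in __init__ is assumed to be available so the endpoint can be resolved.

class EchoRep(MQRep):
    def on_mdp_request(self, message):
        # message is the MQMessage built in _on_mpd_message()
        self.reply([b'ok'])   # reply() prepends the stored REPLY envelope

rep = EchoRep(zmq.Context(), b'example.service')
IOLoop.instance().start()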
Esempio n. 33
0
class HostController:
    def __init__(self):
        #procControllerAddr = '165.227.24.226'  # I am client to HostController
        #procControllerPort = '5557'
        hostControllerPort = '5556' # I am server to device

        print("Host Controller Starting\n")

        self.context = zmq.Context()   # get context
        self.loop = IOLoop.instance()

#       self.clientSetup = ClientSetup(context)  # instantiate the ClientSetup object
        self.serverSetup = ServerSetup(self.context) # instantiate the ServerSetup object


        # set up separate server and client sockets
        self.serverSocket = self.serverSetup.createServerSocket() # get a server socket
        self.serverSetup.serverBind(hostControllerPort, self.serverSocket) # bind to an address

#       self.clientSocket = self.clientSetup.createClientSocket() # get a client socket

# NOTE: setIdentity() MUST BE CALLED BEFORE clientConnect or the identity will
# not take effect
#       self.clientSetup.setIdentity(MasterId().getDevId()) # get the device id
#       self.clientSetup.clientConnect(hostControllerAddr, hostControllerPort, self.clientSocket) # connect to server using clientSocket
        self.serverSocket = ZMQStream(self.serverSocket)
        self.serverSocket.on_recv(self.onServerRecv)
        self.messages = Messages() # instantiate a Messages object

        self.inDict = {}
        self.outDict = {}

    def onServerRecv(self,msg):
        print("msg=", msg)
        print("length of msg=", len(msg))
        ident = msg[0]
        cmdFrmClient = msg[1]
        data = msg[2]
        # is it a message from a client?
        if (len(msg) == 3):
            print("Message received from device controller: ident=", ident,"cmd=", cmdFrmClient, "data=", data)
            self.inDict = self.messages.bufferToDict(data) # create a list from the message
            print("Internal list, devType={}, cmd={}, data={}, returnList={}\n"
                    .format(self.inDict['devType'], self.inDict['cmd'], self.inDict['data'], self.inDict['returnList']))

            # For testing purposes, now send the message back down the line
            self.inDict['data'] += ' host controller got it'
            self.outIdent = self.messages.popLastReturnId(self.inDict).encode() # get the device controller id
            print("Ident poped from returnId", self.outIdent)
            dataToClient = self.messages.dictToBuffer(self.inDict).encode() # create a buffer
            print("Data to Device Controller=", dataToClient)
            cmdToClient = self.inDict['cmd'].encode()
            print("Cmd to Device Controller=", cmdToClient)
            print("Sending to outIdent =", self.outIdent)
            self.serverSocket.send_multipart([self.outIdent, cmdToClient, dataToClient])

        else:
            print("Message received from host proc: cmd=", cmdFrmClient, "data=", data)


    def start(self):
    #        self.periodic.start()
        try:
            self.loop.start()

        except KeyboardInterrupt:
            pass
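A minimal entry-point sketch for the controller above, assuming ServerSetup and Messages are importable from the same companion modules used by the Device example:

if __name__ == "__main__":
    controller = HostController()
    controller.start()   # blocks in the IOLoop until Ctrl-C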
Esempio n. 34
0
class UniWorker(object):
    """
    Implementation of "simple" ZeroMQ Paranoid Pirate communication scheme.  This class is the DEALER, and performs the
    "reply" in RPC calls.  By design, only supports one remote client (ROUTER) in order to keep example simple.
    Supports a very basic RPC interface, using MessagePack for encoding/decoding.
    """

    __metaclass__ = ABCMeta

    def __init__(self, endpoint, context=None):
        # type: (str, zmq.Context) -> None
        """
        Initialize the worker.
        :param endpoint: ZeroMQ endpoint to connect to.
        :param context: ZeroMQ Context
        """
        self._context = context or zmq.Context.instance()
        self._endpoint = endpoint
        self._stream = None  # type: Optional[ZMQStream]
        self._tmo = None
        self._need_handshake = True
        self._ticker = None  # type: Optional[PeriodicCallback]
        self._delayed_cb = None
        self._connected_event = Event()
        self._lock = Lock()

        self._create_stream()

        self._curr_liveness = HB_LIVENESS
        self._keep_running = True

    def _create_stream(self):
        # type: () -> None
        """
        Helper function to create the ZMQ stream, configure callbacks.
        """
        self.on_log_event("uniworker.connect", "Trying to connect to client")
        socket = self._context.socket(zmq.DEALER)

        self._stream = ZMQStream(socket, IOLoop())
        self._stream.on_recv(self._on_message)
        self._stream.socket.setsockopt(zmq.LINGER, 0)
        self._stream.connect(self._endpoint)

        self._ticker = PeriodicCallback(self._tick, HB_INTERVAL)
        self._send_ready()
        self._ticker.start()

    def run(self):
        # type: () -> None
        """
        Start the IOLoop, a blocking call to send/recv ZMQ messages until the IOLoop is stopped.
        Note: The name of this function needs to stay the same so UniWorkerThread's run() is overridden with this function.
        """
        if self._keep_running:
            self._stream.io_loop.start()

    def stop(self):
        # type: () -> None
        """
        Stop the IOLoop.
        """
        with self._lock:
            self._keep_running = False
            if self._stream is not None:
                self._stream.io_loop.stop()
            else:
                logger.warning("Can't stop worker-has shutdown() been called?")

    def shutdown(self):
        # type: () -> None
        """
        Close the stream/socket.  This should be called when closing the connection for the last time.
        """

        with self._lock:
            if self._ticker:
                self._ticker.stop()
                self._ticker = None
            if not self._stream:
                return

            self._stream.on_recv(None)
            self._send_disconnect()
            self._stream.close()
            self._stream = None
            self._need_handshake = True

    def wait_for_client(self, timeout):
        # type: (float) -> None
        """
        Wait for the worker to establish a connection with the remote client.
        Will return immediately if already connected.
        Typically, the worker provides a service/responds to requests, so this is really only used for unit testing.
        :param timeout: Max time, in seconds, to wait for the connection to establish.
        """
        event_status = self._connected_event.wait(timeout)
        if not event_status:
            raise LostRemoteError("Timed out waiting for a client to connect.")

    def is_connected(self):
        # type: () -> bool
        """
        Returns whether worker is connected to a client.
        :return: A boolean flag to indicate whether a connection to a client is established.
        """
        return not self._need_handshake

    def send_reply(self, msg, partial=False, exception=False):
        # type: (Any, bool, bool) -> None
        """
        Send a ZeroMQ message in reply to a client request.
        This should only be called out of the overridden do_work method.

        :param msg: The message to be sent out.
        :param partial: Flag indicating whether the response is a partial or final ZMQ message.
        :param exception: Flag indicating that the reply carries a serialized exception.
        """

        msg = msgpack.Packer(default=XeroSerializer.encoder).pack(msg)
        if exception:
            to_send = [WORKER_EXCEPTION]
        elif partial:
            to_send = [WORKER_PARTIAL_REPLY]
        else:
            to_send = [WORKER_FINAL_REPLY]
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)

        self._stream.send_multipart(to_send, track=True, copy=False)

    def emit(self, msg):
        # type: (Any) -> None
        if not self.is_connected():
            raise LostRemoteError("No client is connected.")
        msg = msgpack.Packer(default=XeroSerializer.encoder).pack(msg)
        to_send = [WORKER_EMIT]
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self._stream.io_loop.add_callback(
            lambda x: self._stream.send_multipart(x, track=True, copy=False),
            to_send)

    def _tick(self):
        # type: () -> None
        """
        Periodic callback to check connectivity to client.
        """
        if self._curr_liveness >= 0:
            self._curr_liveness -= 1

        if self._curr_liveness > 0:
            self._send_heartbeat()
        elif self._curr_liveness == 0:
            # Connection died, close on our side.
            self.on_log_event(
                "uniworker.tick",
                "Connection to uniclient timed out, disconnecting")
            self._connected_event.clear()
        else:
            self._send_ready()

    def _send_heartbeat(self):
        # type: () -> None
        """
        Send a heartbeat message to the client.
        """
        # Heartbeats must keep going out regularly even when many emit() messages are queued.
        # See https://pyzmq.readthedocs.io/en/latest/eventloop.html#send for the trade-off
        # between sending on the stream and sending directly on its socket.
        self._stream.send_multipart([WORKER_HEARTBEAT])

    def _send_disconnect(self):
        # type: () -> None
        """
        Send a disconnect message to the client.
        """
        # The disconnect message should take priority over any queued traffic.
        self._stream.send_multipart([WORKER_DISCONNECT])

    def _send_ready(self):
        # type: () -> None
        """
        Send a ready message to the client.
        """
        self.on_log_event("uniworker.ready", "Sending ready to client.")
        self._stream.send_multipart([WORKER_READY])

    def _on_message(self, msg):
        # type: (List[bytes]) -> None
        """
        Processes a received ZeroMQ message.
        :param msg: List of frames in the format:
            [Header, MessageType, MessagePart1, MessagePart2, ...]
        """

        # 2nd part is protocol version
        protocol_version = msg.pop(0)
        if protocol_version != UNI_CLIENT_HEADER:  # version check, ignore old versions
            logger.error(
                "Message doesn't start with {}".format(UNI_CLIENT_HEADER))
            return
        # 3rd part is message type
        msg_type = msg.pop(0)
        # any message resets the liveness counter
        self._need_handshake = False
        self._connected_event.set()
        self._curr_liveness = HB_LIVENESS
        if msg_type == WORKER_DISCONNECT:  # disconnect
            self._curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == WORKER_REQUEST:  # request
            # remaining parts are the user message
            self._on_request(msg)
        elif msg_type == WORKER_HEARTBEAT:
            # received heartbeat - liveness counter already reset above
            pass
        else:
            logger.error("Uniworker received unrecognized message")

    def _on_request(self, message):
        # type: (List[bytes]) -> None
        """
        Called on incoming RPC messages; unpacks the encoded frames into arguments for do_work().
        :param message: List of frames: [method name, packed args, packed kwargs].
        """
        name = str(message[0], 'utf-8')
        args = msgpack.unpackb(message[1],
                               object_hook=XeroSerializer.decoder,
                               raw=False)
        kwargs = msgpack.unpackb(message[2],
                                 object_hook=XeroSerializer.decoder,
                                 raw=False)
        self.do_work(name, args, kwargs)

    def on_log_event(self, event, message):
        # type: (str, str) -> None
        """
        Called on internal loggable events.  Designed for override.
        :param event: The event type.
        :param message: Loggable message.
        """
        logger.debug("{}: {}".format(event, message))

    @abstractmethod
    def do_work(self, name, args, kwargs):
        # type: (str, List[Any], Dict[Any,Any]) -> None
        """
        Override this method for worker-specific message handling.
        :param name: The 'name' of the function/rpc call.
        :param args: Function call arguments.
        :param kwargs: Function call key arguments.
        """
        raise NotImplementedError()
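A hypothetical worker sketch built on UniWorker; the endpoint and payload are illustrative only.

class EchoWorker(UniWorker):
    def do_work(self, name, args, kwargs):
        # Echo the RPC call back to the client as the final reply.
        self.send_reply({"call": name, "args": args, "kwargs": kwargs})

worker = EchoWorker("tcp://127.0.0.1:5755")
worker.run()   # blocks in the worker's IOLoop until stop() is called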
Esempio n. 35
0
class MDPWorker(object):

    """Class for the MDP worker side.

    Thin encapsulation of a zmq.XREQ socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3    # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service):
        """Initialize the MDPWorker.

        context is the zmq context to create the socket from.
        service is a byte-string with the service name.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.
        """
        socket = self.context.socket(zmq.XREQ)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        ready_msg = [ b'', self._proto_version, chr(1), self.service ]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.
        """
        self.curr_liveness -= 1
##         print '%.3f tick - %d' % (time.time(), self.curr_liveness)
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        print '%.3f lost connection' % time.time()
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        msg = [ b'', self._proto_version, chr(4) ]
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.
        """
##         if self.need_handshake:
##             raise ConnectionNotReadyError()
        # prepare full message
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        msg is a list w/ the message parts
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if msg_type == '\x05': # disconnect
            print '    DISC'
            self.curr_liveness = 0 # reconnect will be triggered by hb timer
        elif msg_type == '\x02': # request
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [ b'', self._proto_version, '\x03'] + envelope # REPLY
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message
            # ignored
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
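A hypothetical subclass sketch for MDPWorker: echo every request back to the requesting client (endpoint and service name are illustrative).

class EchoMDPWorker(MDPWorker):
    def on_request(self, msg):
        # msg holds the request frames; reply() prepends the stored envelope
        self.reply(msg)

worker = EchoMDPWorker(zmq.Context(), "tcp://127.0.0.1:5555", b'echo')
IOLoop.instance().start()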
Esempio n. 36
0
class RPCService(RPCBase):
    """An RPC service that takes requests over a ROUTER socket."""

    def _create_socket(self):
        self.socket = self.context.socket(zmq.ROUTER)
        self.stream = ZMQStream(self.socket, self.loop)
        self.stream.on_recv(self._handle_request)

    def _build_reply(self, status, data):
        """Build a reply message for status and data.

        Parameters
        ----------
        status : bytes
            Either b'SUCCESS' or b'FAILURE'.
        data : list of bytes
            A list of data frame to be appended to the message.
        """
        reply = []
        reply.extend(self.idents)
        reply.extend([b'|', self.msg_id, status])
        reply.extend(data)
        return reply

    def _handle_request(self, msg_list):
        """Handle an incoming request.

        The request is received as a multipart message:

        [<idents>, b'|', msg_id, method, <sequence of serialized args/kwargs>]

        The reply depends on if the call was successful or not:

        [<idents>, b'|', msg_id, 'SUCCESS', <sequence of serialized result>]
        [<idents>, b'|', msg_id, 'FAILURE', <JSON dict of ename, evalue, traceback>]

        Here the (ename, evalue, traceback) are utf-8 encoded unicode.
        """
        i = msg_list.index(b'|')
        self.idents = msg_list[0:i]
        self.msg_id = msg_list[i+1]
        method = msg_list[i+2].decode('utf-8')
        data = msg_list[i+3:]
        args, kwargs = self._serializer.deserialize_args_kwargs(data)

        # Find and call the actual handler for message.
        handler = getattr(self, method, None)
        if handler is not None and getattr(handler, 'is_rpc_method', False):
            try:
                result = handler(*args, **kwargs)
            except Exception:
                self._send_error()
            else:
                try:
                    data_list = self._serializer.serialize_result(result)
                except Exception:
                    self._send_error()
                else:
                    reply = self._build_reply(b'SUCCESS', data_list)
                    self.stream.send_multipart(reply)
        else:
            logging.error('Unknown RPC method: %s' % method)
        self.idents = None
        self.msg_id = None

    def _send_error(self):
        """Send an error reply."""
        etype, evalue, tb = sys.exc_info()
        error_dict = {
            'ename' : str(etype.__name__),
            'evalue' : str(evalue),
            'traceback' : format_exc(tb)
        }
        data_list = [jsonapi.dumps(error_dict)]
        reply = self._build_reply(b'FAILURE', data_list)
        self.stream.send_multipart(reply)

    def start(self):
        """Start the event loop for this RPC service."""
        self.loop.start()
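A hypothetical service sketch: the dispatcher above only calls methods flagged with is_rpc_method, so a minimal stand-in decorator is shown here (the real code base presumably ships its own).

def rpc_method(func):
    func.is_rpc_method = True   # flag checked in _handle_request
    return func

class MathService(RPCService):
    @rpc_method
    def add(self, a, b):
        return a + b            # serialized and sent back with status b'SUCCESS'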
Esempio n. 37
0
class NodeService(App):
    __actions__ = []
    # _api_port = 5000


    def __init__(self, api_port):
        super().__init__()        
        # self.api_port = api_port
        self.__api_port = api_port

        nodemodel = Node.select().first()
        if not nodemodel:
          settings = {"discovery": True, "discoverable": True}
          nodemodel = Node(uuid=uuid.uuid4().hex, settings=settings)
          nodemodel.node_name = "Ancilla"
          nodemodel.save(force_insert=True)
        self.model = nodemodel
        self.identity = self.model.uuid.encode('utf-8')

        self.name = self.model.name #identity.decode('utf-8')

        self.config.update({
            "catchall": True
        })

        self.settings = self.config._make_overlay()
        self.settings.load_dict(self.model.settings)

        self.ctx = zmq.Context.instance()

        self.setup_router()

        self.discovery = Discovery(self)


        self.settings._add_change_listener(
            functools.partial(self.settings_changed, 'settings'))
        

        # self.pubsocket = self.ctx.socket(zmq.PUB)
        # self.pubsocket.bind("ipc://publisher")
        # self.publisher = ZMQStream(self.pubsocket)
        self.setup_publisher()
        
        # publisher = self.ctx.socket(zmq.PUB)
        # publisher.setsockopt( zmq.LINGER, 0 )
        # # publisher.setsockopt(zmq.DONTWAIT, True)
        # publisher.bind("ipc://publisher.ipc")
        # self.publisher = ZMQStream(publisher)
        

        collector = self.ctx.socket(zmq.PULL)
        collector.bind("ipc://collector.ipc")
        collector.setsockopt( zmq.LINGER, 1 )
        self.collector = ZMQStream(collector)
        self.collector.on_recv(self.handle_collect)

        
        self.file_service = None
        self.layerkeep_service = None
        self._services = []
        self.init_services()
        
        self.api = NodeApi(self)

        post_save.connect(self.post_save_handler, name=f'service_model', sender=Service)
        post_delete.connect(self.post_delete_service_handler, name=f'camera_model', sender=Service)
        post_save.connect(self.post_save_node_handler, name=f'node_model', sender=Node)
        # post_delete.connect(self.post_delete_camera_handler, name=f'camera_model', sender=Camera)

        self.limit_memory()
        soft, hard = resource.getrlimit(resource.RLIMIT_AS) 
        print(f'Node MEM limit NOW = {soft}, {hard}')
        


    def _handle_sig_memory(self, signum, stack):
      print("Memory Warning: Node Service")
      gc.collect()


    def limit_memory(self): 
      maxhard = psutil.virtual_memory().available
      maxsoft = maxhard
      p = psutil.Process(pid=os.getpid())
      soft, hard = resource.getrlimit(resource.RLIMIT_AS) 
      if hard > 0:
        h = min([maxhard, hard])
      else:
        h = maxhard
      if h > 0:
        s = min([maxsoft, h])
      else:
        s = maxsoft

      if hasattr(p, 'rlimit'):
        # soft, hard = p.rlimit(resource.RLIMIT_AS) 
        print(f'Node MEM limit = {soft}, {hard}: {h}')
        
        p.rlimit(resource.RLIMIT_AS, (s, hard))
      else:
        
        print(f'Node MEM limit = {soft}, {hard}:  {h}')
        resource.setrlimit(resource.RLIMIT_AS, (s, hard))
      self._old_usr1_hdlr = signal.signal(signal.SIGUSR1, self._handle_sig_memory)

    

    def cleanup(self):
      print('Clean Up Node and Services')
      for s in self._mounts:
        s.cleanup()
      self._mounts = []
      self.discovery.stop()      
      self.file_service = None
      self.layerkeep_service = None
      self.zmq_router.close()
      self.collector.close(linger=1)
      self.publisher.close(linger=1)
      self.ctx.destroy()


    @property
    def api_port(self):
        return self.__api_port

    @api_port.setter
    def api_port(self, value):
      self.discovery.update_port(value)
      self.__api_port = value
    

    def setup_router(self):
      trybind = 30
      bound = False
      self.router_port = 5555
      self.bind_address = f"tcp://*:{self.router_port}"
      self.router_address = f"tcp://127.0.0.1:{self.router_port}"

      self.zrouter = self.ctx.socket(zmq.ROUTER)
      self.zrouter.identity = self.identity

      while not bound and trybind > 0:
        try:
          self.bind_address = f"tcp://*:{self.router_port}"
          
          self.zrouter.bind(self.bind_address)
          self.router_address = f"tcp://127.0.0.1:{self.router_port}"
          print(f"Node Bound to {self.bind_address}")
          bound = True
        except zmq.error.ZMQError:
          trybind -= 1
          self.router_port += 1

      self.zmq_router = ZMQStream(self.zrouter, IOLoop.current())
      self.zmq_router.on_recv(self.router_message)
      self.zmq_router.on_send(self.router_message_sent)


    def setup_publisher(self):
      trybind = 30
      bound = False
      self.publisher_port = 5556
      self.bind_address = f"tcp://*:{self.publisher_port}"
      self.publisher_address = f"tcp://127.0.0.1:{self.publisher_port}"

      publisher = self.ctx.socket(zmq.PUB)
      publisher.setsockopt( zmq.LINGER, 0 )
        # publisher.setsockopt(zmq.DONTWAIT, True)

      while not bound and trybind > 0:
        try:
          self.pub_bind_address = f"tcp://*:{self.publisher_port}"
          
          publisher.bind(self.pub_bind_address)
          self.publisher_address = f"tcp://127.0.0.1:{self.publisher_port}"
          print(f"Node Pub Bound to {self.pub_bind_address}")
          bound = True
        except zmq.error.ZMQError:
          trybind -= 1
          self.publisher_port += 1

      self.publisher = ZMQStream(publisher)


    def list_actions(self, *args):
      return self.__actions__

    def list_plugins(self):
      import os
      for module in os.listdir(os.path.dirname(__file__)):
        if module == '__init__.py' or module[-3:] != '.py':
          continue
        __import__(module[:-3], locals(), globals())

    def mount_service(self, model):
      kwargs = {"publisher_address": self.publisher_address}
      prefix = model.api_prefix #f"/services/{model.kind}/{model.id}/"
      res = next((item for item in self._mounts if item.config['_mount.prefix'] == prefix), None)
      if res:
        return ["exist", res]
      LayerkeepCls = getattr(importlib.import_module("ancilla.foundation.node.plugins"), "LayerkeepPlugin")    
      ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), model.class_name)  
      service = ServiceCls(model, **kwargs)
      service.install(LayerkeepCls())
      self._services.append(model)
      self.mount(prefix, service)
      return ["created", service]

    def handle_service_name_change(self, oldname, newname):
      sc = Service.event_listeners.children().alias('children')
      services = Service.select().from_(Service, sc).where(sc.c.Key.startswith(oldname))[:]      
      for s in services:
        evhandlers = {}
        for (k, v) in s.event_listeners.items():
          if k.startswith(oldname + "."):
            newkey = newname + k[len(oldname):]
            evhandlers[newkey] = v
          else:
            evhandlers[k] = v
        s.event_listeners = evhandlers
        s.save()

    # services = Service.update(Service.settings["event_handlers"].update(evhandlers)).where
    # .from_(Service, sc).where(sc.c.Key.startswith(oldname))[:]      
    def settings_changed(self, event, oldval, key, newval):
      # print(f'evt: {event} key= {key}  OldVal = {oldval}  NewVal: {newval}')
      if key == "discovery":
        self.discovery.run(newval)
      elif key == "discoverable":
        self.discovery.make_discoverable(newval)
      pass

    def post_save_node_handler(self, sender, instance, *args, **kwargs):
      print(f"Post save Node handler {sender}", flush=True)
      if self.model.name != self.name:
        self.name = instance.name
        self.discovery.update_name(self.name)
      
      old_settings = self.settings.to_json()
      new_settings = ConfigDict().load_dict(self.model.settings).to_json() 
      if old_settings != new_settings:
        # print(f"OldSet = {old_settings}", flush=True)
        # print(f"NEWSet = {new_settings}", flush=True)
        self.settings.update(new_settings)
        oldkeys = old_settings.keys()
        newkeys = new_settings.keys()
        for key in oldkeys - newkeys:
          if key not in self.settings._virtual_keys:
            del self.settings[key]


    def post_save_handler(self, sender, instance, *args, **kwargs):
      print(f"Post save Service handler {sender} {instance}", flush=True)
      model = None
      for idx, item in enumerate(self._services):
        if item.id == instance.id:
            model = item
            self._services[idx] = instance
      
      if model:
        oldmodel = model
        srv = next((item for item in self._mounts if item.model.id == instance.id), None)
        oldname = model.name
        model = instance
        # print(f"PostSaveHandler OLDName = {oldname}, instan= {instance.name}", flush=True)
        if oldname != instance.name:
          self.handle_service_name_change(oldname, instance.name)
          
        if srv:
          srv.update_model(model)

          old_config = ConfigDict().load_dict(oldmodel.configuration).to_json() 
          # old_config = oldmodel.configuration
          old_settings = srv.settings.to_json()
          old_event_listeners = srv.event_handlers.to_json()
          

          
          # print(f"NEWListeners = {json.dumps(srv.model.event_listeners)}", flush=True)
          # print(f"OldListeners = {json.dumps(oldmodel.event_listeners)}", flush=True)
          new_config = ConfigDict().load_dict(srv.model.configuration).to_json() 
          new_settings = ConfigDict().load_dict(srv.model.settings).to_json() 
          new_event_listeners = ConfigDict().load_dict(srv.model.event_listeners).to_json() 
          # if cur_settings != json.dumps(srv.model.settings):
          

          if old_config != new_config:
            # print(f"OldConfig = {old_config}", flush=True)
            # print(f"NEWConfig = {new_config}", flush=True)  
            # print(f"ConfVke {srv.config._virtual_keys}", flush=True)
            srv.config.update(new_config)
            oldkeys = old_config.keys()
            newkeys = new_config.keys()
            for key in oldkeys - newkeys:
              if key not in srv.config._virtual_keys:                
                del srv.config[key]

          if old_settings != new_settings:
            # print(f"OldSet = {old_settings}", flush=True)
            # print(f"NEWSet = {new_settings}", flush=True)
            # print(f"SettingsVke {srv.settings._virtual_keys}", flush=True)
            srv.settings.update(new_settings)
            oldkeys = old_settings.keys()
            newkeys = new_settings.keys()
            for key in oldkeys - newkeys:
              if key not in srv.settings._virtual_keys:
                del srv.settings[key]

          if old_event_listeners != new_event_listeners:            
            srv.event_handlers.update(new_event_listeners)

            # print(f"OldListeners = {old_event_listeners}", flush=True)
            # print(f"NEWListeners = {new_event_listeners}", flush=True)
            oldkeys = old_event_listeners.keys()            
            newkeys = new_event_listeners.keys()
            for key in oldkeys - newkeys:
              if key not in srv.settings._virtual_keys:
                del srv.event_handlers[key]


    def post_delete_service_handler(self, sender, instance, *args, **kwargs):
      # service_path = "/".join([Env.ancilla, "services", f"service{instance.id}"])
      if os.path.exists(instance.directory):
          shutil.rmtree(instance.directory)
      # self.delete_recording(instance)


    # def post_delete_camera_handler(self, sender, instance, *args, **kwargs):
    #   service_id = instance.service_id
    #   cam_path = "/".join([Env.ancilla, "services", f"service{instance.service_id}"])
    #   if os.path.exists(cam_path):
    #       shutil.rmtree(cam_path)


    def init_services(self):
      LayerkeepCls = getattr(importlib.import_module("ancilla.foundation.node.plugins"), "LayerkeepPlugin")    
      self.install(LayerkeepCls())
      lkmodel = Service.select().where(Service.kind == "layerkeep").first()
      if not lkmodel:
        self.__create_layerkeep_service()

      filemodel = Service.select().where(Service.kind == "file").first()
      if not filemodel:
        self.__create_file_service()
      
      kwargs = {"publisher_address": self.publisher_address}

      for s in Service.select():
        self._services.append(s)
        if s.kind == "file":
          ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), s.class_name)  
          service = ServiceCls(s, **kwargs)
          service.install(LayerkeepCls())
          self.file_service = service
          self.mount(f"/api/services/{s.kind}/{s.id}/", service)
        elif s.kind == "layerkeep":          
          ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), s.class_name)  
          service = ServiceCls(s, **kwargs)
          self.layerkeep_service = service
          self.mount(f"/api/services/{s.kind}/{s.id}/", service)
        else:
          ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), s.class_name)  
          service = ServiceCls(s, **kwargs)
          service.install(LayerkeepCls())
          self.mount(f"/api/services/{s.kind}/{s.id}/", service)
          
    def delete_service(self, service):
      self._services = [item for item in self._services if item.id != service.id]
      srv = next((item for item in self._mounts if item.model.id == service.id), None)
      if srv:
        self.unmount(srv)

    def delete_recording(self, msg):
      if isinstance(msg, CameraRecording):
        recording = msg
      else:
        data = msg.get("data") or None
        if data:
          if data.get("id"):
            recording = CameraRecording.get_by_id(data.get("id"))     
      
      if recording:
        try:
          
          if os.path.exists(recording.image_path):
            shutil.rmtree(recording.image_path)
          if os.path.exists(recording.video_path):
            os.remove(recording.video_path)

          res = recording.delete_instance(recursive=True)
          return True
        except Exception as e:
          print(f"delete exception {str(e)}")
          raise e
      
      return False

    def stop_service(self, service):
      srv = next((item for item in self._mounts if item.model.id == service.id), None)
      if srv:
        self.unmount(srv)

    def unmount(self, app):
      curmounts = self._mounts
      curmounts.remove(app)
      self.reset_app()
      self.api.setup()
      print("reseting app ", flush=True)
      app.cleanup()
      self.remount_apps(curmounts)
        
      
    def __create_layerkeep_service(self):
      service = Service(name="layerkeep", kind="layerkeep", class_name="Layerkeep")
      service.save(force_insert=True)
      return service

    def __create_file_service(self):
      service = Service(name="local", kind="file", class_name="FileService")
      service.save(force_insert=True)
      return service

    def send(self, environ = {}, **kwargs):
      res = self._handle(environ)
      return res

    def run_action(self, action, payload, target = None, **kwargs):      
      
      if not target:
        target = self
      else:
        target = next((item for item in self._mounts if item.name == target), self)
      
      # print(f'Actions= {action}, payload={payload} and target ={target}')
      try:
        if action in target.list_actions():
          method = getattr(target, action)
          res = method(payload)
          if yields(res):
            future = asyncio.run_coroutine_threadsafe(res, asyncio.get_running_loop())
            # while not future.done():
            #   time.sleep(0.01)
            # return future.result(0.)

            # print("FUTURE = ", future)
            # zmqrouter = self.zmq_router
            # def onfinish(fut):
            #   newres = fut.result(1)
            #   status = b'success'
            #   if "error" in newres:
            #     status = b'error'
            #   zmqrouter.send_multipart([replyto, status, json.dumps(newres).encode('ascii')])

            # future.add_done_callback(onfinish)

          else:
            return res  

        else:
          return {"status": "error", "message": "Action Doesnt Exist"}
      except Exception as e:
        return {"status": "error", "message": f'Error Running Action: {str(e)}' }
      

    def sendto(self, msg):
      node_identity, request_id, device_identity, action, *msgparts = msg

      # msg = msg[2]
      # if len(msg) > 2:
      #   subtree = msg[2]
      message = ""
      if len(msgparts) > 0:
        message = msgparts[0]

      response = {"request_id": request_id.decode('utf-8'), "action": action.decode('utf-8')}

      if device_identity:
        curdevice = self.active_devices.get(device_identity)
        if curdevice:
          res = curdevice.send([request_id, action, message])

    def router_message_sent(self, msg, status):
      print("NODE INSIDE ROUTE MESSageSEND", flush=True)


    def router_message(self, msg):
      # print(f"Router Msg = {msg}", flush=True)
      
      replyto, seq_s, brequest, *args = msg
      # seq = struct.unpack('!q',seq_s)[0]
      # action = action.decode('utf-8')
      request = brequest.decode('utf-8')
      try:
        req = json.loads(request)
        classname = req.get('__class__')

        module_name, class_name = classname.rsplit(".", 1)
        MyClass = getattr(importlib.import_module(module_name), class_name)

        instance = MyClass(**req.get('data', {}))
        self.handle_route(replyto, seq_s, instance)
      except Exception as e:
        self.logger.error(f'Node Router Exception: {str(e)}')

    # def router_message(self, msg):
    #   print("NOde Unpack here", flush=True)
    #   print(f"Router Msg = {msg}", flush=True)
      
    #   replyto, method, path, *args = msg
    #   method = method.decode('utf-8')
    #   path = path.decode('utf-8')
    #   params = {}
    #   if len(args):
    #     try:
    #       params = json.loads(args.pop().decode('utf-8'))
    #     except Exception as e:
    #       print(f"Could not load params: {str(e)}", flush=True)
      
    #   environ = {"REQUEST_METHOD": method.upper(), "PATH": path, "params": params}
    #   res = self._handle(environ)
    #   # print(typing.co, flush=True)

    #   if yields(res):
    #     future = asyncio.run_coroutine_threadsafe(res, asyncio.get_running_loop())
        
    #     zmqrouter = self.zmq_router
    #     def onfinish(fut):
    #       newres = fut.result(1)
    #       status = b'success'
    #       if "error" in newres:
    #         status = b'error'
    #       zmqrouter.send_multipart([replyto, status, json.dumps(newres).encode('ascii')])

    #     future.add_done_callback(onfinish)

    #   else:
    #     status = b'success'
    #     if "error" in res:
    #       status = b'error'
    #     self.zmq_router.send_multipart([replyto, status, json.dumps(res).encode('ascii')])
    #   # node_identity, request_id, device_identity, action, *msgparts = msg
    #   return "Routed"


    # def request(self, request):
    #   pass

    def handle_collect(self, msg):
      # print(f'HandleCol Pub to {msg}', flush=True)
      # self.publisher.send_multipart(msg)
      if len(msg) >= 3:
          topic, service, *other = msg
          # topic, device, payload, *other = msg
          # if topic.startswith(b'events.'):
          # print(f"HandleCol inside, {topic} and {service}", flush=True)
          
          if topic.startswith(service):
            newtopic = self.identity + b'.' + topic
          else:
            newtopic = self.identity + b'.' + service + b'.' + topic
          # print(f"HandleCol inside, {newtopic} and {service}", flush=True)  
          self.publisher.send_multipart([newtopic, service] + other)
      pass


    def service_change(self, *args):
      tree = Service.settings["event_handlers"].tree().alias('tree')
      Service.select().from_(Service, tree).where(tree.c.Key.startswith("servicename.events.print"))

      sc = Service.settings["event_handlers"].children().alias('children')      
      q = Service.select().from_(Service, sc).where(sc.c.Key.startswith("servicename.events.print"))

      q = (Service.select(sc.c.key, sc.c.value, sc.c.fullkey)
         .from_(Service, sc)
         .order_by(sc.c.key)
         .tuples())
      q[:]
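A sketch of feeding the node's collector socket from another process: handle_collect() republishes each message with the node identity prepended to the topic (the payload below is illustrative; the node above must be running on the same host).

import zmq

ctx = zmq.Context.instance()
push = ctx.socket(zmq.PUSH)
push.connect("ipc://collector.ipc")
push.send_multipart([b'events.printer.state', b'printer', b'{"status": "idle"}'])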
Esempio n. 38
0
class Worker(object):

    """Class for the MDP worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """
    max_forks = 10

    ipc = 'ipc:///tmp/zmq-rpc-'+str(uuid4())
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3    # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service, multicasts=()):
        """Initialize the MDPWorker.

        :param context:    is the zmq context to create the socket from
        :type context:     zmq.Context
        :param service:    service name - you can put hostname here
        :type service:     str
        :param multicasts: list of groups to subscribe
        :type multicasts:  list
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service.encode('utf-8')  # convert to byte-string - required in python 3
        self.multicasts = [m.encode('utf-8') for m in multicasts]  # convert to byte-string
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        self.forks = []
        self.curr_liveness = self.HB_LIVENESS

        socket = self.context.socket(zmq.ROUTER)
        socket.bind(self.ipc)
        self.stream_w = ZMQStream(socket)
        self.stream_w.on_recv(self._on_fork_response)
        self.reply_socket = None
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.
        """
        self.on_log_event('broker.connect', 'Trying to connect to broker')
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        for m in self.multicasts:
            self._register_worker_to_multicast(m)
        self.ticker.start()
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.
        """
        self.curr_liveness -= 1
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        # ouch, connection seems to be dead
        self.on_log_event('broker.timeout', 'Connection to broker timed out, disconnecting')
        self.shutdown(False)
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        msg = [b'', MDP_WORKER_VERSION, b'\x05']
        self.stream.send_multipart(msg)
        return

    def shutdown(self, final=True):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.

        :param final: if shutdown is final and we want to close all sockets
        :type final:  bool
        """

        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return

        self.stream.on_recv(None)
        self.disconnect()
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.need_handshake = True

        if final:
            self.stream_w.socket.close()
            self.stream_w.close()
            self.stream = None
        return

    def disconnect(self):
        """Helper method to send the workers DISCONNECT message.
        """
        self.stream.socket.send_multipart([b'', MDP_WORKER_VERSION, b'\x06' ])
        self.curr_liveness = self.HB_LIVENESS
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        self.on_log_event('broker.ready', 'Sending ready to broker.')
        ready_msg = [b'', MDP_WORKER_VERSION, b'\x01', self.service]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _register_worker_to_multicast(self, name):
        """Helper method to register worker to multicast group

        :param name:  group name
        :type name:   str
        """
        self.on_log_event('broker.register-group', 'Subscribing to group \'%s\'.' % name)
        reg_msg = [b'', MDP_WORKER_VERSION, b'\x07', name]
        self.stream.send_multipart(reg_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:  message parts
        :type msg:   list
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        protocol_version = msg.pop(0)
        if protocol_version != MDP_WORKER_VERSION:  # version check, ignore old versions
            return
        # 3rd part is message type
        msg_type = msg.pop(0)
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if msg_type == b'\x06':  # disconnect
            self.curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == b'\x02':  # request
            # remaining parts are the user message
            addresses, msg = self.split_address(msg)
            self._on_request(addresses, msg)
        elif msg_type == b'\x05':
            # received heartbeat - liveness counter already reset above
            pass
        else:
            # invalid message ignored
            pass
        return

    def _on_fork_response(self, to_send):
        """Helper method to send message from forked worker.
        This message will be received by the main worker process and resent to the broker.

        :param to_send: address and data to send
        :type to_send:  list
        """
        self.stream.send_multipart(to_send)
        return

    def send_reply(self, addresses, msg, partial=False, exception=False):
        """Send reply from forked worker process.
        This method can be called only from do_work() method!
        This method will send messages to main worker listening on local socket in /tmp/zmq-rpc-...

        :param addresses: return address stack
        :type addresses:  list of str
        :param msg:       return value from called method
        :type msg:        mixed
        :param partial:   if the message is partial or final
        :type partial:    bool
        :param exception: if the message is exception, msg format is: {'class':'c', 'message':'m', 'traceback':'t'}
        :type exception:  bool
        """
        if not self.reply_socket:
            context = zmq.Context()
            self.reply_socket = context.socket(zmq.DEALER)
            self.reply_socket.connect(self.ipc)
        msg = msgpack.Packer().pack(msg)
        if exception:
            to_send = [b'', MDP_WORKER_VERSION, b'\x08']
        elif partial:
            to_send = [b'', MDP_WORKER_VERSION, b'\x03']
        else:
            to_send = [b'', MDP_WORKER_VERSION, b'\x04']
        to_send.extend(addresses)
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        m = self.reply_socket.send_multipart(to_send, track=True, copy=False)
        m.wait()
        if not partial:
            self.reply_socket.close()
            self.reply_socket = None
        return

    def send_message(self, addresses, msg, partial=False, error=False):
        """Send response message from main worker process.
        Please do not call this method from do_work()

        :param addresses: return address stack
        :type addresses:  list of str
        :param msg:       return value from called method
        :type msg:        mixed
        :param partial:   if the message is partial or final
        :type partial:    bool
        :param error:     if the message is error
        :type error:      bool
        """
        to_send = [b'', MDP_WORKER_VERSION]
        if partial:
            to_send.append(b'\x03')
        elif error:
            to_send.append(b'\x09')
        else:
            to_send.append(b'\x04')
        to_send.extend(addresses)
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_request(self, addresses, message):
        """Helper method called on RPC message receive.
        """
        # remove finished forks
        self._remove_finished_processes()
        # test max forks
        if len(self.forks) >= self.max_forks:
            self.send_message(addresses, b'max workers limit exceeded', error=True)
            self.on_max_forks(addresses, message)
            return

        name = message[0]
        args = msgpack.unpackb(message[1])
        kwargs = msgpack.unpackb(message[2])

        p = Process(target=self.do_work, args=(addresses, name, args, kwargs))
        p.start()
        p._args = None  # free memory
        self.forks.append(p)
        return

    def _remove_finished_processes(self):
        """Helper method dedicated to cleaning list of forked workers
        """
        for f in [f for f in self.forks if not f.is_alive()]:
            self.forks.remove(f)
        return

    def split_address(self, msg):
        """Function to split return Id and message received by ROUTER socket.

        Returns 2-tuple with return Id and remaining message parts.
        Empty frames after the Id are stripped.
        """
        ret_ids = []
        for i, p in enumerate(msg):
            if p:
                ret_ids.append(p)
            else:
                break
        return ret_ids, msg[i + 1:]

    def on_log_event(self, event, message):
        """Override this method if you want to log events from broker

        :type event:    str
        :param event:   event type - used for filtering
        :type message:  str
        :param message: log message

        :rtype: None
        """
        pass

    def on_max_forks(self, addresses, message):
        """This method is called when max_forks limit is reached
        You can override this method.
        """
        pass

    def do_work(self, addresses, name, args, kwargs):
        """Main method responsible for handling rpc calls, and sending response messages.
         Please override this method!

        :param addresses: return address stack
        :type addresses:  list of str
        :param name:      name of task
        :type name:       str
        :param args:      positional task arguments
        :type args:       list
        :param kwargs:    key-value task arguments
        :type kwargs:     dict
        """
        # this is an example of a simple response message
        self.send_reply(addresses, 'method not implemented')  # send the message to the main worker
        # you can also send partial messages and exceptions - see the 'send_reply' docs
        return
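A hedged sketch of how do_work() might be overridden to stream partial results through send_reply(); the base-class name MDPForkingWorker is an assumed placeholder for the class these methods belong to.

class EchoWorker(MDPForkingWorker):  # assumed name for the class defined above

    def do_work(self, addresses, name, args, kwargs):
        # Runs in the forked child, so only send_reply() may be used here.
        try:
            for i, chunk in enumerate(args):
                # stream each argument back as a partial message
                self.send_reply(addresses, {'part': i, 'value': chunk}, partial=True)
            # the final, non-partial reply closes the child's reply socket
            self.send_reply(addresses, {'task': name, 'done': True})
        except Exception as e:
            # format expected by send_reply(..., exception=True)
            self.send_reply(addresses,
                            {'class': e.__class__.__name__,
                             'message': str(e),
                             'traceback': ''},
                            exception=True)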
Esempio n. 39
0
    port = int(msg[0])
    print port
    pair_socket = ctx.socket(zmq.PAIR)
    pair_socket.connect("tcp://176.31.243.99:%d" % port)
    pair_stream = ZMQStream(pair_socket, command_stream.io_loop)
    pair_stream.on_recv(pair_recv)
    send()


def pair_recv(msg):
    print "Message: " + ''.join(msg)
    connection.sendall(''.join(msg))


while True:
    # Wait for a connection
    print >> sys.stderr, 'waiting for a connection'
    sock.setblocking(0)
    sock.settimeout(0.0)
    done = False
    while not done:
        try:
            connection, client_address = sock.accept()
            done = True
        except socket.error:
            pass
    print "Not Blocking"
    command_stream.on_recv(recv)
    team = raw_input("Team name: ")
    command_stream.send_multipart(["CLIENT", team])
    command_stream.io_loop.start()
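The snippet above bridges a plain TCP connection to a ZMQ PAIR socket through a ZMQStream. A minimal, self-contained sketch of just the PAIR/ZMQStream half of that pattern; the endpoint is a placeholder.

import zmq
from tornado.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream

ctx = zmq.Context.instance()
pair_socket = ctx.socket(zmq.PAIR)
pair_socket.connect("tcp://127.0.0.1:5556")  # placeholder endpoint

def on_pair_recv(frames):
    # frames is a list of byte strings
    print("PAIR received:", b''.join(frames))

pair_stream = ZMQStream(pair_socket, IOLoop.current())
pair_stream.on_recv(on_pair_recv)
pair_stream.send_multipart([b"hello"])
IOLoop.current().start()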
Esempio n. 40
0
class MDPClient(object):

    """Class for the MDP client side.

    Thin asynchronous encapsulation of a zmq.DEALER socket.
    Provides a :func:`request` method with optional timeout.

    Objects of this class are meant to be integrated into the
    asynchronous IOLoop of pyzmq.

    :param context:  the ZeroMQ context to create the socket in.
    :type context:   zmq.Context
    :param endpoint: the endpoint to connect to.
    :type endpoint:  str

    The service name is passed per call to :func:`request`, not to the constructor.
    """

    _proto_version = C_CLIENT

    def __init__(self, context, endpoint):
        """Initialize the MDPClient.
        """
        
        self.context = context
        self.endpoint = endpoint
        
    def start(self):
        """
        Initialize the zmq sockets on a ioloop stream.
        The separation of this part from the init is useful if
        we start the client on a separate thread with a new ioloop
        (for example to enable use in an ipython notebook)
        """
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self._proto_prefix = [EMPTY_FRAME, self._proto_version]
        self._delayed_timeout = None
        self.timed_out = False
        socket.connect(self.endpoint)

    def shutdown(self):
        """Method to deactivate the client connection completely.

        Will delete the stream and the underlying socket.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        
        if not self.stream:
            return
        
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None

    def request(self, service, msg, msg_extra=STANDARD, timeout=None):
        """Send the given message.

        :param service: the service to send the request to.
        :type service:  str
        :param msg:     message parts to send.
        :type msg:      list of str
        :param msg_extra: extra message flags (e.g. STANDARD or BROADCAST).
        :type msg_extra:  int
        :param timeout: time to wait in milliseconds.
        :type timeout:  int

        :rtype: None
        """
        if isinstance(msg, (bytes, unicode)):
            msg = [msg]
            
        #
        # prepare full message
        #
        to_send = [chr(msg_extra)] + self._proto_prefix[:]
        to_send.extend([service])
        to_send.extend(msg)
        
        self.stream.send_multipart(to_send)
        
        if timeout:
            self._start_timeout(timeout)

    def _on_timeout(self):
        """Helper called after timeout.
        """
        
        self.timed_out = True
        self._delayed_timeout = None
        self.on_timeout()

    def _start_timeout(self, timeout):
        """Helper for starting the timeout.

        :param timeout:  the time to wait in milliseconds.
        :type timeout:   int
        """
        
        self._delayed_timeout = DelayedCallback(self._on_timeout, timeout)
        self._delayed_timeout.start()

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:   list of message parts.
        :type msg:    list of str
        """
        
        if self._delayed_timeout:
            # 
            # disable timeout
            #
            self._delayed_timeout.stop()
            self._delayed_timeout = None
            
        self.on_message(msg)

    def on_message(self, msg):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """

        raise NotImplementedError('on_message must be implemented by the subclass.')
        
    def on_timeout(self):
        """Public method called when a timeout occured.

        .. note:: Does nothing. Should be overloaded!
        """
        raise NotImplementedError('on_timeout must be implemented by the subclass.')
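A minimal usage sketch of the MDPClient above, assuming the constants (C_CLIENT, EMPTY_FRAME, STANDARD) and imports from the example's module; the endpoint and service name are placeholders.

class EchoClient(MDPClient):

    def on_message(self, msg):
        # msg is the raw multipart reply from the broker
        print("reply:", msg)
        IOLoop.instance().stop()

    def on_timeout(self):
        print("request timed out")
        IOLoop.instance().stop()

context = zmq.Context()
client = EchoClient(context, "tcp://127.0.0.1:5555")  # placeholder endpoint
client.start()
client.request(b"echo", b"ping", timeout=2500)
IOLoop.instance().start()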
Esempio n. 41
0
class ServiceProcess():
    def __init__(self, identity, service_id, child_conn, handler, **kwargs):

        self.identity = identity
        self.model = Service.get(Service.id == service_id)

        logger_name = f"{self.model.name}"
        self.logger = logging.getLogger(logger_name)
        self.logger.setup_logger(self.model)

        self.child_conn = child_conn
        self.data_handlers = []
        self.handler = handler(self)

        self.loop = None

        self.router_address = None

        self.setup_event_loop()
        self.setup()

        self.ctx = zmq.Context.instance()
        self.port = 5557
        self.pub_port = 5558

        self.setup_router()
        self.zmq_router = ZMQStream(self.zrouter, self.loop)
        self.zmq_router.on_recv(self.router_message)

        self.setup_publisher()
        self.zmq_pub = ZMQStream(self.zpub, self.loop)

        self.setup_data()

        self.limit_memory()
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        self.logger.debug(f'MEM limit NOW = {soft}, {hard}')

    @classmethod
    def start_process(cls, identity, service_id, child_conn, handler):
        inst = cls(identity, service_id, child_conn, handler)
        inst.run()

    def _handle_sig_memory(self, signum, stack):
        self.logger.warning("Memory Warning")
        gc.collect()

    def limit_memory(self):
        maxhard = psutil.virtual_memory().available
        maxsoft = maxhard
        p = psutil.Process(pid=os.getpid())
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        h = min([maxhard, hard])
        if h > 0:
            s = min([maxsoft, h])
        else:
            s = maxsoft

        self.logger.debug(f'Service MEM limit = {soft}, {hard}: {h}')
        if hasattr(p, 'rlimit'):
            # soft, hard = p.rlimit(resource.RLIMIT_AS)
            p.rlimit(resource.RLIMIT_AS, (s, h))
        else:
            resource.setrlimit(resource.RLIMIT_AS, (s, h))
        self._old_usr1_hdlr = signal.signal(signal.SIGUSR1,
                                            self._handle_sig_memory)

    def setup(self):
        from ..env import Env
        from ..data.db import Database
        from playhouse.sqlite_ext import SqliteExtDatabase
        import zmq
        Env.setup()

        conn = SqliteExtDatabase(
            Database.path,
            pragmas=(
                ('cache_size', -1024 * 64),  # 64MB page-cache.
                ('journal_mode',
                 'wal'),  # Use WAL-mode (you should always use this!).
                ('foreign_keys', 1),
                ('threadlocals', True)))
        Database.conn.close()
        Database.conn = conn
        Database.connect()

        self.state = ConfigDict()._make_overlay()
        self.state._add_change_listener(partial(self.state_changed, 'state'))
        self.task_queue = Queue()
        self.current_tasks = {}

    def model_updated(self):
        self.logger.debug(f"ServiceProcess POST SAVE HANDLER")

        self.model = Service.get_by_id(self.model.id)
        self.logger.setup_logger(self.model)
        if hasattr(self.handler, "model_updated"):
            self.handler.model_updated()

    def setup_event_loop(self):
        self.evtloop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.evtloop)

        IOLoop.clear_current()
        if hasattr(IOLoop, '_current'):
            del IOLoop._current

        if self.loop is None:
            self.loop = IOLoop.current()

    def stop_process(self):
        if self.handler:
            self.handler.close()

        for k, v in self.current_tasks.items():
            if hasattr(v, "stop"):
                v.stop()

        self.data_stream.close()
        self.zrouter.close()
        # if we close zpub here we wont get the last messages
        # self.zpub.close()

    def setup_router(self):
        self.zrouter = self.ctx.socket(zmq.ROUTER)
        self.zrouter.identity = self.identity
        self.logger.debug(f"Setup Router {self.model.name}")
        trybind = 30
        bound = False
        while not bound and trybind > 0:
            try:
                self.bind_address = f"tcp://*:{self.port}"

                self.zrouter.bind(self.bind_address)
                self.router_address = f"tcp://127.0.0.1:{self.port}"
                # print(f"Bound to {self.bind_address}")

                self.logger.info(f"Router Bound to {self.bind_address}")
                bound = True
            except zmq.error.ZMQError:
                trybind -= 1
                self.port += 1

        if not bound:
            raise Exception("Could Not Bind To Address")

    def setup_publisher(self):
        self.zpub = self.ctx.socket(zmq.PUB)
        trybind = 30
        bound = False
        while not bound and trybind > 0:
            try:
                self.pub_bind_address = f"tcp://*:{self.pub_port}"

                self.zpub.bind(self.pub_bind_address)
                self.pubsub_address = f"tcp://127.0.0.1:{self.pub_port}"
                self.logger.info(f"Publisher Bound to {self.pub_bind_address}")
                bound = True
            except zmq.error.ZMQError:
                trybind -= 1
                self.pub_port += 1

        if not bound:
            raise Exception("Could Not Bind To Address")

    def register_data_handlers(self, obj):
        self.data_handlers.append(obj)

    def setup_data(self):
        deid = f"inproc://{self.identity}_collector"
        self.data_stream = self.ctx.socket(zmq.PULL)
        self.data_stream.bind(deid)

        self.data_stream = ZMQStream(self.data_stream)
        self.data_stream.linger = 0
        self.data_stream.on_recv(self.on_data)

    def state_changed(self, event, oldval, key, newval):
        # print(f"state changed {self.state}, key={key} OLDVAL: {oldval}, {newval}", flush=True)
        dict.__setitem__(oldval, key, newval)
        self.fire_event(StateEvent.changed, oldval)

    async def run_loop(self):

        child_conn = self.child_conn
        while self.running:
            res = child_conn.poll(0)
            if res:
                try:
                    payload = child_conn.recv()
                except:
                    break
                if payload:
                    (key, args) = payload
                    if key == "router_address":
                        child_conn.send((key, self.router_address))
                    elif key == "pubsub_address":
                        child_conn.send((key, self.pubsub_address))
                    elif key == "model_updated":
                        self.model_updated()
                    elif key == "stop":
                        self.stop_process()
                        child_conn.send((key, "success"))
                        time.sleep(2)
                        self.running = False
                        break
            await asyncio.sleep(2)

    def run(self):
        self.running = True
        try:
            self.evtloop.run_until_complete(self.run_loop())
        except KeyboardInterrupt:
            self.stop_process()
            self.child_conn.send(("stop", "success"))
            self.running = False
            self.logger.info('\nProcessFinished (interrupted)')

    def on_data(self, data):
        for d in self.data_handlers:
            data = d.handle(data)

        if hasattr(self, "zmq_pub"):
            self.zmq_pub.send_multipart(data)

    def handle_route(self, replyto, seq, request):
        action = request["action"]
        if hasattr(self.handler, action):
            method = getattr(self.handler, action)
            if method:
                res = b''
                try:
                    res = method(request["body"])
                except AncillaResponse as ar:
                    res = ar
                except Exception as e:
                    res = AncillaError(404, {"error": str(e)})
            else:
                # newres = b'{"error": "No Method"}'
                res = AncillaError(404, {"error": "No Method"})
        else:
            # the handler has no such action; set res so it is not unbound below
            res = AncillaError(404, {"error": "No Method"})

        if yields(res):
            future = asyncio.run_coroutine_threadsafe(
                res, asyncio.get_running_loop())

            zmqrouter = self.zmq_router

            def onfinish(fut):
                res = b''
                try:
                    newres = fut.result(1)
                    if isinstance(newres, AncillaResponse):
                        res = newres.encode()
                    else:
                        res = AncillaResponse(newres).encode()
                except AncillaResponse as ar:
                    res = ar.encode()

                zmqrouter.send_multipart([replyto, seq, res])

            future.add_done_callback(onfinish)

        else:
            if not res:
                res = {"success": "ok"}
            if isinstance(res, AncillaResponse):
                res = res.encode()
            else:
                res = AncillaResponse(res).encode()

            self.zmq_router.send_multipart([replyto, seq, res])

    def router_message(self, msg):
        # print(f"Router Msg = {msg}", flush=True)

        replyto, seq_s, brequest, *args = msg
        # seq = struct.unpack('!q',seq_s)[0]
        # action = action.decode('utf-8')
        request = brequest.decode('utf-8')
        try:
            req = json.loads(request)
            classname = req.get('__class__')

            module_name, class_name = classname.rsplit(".", 1)
            MyClass = getattr(importlib.import_module(module_name), class_name)

            instance = MyClass(**req.get('data', {}))
            self.handle_route(replyto, seq_s, instance)
        except Exception as e:
            self.logger.error(f'PROCESS EXCEPTION {str(e)}')

    def fire_event(self, evtname, payload):
        # print(f"fire event {evtname}", flush=True)
        if not hasattr(self, "zmq_pub"):
            return

        if isinstance(evtname, Event):
            evtname = evtname.value()
        evtname = evtname.encode('ascii')
        # payload["device"] = self.name
        pstring = json.dumps(payload, cls=ServiceJsonEncoder)
        pstring = pstring.encode('ascii')
        self.zmq_pub.send_multipart(
            [b'events.' + evtname, self.identity, pstring])

    def add_task(self, task):
        self.task_queue.put(task)
        loop = IOLoop.current()
        loop.add_callback(self._process_tasks)

    async def _process_tasks(self):
        async for dtask in self.task_queue:
            print(f'process task started {dtask.name} ')
            self.current_tasks[dtask.name] = dtask
            res = await dtask.run(self)
            rj = json.dumps(res, cls=ServiceJsonEncoder).encode('ascii')
            self.zmq_pub.send_multipart(
                [self.identity + b'.task', b'finished', rj])

            del self.current_tasks[dtask.name]
            self.logger.debug(
                f"PROCESS TASK-{dtask.name} {self.identity} DONE= {res}")
Esempio n. 42
0
class GameManagerServer(object):
    client_router_sock = None
    worker_router_sock = None
    address = None
    manager = None
    io_loop = None

    def __init__(self, addr="tcp://*:", port=config.GAME_MANAGER_PORT):
        super(GameManagerServer, self).__init__()

        self.context = zmq.Context()
        self.io_loop = ZMQIOLoop.instance()

        self.client_router_sock = self.context.socket(zmq.ROUTER)
        self.address = addr + str(port)
        self.client_router_sock.bind(self.address)

        self.worker_router_sock = self.context.socket(zmq.ROUTER)
        self.worker_router_sock.bind("tcp://*:6000")

        self.client_router_sock = ZMQStream(self.client_router_sock)
        self.client_router_sock.on_recv(self.recv_from_client)

        self.worker_router_sock = ZMQStream(self.worker_router_sock)
        self.worker_router_sock.on_recv(self.recv_from_game)

        self.manager = GameManager(self.send_to_client, self.send_to_game)

    def start(self):
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            pass

        self.client_router_sock.close()
        self.worker_router_sock.close()

    def send_to_client(self, msg):
        routable_msg = [msg[mpwp.MSG_TO]] + msg  # prepend routing IDENTITY
        self.client_router_sock.send_multipart(routable_msg)

    def send_to_game(self, msg):
        routable_msg = [msg[mpwp.MSG_TO]] + msg  # prepend routing IDENTITY
        self.worker_router_sock.send_multipart(routable_msg)

    def recv_from_client(self, msg):
        if msg:
            router_id = msg[0]
            actual_msg = msg[1:]  # trim off router info
            if router_id == actual_msg[mpwp.MSG_FROM]:
                if actual_msg[mpwp.MSG_VERSION] == mpwp.VERSION:
                    self.manager.handle_client_incoming(actual_msg)
                else:
                    pass  # send VERSION_MISMATCH_ERROR
            else:
                if actual_msg[mpwp.MSG_TO] == mpwp.GAME_MANAGER_ID and actual_msg[mpwp.MSG_FROM] == mpwp.MATCHMAKER_ID:
                    to_id = router_id
                    from_id = actual_msg[mpwp.MSG_FROM]
                    msg_type = actual_msg[mpwp.MSG_TYPE]
                    msg_content = mpwp.msg_content(actual_msg)
                    self.manager.handle_matchmaker_incoming(to_id, from_id, msg_type, msg_content)
                else:
                    pass  # error, invalid message
        else:
            return  # fatal error

    def recv_from_game(self, msg):
        router_id = msg[0]
        actual_msg = msg[1:]  # trim off router info
        self.manager.recv_from_game(actual_msg)
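send_to_client()/send_to_game() above rely on ROUTER routing: the first frame of an outgoing multipart message must be the destination peer's identity. A minimal echo sketch of that envelope handling; the endpoint is a placeholder.

import zmq
from tornado.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream

router = zmq.Context.instance().socket(zmq.ROUTER)
router.bind("tcp://*:7000")  # placeholder endpoint
stream = ZMQStream(router)

def on_recv(frames):
    identity, payload = frames[0], frames[1:]  # frame 0 is the sender's identity
    # echo back to the same peer by prepending its identity again
    stream.send_multipart([identity] + payload)

stream.on_recv(on_recv)
IOLoop.instance().start()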
Esempio n. 43
0
class LRUQueue(object):
    """LRUQueue class using ZMQStream/IOLoop for event dispatching"""

    def __init__(self, backend_socket, frontend_socket):
        self.available_workers = 0
        self.workers = []
        self.client_nbr = NBR_CLIENTS

        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        self.backend.on_recv(self.handle_backend)

        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        # Queue worker address for LRU routing
        worker_addr, empty, client_addr = msg[:3]

        assert self.available_workers < NBR_WORKERS

        # add worker back to the list of workers
        self.available_workers += 1
        self.workers.append(worker_addr)

        #   Second frame is empty
        assert empty == b""

        # Third frame is READY or else a client reply address
        # If client reply, send rest back to frontend
        if client_addr != b"READY":
            empty, reply = msg[3:]

            # Following frame is empty
            assert empty == b""

            self.frontend.send_multipart([client_addr, b'', reply])

            self.client_nbr -= 1

            if self.client_nbr == 0:
                # Exit after N messages
                self.loop.add_timeout(time.time() + 1, self.loop.stop)

        if self.available_workers == 1:
            # on first recv, start accepting frontend messages
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        # Now get next client request, route to LRU worker
        # Client request is [address][empty][request]
        client_addr, empty, request = msg

        assert empty == b""

        #  Dequeue and drop the next worker address
        self.available_workers -= 1
        worker_id = self.workers.pop()

        self.backend.send_multipart([worker_id, b'', client_addr, b'', request])
        if self.available_workers == 0:
            # stop receiving until workers become available again
            self.frontend.stop_on_recv()
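A minimal sketch of wiring the LRUQueue above to its two ROUTER sockets; NBR_CLIENTS and NBR_WORKERS are the module-level constants the class expects, and the endpoints are placeholders.

import zmq
from tornado.ioloop import IOLoop

NBR_CLIENTS = 10
NBR_WORKERS = 3

context = zmq.Context()
frontend = context.socket(zmq.ROUTER)
frontend.bind("tcp://*:5555")  # clients connect here (placeholder)
backend = context.socket(zmq.ROUTER)
backend.bind("tcp://*:5556")   # workers connect here (placeholder)

queue = LRUQueue(backend, frontend)
IOLoop.instance().start()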
Esempio n. 44
0
class zmq_bonjour_connect_wrapper(object):
    """Connects to a ZMQ socket by the name, handles callbacks for pubsub topics etc"""
    context = None
    socket = None
    stream = None
    heartbeat_received = None
    heartbeat_timeout = 5000
    topic_callbacks = {}
    recv_callbacks = []
    uuid = None
    identity = None

    def __init__(self, socket_type, service_name, service_port=None, service_type=None, identity=None):
        self.uuid = uuid.uuid4()
        if not identity:
            self.identity = self.uuid.hex
        else:
            self.identity = identity

        self.reconnect(socket_type, service_name, service_port=service_port, service_type=service_type)
        if socket_type == zmq.SUB:
            # TODO: how to handle this with ROUTER/DEALER combinations...
            self.add_topic_callback("HEARTBEAT", self._heartbeat_callback)
            # TODO: add heartbeat watcher callback

    def _heartbeat_callback(self, *args):
        self.heartbeat_received = time.time()
        #print "Heartbeat time %d" % self.heartbeat_received

    def _topic_callback_wrapper(self, datalist):
        for f in self.recv_callbacks:
            if f:
                f(*datalist)
        topic = datalist[0]
        if len(datalist) > 1:
            args = datalist[1:]
        else:
            args = []
        #print "DEBUG: _topic_callback_wrapper(%s, %s)" % (topic, repr(args))
        if topic not in self.topic_callbacks:
            return
        for f in self.topic_callbacks[topic]:
            if f:
                f(*args)

    def reconnect(self, socket_type, service_name, service_port=None, service_type=None):
        self.context = None
        self.socket = None
        self.stream = None
        self.heartbeat_received = None

        if not service_type:
            service_type = socket_type_to_service(socket_type)
        if isinstance(service_name, (list, tuple)):
            rr = [None, service_name[0], service_name[1]]
        else:
            rr = bonjour_utilities.resolve(service_type, service_name)
            #print ("DEBUG: bonjour_utilities.resolve(%s, %s) returned %s" % (service_type,service_name,rr))
        if not rr:
            # TODO raise error or wait ??
            return

        self.context = zmq.Context()
        self.socket = self.context.socket(socket_type)
        self.socket.setsockopt(zmq.IDENTITY, self.identity)
        self.stream = ZMQStream(self.socket)
        connection_str =  "tcp://%s:%s" % (rr[1], rr[2])
        #print("DEBUG: reconnect connection_str=%s (service_type=%s)" % (connection_str, service_type))
        self.socket.connect(connection_str)

        # re-register the subscriptions
        if socket_type == zmq.SUB:
            for topic in self.topic_callbacks.keys():
                self._subscribe_topic(topic)

        # And set the callback
        self.stream.on_recv(self._topic_callback_wrapper)

    def _subscribe_topic(self, topic):
        self.socket.setsockopt(zmq.SUBSCRIBE, topic)

    def add_recv_callback(self, callback):
        self.recv_callbacks.append(callback)        

    def add_topic_callback(self, topic, callback):
        if topic not in self.topic_callbacks:
            self.topic_callbacks[topic] = []
            self._subscribe_topic(topic)
        self.topic_callbacks[topic].append(callback)

    def call(self, method, *args):
        """Async method calling wrapper, does not return anything you will need to catch any responses the server might send some other way"""
        self.stream.send_multipart([method, ] + list(args))
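A minimal usage sketch of the wrapper above, assuming the example's imports. Passing service_name as a (host, port) tuple skips the Bonjour lookup, per reconnect(); host, port and topic are placeholders.

sub = zmq_bonjour_connect_wrapper(zmq.SUB, ("127.0.0.1", 5560))  # placeholder host/port

def on_sensor(*frames):
    print("SENSOR frames:", frames)

sub.add_topic_callback("SENSOR", on_sensor)  # subscribes to the topic and registers the callback
IOLoop.instance().start()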
Esempio n. 45
0
class MDPWorker(object):
    """Class for the MDP worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3  # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service):
        """Initialize the MDPWorker.

        context is the zmq context to create the socket from.
        service is a byte-string with the service name.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.
        """
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        ready_msg = [b'', self._proto_version, b'\x01', self.service]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.
        """
        self.curr_liveness -= 1
        ##         print '%.3f tick - %d' % (time.time(), self.curr_liveness)
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        ## print '%.3f lost connection' % time.time()
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        msg = [b'', self._proto_version, b'\x04']
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.
        """
        ##         if self.need_handshake:
        ##             raise ConnectionNotReadyError()
        # prepare full message
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        msg is a list with the message parts
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if msg_type == b'\x05':  # disconnect
            self.curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == b'\x02':  # request
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', self._proto_version, b'\x03'] + envelope  # REPLY
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message
            # ignored
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
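A minimal sketch of subclassing the MDPWorker above: on_request() receives the request frames and reply() sends the answer back inside the envelope saved by _on_message(); the endpoint and service name are placeholders.

class EchoMDPWorker(MDPWorker):

    def on_request(self, msg):
        # msg is the list of request frames from the client
        self.reply([b'echo:'] + msg)

context = zmq.Context()
worker = EchoMDPWorker(context, "tcp://127.0.0.1:5555", b"echo")  # placeholder endpoint
IOLoop.instance().start()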
Esempio n. 46
0
class MDPBroker(object):

    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    meant to implement additional features (like logging).

    The broker uses ZMQ ROUTER sockets to deal with clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      Clients are handled via the separate `client_ep` endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers.
    :type main_ep:     str
    :param client_ep:  the clients' endpoint.
    :type client_ep:   str
    :param hb_ep:      the heartbeat endpoint for workers.
    :type hb_ep:       str
    :param service_q:  the class to be used for the service worker-queue.
    :type service_q:   class
    """

    CLIENT_PROTO = C_CLIENT  #: Client protocol identifier
    WORKER_PROTO = W_WORKER  #: Worker protocol identifier


    def __init__(self, context, main_ep, client_ep, hb_ep, service_q=None):
        """Init MDPBroker instance.
        """

        if service_q is None:
            self.service_q = ServiceQueue
        else:
            self.service_q = service_q

        #
        # Setup the zmq sockets.
        #
        socket = context.socket(zmq.ROUTER)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)

        socket = context.socket(zmq.ROUTER)
        socket.bind(client_ep)
        self.client_stream = ZMQStream(socket)
        self.client_stream.on_recv(self.on_message)

        socket = context.socket(zmq.ROUTER)
        socket.bind(hb_ep)
        self.hb_stream = ZMQStream(socket)
        self.hb_stream.on_recv(self.on_message)

        self._workers = {}

        #
        # services contain the service queue and the request queue
        #
        self._services = {}

        #
        # Mapping of worker commands and callbacks.
        #
        self._worker_cmds = {
            W_READY: self.on_ready,
            W_REPLY: self.on_reply,
            W_HEARTBEAT: self.on_heartbeat,
            W_DISCONNECT: self.on_disconnect,
        }

        #
        # 'Cleanup' timer for workers without heartbeat.
        #
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """

        if wid in self._workers:
            logging.info('Worker {} already registered'.format(service))
            return

        logging.info('Registering new worker {}'.format(service))

        self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service, self.main_stream)

        if service in self._services:
            wq, wr = self._services[service]
            wq.put(wid)
        else:
            q = self.service_q()
            q.put(wid)
            self._services[service] = (q, [])

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """

        try:
            wrep = self._workers[wid]
        except KeyError:
            #
            # Not registered, ignore
            #
            return

        logging.info('Unregistering worker {}'.format(wrep.service))

        wrep.shutdown()

        service = wrep.service
        if service in self._services:
            wq, wr = self._services[service]
            wq.remove(wid)

        del self._workers[wid]

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """

        try:
            wrep = self._workers[wid]
        except KeyError:
            #
            # Not registered, ignore
            #
            return

        logging.info('Disconnecting worker {}'.format(wrep.service))

        to_send = [wid, self.WORKER_PROTO, W_DISCONNECT]
        self.main_stream.send_multipart(to_send)

        self.unregister_worker(wid)

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """

        if service == MMI_SERVICE:
            logging.debug('Send reply to client from worker {}'.format(service))
        else:
            logging.info('Send reply to client from worker {}'.format(service))

        to_send = rp[:]
        to_send.extend([EMPTY_FRAME, self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """

        logging.debug('Shutting down')

        self.main_stream.on_recv(None)
        self.main_stream.socket.setsockopt(zmq.LINGER, 0)
        self.main_stream.socket.close()
        self.main_stream.close()
        self.main_stream = None

        self.client_stream.on_recv(None)
        self.client_stream.socket.setsockopt(zmq.LINGER, 0)
        self.client_stream.socket.close()
        self.client_stream.close()
        self.client_stream = None

        self.hb_stream.on_recv(None)
        self.hb_stream.socket.setsockopt(zmq.LINGER, 0)
        self.hb_stream.socket.close()
        self.hb_stream.close()
        self.hb_stream = None

        self._workers = {}
        self._services = {}

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """

        #
        #  Remove 'dead' (not responding to heartbeats) workers.
        #
        for wrep in self._workers.values():
            if not wrep.is_alive():
                self.unregister_worker(wrep.id)

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        ret_id = rp[0]
        logging.debug('Worker sent ready msg: {} ,{}'.format(rp, msg))
        self.register_worker(ret_id, msg[0])

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        ret_id = rp[0]
        wrep = self._workers.get(ret_id)

        if not wrep:
            #
            # worker not found, ignore message
            #
            logging.error(
                "Worker with return id {} not found. Ignore message.".format(
                    ret_id))
            return

        service = wrep.service
        logging.info("Worker {} sent reply.".format(service))

        try:
            wq, wr = self._services[service]

            #
            # Send response to client
            #
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)

            #
            # make worker available again
            #
            wq.put(wrep.id)

            if wr:
                logging.info("Sending queued message to worker {}".format(service))
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            #
            # unknown service
            #
            self.disconnect(ret_id)

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        #
        # Note:
        # The modified heartbeat of the worker is sent over a separate socket
        # stream (self.hb_stream). Therefore the ret_id is wrong. Instead the
        # worker sends its id in the message.
        #
        if len(msg) > 0:
            ret_id = msg[0]
        else:
            ret_id = rp[0]

        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            #
            # Ignore HB for unknown worker
            #
            pass

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        wid = rp[0]
        self.unregister_worker(wid)

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        mmi.service is used for querying if a specific service is available.
        mmi.services is used for querying the list of services available.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the protocol id sent
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """

        if service == MMI_SERVICE:
            s = msg[0]
            ret = [UNKNOWN_SERVICE]

            for wr in self._workers.values():
                if s == wr.service:
                    ret = [KNOWN_SERVICE]
                    break

        elif service == MMI_SERVICES:
            #
            # Return list of services
            #
            ret = [wr.service for wr in self._workers.values()]

        elif service == MMI_TUNNELS:
            #
            # Read the tunnel files, and send back the network info.
            #
            tunnel_paths = glob.glob(os.path.expanduser("~/tunnel_port_*.txt"))
            tunnels_data = {}
            for path in tunnel_paths:
                filename = os.path.split(path)[-1]
                service_name = filename[-7:-4]
                with open(path, 'r') as f:
                    tunnels_data[service_name] = json.load(f)
            ret = [cPickle.dumps(tunnels_data)]
        else:
            #
            # Unknown command.
            #
            ret = [UNKNOWN_COMMAND]

        self.client_response(rp, service, ret)

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """

        service = msg.pop(0)

        if service.startswith(b'mmi.'):
            logging.debug("Got MMI message from client.")
            self.on_mmi(rp, service, msg)
            return

        logging.info("Client sends message (possibly queued) to worker {}".format(service))

        try:
            wq, wr = self._services[service]
            wid = wq.get()

            if not wid:
                #
                # No worker ready. Queue message
                #
                logging.info("Worker {} missing. Queuing message.".format(service))
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return

            wrep = self._workers[wid]
            to_send = [wrep.id, EMPTY_FRAME, self.WORKER_PROTO, W_REQUEST]
            to_send.extend(rp)
            to_send.append(EMPTY_FRAME)
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)

        except KeyError:
            #
            # Unknown service. Ignore request
            #
            logging.info('broker has no service "{}"'.format(service))

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            #
            # Ignore unknown command. Disconnect worker.
            #
            logging.error("Unknown worker command: {}".format(cmd))
            self.disconnect(rp[0])

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        rp, msg = split_address(msg)

        try:
            #
            # Dispatch on first frame after path
            #
            t = msg.pop(0)
            if t.startswith(b'MDPW'):
                logging.debug('Received message from worker {}'.format(rp))
                self.on_worker(t, rp, msg)
            elif t.startswith(b'MDPC'):
                logging.debug('Received message from client {}'.format(rp))
                self.on_client(t, rp, msg)
            else:
                logging.error('Broker unknown Protocol: "{}"'.format(t))
        except:
            logging.error(
                "An error occured while trying to process message: rp: {}, msg: {}\n{}".format(
                    rp, msg, traceback.format_exc()
                )
            )
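A minimal startup sketch for the three-endpoint MDPBroker above; all endpoints are placeholders.

context = zmq.Context()
broker = MDPBroker(context,
                   main_ep="tcp://*:5555",    # workers (placeholder)
                   client_ep="tcp://*:5556",  # clients (placeholder)
                   hb_ep="tcp://*:5557")      # worker heartbeats (placeholder)
try:
    IOLoop.instance().start()
except KeyboardInterrupt:
    broker.shutdown()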
Esempio n. 47
0
class Server(object):

    ctx = None
    loop = None
    stats = None
    spiders_in = None
    spiders_out = None
    sw_in = None
    sw_out = None
    db_in = None
    db_out = None

    def __init__(self, hostname, base_port):
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        self.stats = {
            'started': time(),
            'spiders_out_recvd': 0,
            'spiders_in_recvd': 0,
            'db_in_recvd': 0,
            'db_out_recvd': 0,
            'sw_in_recvd': 0,
            'sw_out_recvd': 0
        }

        socket_config = SocketConfig(hostname, base_port)

        spiders_in_s = self.ctx.socket(zmq.XPUB)
        spiders_out_s = self.ctx.socket(zmq.XSUB)
        sw_in_s = self.ctx.socket(zmq.XPUB)
        sw_out_s = self.ctx.socket(zmq.XSUB)
        db_in_s = self.ctx.socket(zmq.XPUB)
        db_out_s = self.ctx.socket(zmq.XSUB)

        spiders_in_s.bind(socket_config.spiders_in())
        spiders_out_s.bind(socket_config.spiders_out())
        sw_in_s.bind(socket_config.sw_in())
        sw_out_s.bind(socket_config.sw_out())
        db_in_s.bind(socket_config.db_in())
        db_out_s.bind(socket_config.db_out())

        self.spiders_in = ZMQStream(spiders_in_s)
        self.spiders_out = ZMQStream(spiders_out_s)
        self.sw_in = ZMQStream(sw_in_s)
        self.sw_out = ZMQStream(sw_out_s)
        self.db_in = ZMQStream(db_in_s)
        self.db_out = ZMQStream(db_out_s)

        self.spiders_out.on_recv(self.handle_spiders_out_recv)
        self.sw_out.on_recv(self.handle_sw_out_recv)
        self.db_out.on_recv(self.handle_db_out_recv)

        self.sw_in.on_recv(self.handle_sw_in_recv)
        self.db_in.on_recv(self.handle_db_in_recv)
        self.spiders_in.on_recv(self.handle_spiders_in_recv)
        logging.basicConfig(format="%(asctime)s %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S",
                            level=logging.INFO)
        self.logger = logging.getLogger(
            "distributed_frontera.messagebus.zeromq.broker.Server")

    def start(self):
        self.logger.info("Distributed Frontera ZeroMQ broker is started.")
        self.log_stats()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def log_stats(self):
        self.logger.info(self.stats)
        self.loop.add_timeout(timedelta(seconds=10), self.log_stats)

    def handle_spiders_out_recv(self, msg):
        self.sw_in.send_multipart(msg)
        self.db_in.send_multipart(msg)
        self.stats['spiders_out_recvd'] += 1

    def handle_sw_out_recv(self, msg):
        self.db_in.send_multipart(msg)
        self.stats['sw_out_recvd'] += 1

    def handle_db_out_recv(self, msg):
        self.spiders_in.send_multipart(msg)
        self.stats['db_out_recvd'] += 1

    def handle_db_in_recv(self, msg):
        self.stats['db_in_recvd'] += 1
        if msg[0][0] in ['\x01', '\x00']:
            action, identity, partition_id = self.decode_subscription(msg[0])
            if identity == 'sl':
                self.spiders_out.send_multipart(msg)
                return
            if identity == 'us':
                self.sw_out.send_multipart(msg)
                return
            raise AttributeError('Unknown identity in channel subscription.')

    def handle_sw_in_recv(self, msg):
        if msg[0][0] in ['\x01', '\x00']:
            self.spiders_out.send_multipart(msg)
        self.stats['sw_in_recvd'] += 1

    def handle_spiders_in_recv(self, msg):
        if msg[0][0] in ['\x01', '\x00']:
            self.db_out.send_multipart(msg)
        self.stats['spiders_in_recvd'] += 1

    def decode_subscription(self, msg):
        """

        :param msg:
        :return: tuple of action, identity, partition_id
        where
        action is 1 - subscription, 0 - unsubscription,
        identity - 2 characters,
        partition_id - 8 bit unsigned integer (None if absent)
        """
        if len(msg) == 4:
            return unpack(">B2sB", msg)
        elif len(msg) == 3:
            action, identity = unpack(">B2s", msg)
            return action, identity, None
        raise ValueError("Can't decode subscription correctly.")
Esempio n. 48
0
class MDPBroker(object):
    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    meant to implement additional features (like logging).

    The broker uses ØMQ ROUTER sockets to deal with clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers and clients.
    :type main_ep:     str
    :param opt_ep:     an optional second endpoint.
    :type opt_ep:      str
    :param worker_q:   the class to be used for the worker-queue.
    :type worker_q:    class
    """

    CLIENT_PROTO = b'MDPC01'  #: Client protocol identifier
    WORKER_PROTO = b'MDPW01'  #: Worker protocol identifier

    def __init__(self, context, main_ep, opt_ep=None):
        """Init MDPBroker instance.
        """
        l = logger.Logger('mq_broker')
        self.log = l.get_logger()
        self.log.info("MDP broker startup...")

        socket = ZmqSocket(context, zmq.ROUTER)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)
        if opt_ep:
            socket = ZmqSocket(context, zmq.ROUTER)
            socket.bind(opt_ep)
            self.client_stream = ZMQStream(socket)
            self.client_stream.on_recv(self.on_message)
        else:
            self.client_stream = self.main_stream
        self.log.debug("Socket created...")
        self._workers = {}
        # services contain the worker queue and the request queue
        self._services = {}
        self._worker_cmds = {
            b'\x01': self.on_ready,
            b'\x03': self.on_reply,
            b'\x04': self.on_heartbeat,
            b'\x05': self.on_disconnect,
        }
        self.log.debug("Launch the timer...")
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        self.log.info("MDP broker started")
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        self.log.debug(
            "Try to register a worker : wid={0}, service={1}".format(
                wid, service))
        try:
            if wid in self._workers:
                self.log.debug("Worker %s already registered" % service)
                return
            self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service,
                                           self.main_stream)
            if service in self._services:
                wq, wr = self._services[service]
                wq.put(wid)
            else:
                q = ServiceQueue()
                q.put(wid)
                self._services[service] = (q, [])
            self.log.info("Registered worker : wid={0}, service={1}".format(
                wid, service))
        except:
            self.log.error(
                "Error while registering a worker : wid={0}, service={1}, trace={2}"
                .format(wid, service, traceback.format_exc()))
        return

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        self.log.debug("Try to unregister a worker : wid={0}".format(wid))
        try:
            try:
                wrep = self._workers[wid]
            except KeyError:
                # not registered, ignore
                self.log.warning(
                    "The worker wid={0} is not registered, ignoring the unregister request"
                    .format(wid))
                return
            wrep.shutdown()
            service = wrep.service
            if service in self._services:
                wq, wr = self._services[service]
                wq.remove(wid)
            del self._workers[wid]
            self.log.info("Unregistered worker : wid={0}".format(wid))
        except:
            self.log.error(
                "Error while unregistering a worker : wid={0}, trace={1}".
                format(wid, traceback.format_exc()))
        return

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        self.log.debug("Try to disconnect a worker : wid={0}".format(wid))
        try:
            try:
                wrep = self._workers[wid]
            except KeyError:
                # not registered, ignore
                self.log.warning(
                    "The worker wid={0} is not registered, ignoring the disconnect request"
                    .format(wid))
                return
            to_send = [wid, self.WORKER_PROTO, b'\x05']
            self.main_stream.send_multipart(to_send)
            self.log.info(
                "Request to unregister a worker : wid={0} service={1}".format(
                    wid, wrep.service))
        except:
            self.log.error(
                "Error while disconnecting a worker : wid={0}, trace={1}".
                format(wid, traceback.format_exc()))
        self.unregister_worker(wid)
        return

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """
        to_send = rp[:]
        to_send.extend([b'', self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        self.log.info("Shutdown starting...")
        try:
            self.log.debug("Closing the socket...")
            if self.client_stream == self.main_stream:
                self.client_stream = None
            self.main_stream.on_recv(None)
            self.main_stream.socket.setsockopt(zmq.LINGER, 0)
            self.main_stream.socket.close()
            self.main_stream.close()
            self.main_stream = None
            if self.client_stream:
                self.client_stream.on_recv(None)
                self.client_stream.socket.setsockopt(zmq.LINGER, 0)
                self.client_stream.socket.close()
                self.client_stream.close()
                self.client_stream = None
            self.log.debug("Clean workers and services...")
            self._workers = {}
            self._services = {}
        except Exception:
            self.log.error("Error during shutdown : trace={0}".format(
                traceback.format_exc()))
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        self.log.debug("Check for dead workers...")
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.log.info(
                    "A worker seems to be dead : wid={0} service={1}".format(
                        wrep.id, wrep.service))
                self.unregister_worker(wrep.id)
        return

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        self.register_worker(ret_id, msg[0])
        return

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        # make worker available again
        try:
            wrep = self._workers[ret_id]
            service = wrep.service
            wq, wr = self._services[service]
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)
            wq.put(wrep.id)
            if wr:
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            # unknown service
            self.disconnect(ret_id)
        return

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        wid = rp[0]
        self.log.info("A worker disconnects itself : wid={0}".format(wid))
        self.unregister_worker(wid)
        return

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        For now only mmi.service is handled.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the MMI service name requested
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """
        if service == b'mmi.service':
            s = msg[0]
            ret = b'404'
            for wr in list(self._workers.values()):
                if s == wr.service:
                    ret = b'200'
                    break
            self.client_response(rp, service, [ret])
        elif service == b'mmi.services':
            ret = []
            for wr in list(self._workers.values()):
                ret.append(wr.service)
            self.client_response(rp, service, [b', '.join(ret)])
        else:
            self.client_response(rp, service, [b'501'])
        return

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """
        service = msg.pop(0)
        if service.startswith(b'mmi.'):
            self.on_mmi(rp, service, msg)
            return
        try:
            wq, wr = self._services[service]
            wid = wq.get()
            if not wid:
                # no worker ready
                # queue message
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return
            wrep = self._workers[wid]
            to_send = [wrep.id, b'', self.WORKER_PROTO, b'\x02']
            to_send.extend(rp)
            to_send.append(b'')
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)
        except KeyError:
            # unknown service
            # ignore request
            msg = "broker has no service {0}".format(service)
            print(msg)
            self.log.warning(msg)
        return

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            # ignore unknown command
            # DISCONNECT worker
            self.log.warning(
                "Unknown command from worker (it will be disconnected) : wid={0}, cmd={1}"
                .format(rp[0], cmd))
            self.disconnect(rp[0])
        return

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        self.log.debug("Message received: {0}".format(msg))
        rp, msg = split_address(msg)
        # dispatch on first frame after path
        t = msg.pop(0)
        if t.startswith(b'MDPW'):
            self.on_worker(t, rp, msg)
        elif t.startswith(b'MDPC'):
            self.on_client(t, rp, msg)
        else:
            self.log.warning("Broker unknown Protocol: {0}".format(t))
        return
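
The MMI handler above also makes the broker self-describing: a plain REQ client can ask whether a service currently has a worker. A minimal sketch, assuming the broker above is running on an illustrative endpoint (tcp://127.0.0.1:5555) and that 'echo' is a placeholder service name:

# Hypothetical MMI availability check against the broker above.
import zmq

ctx = zmq.Context()
client = ctx.socket(zmq.REQ)
client.connect("tcp://127.0.0.1:5555")          # illustrative broker endpoint

# After the REQ delimiter: client protocol id, MMI service, queried service name.
client.send_multipart([b'MDPC01', b'mmi.service', b'echo'])
proto, service, code = client.recv_multipart()  # on_mmi replies b'200' if a worker offers "echo", else b'404'
print(proto, service, code)
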
Esempio n. 49
0
class GlinAppZmqCollector:
    """Collects ZeroMQ messages from clients"""
    def __init__(self, app, ctx, port=6607):
        self.app = app
        self.ctx = ctx

        self.collector = self.ctx.socket(zmq.REP)
        self.collector.bind("tcp://*:" + str(port))
        self.collector = ZMQStream(self.collector)
        self.collector.on_recv(self.handle_collect)

    def handle_collect(self, msg):
        """handle an incoming message"""
        (success, sequence_number, comment) = self._handle_collect(msg)
        self.collector.send_multipart(msgs.MessageWriter().bool(success).uint64(sequence_number).string(comment).get())

    def _handle_collect(self, msg):
        try:
            if len(msg) < 1:
                err_msg = "Got empty message. Ignoring."
                logging.info(err_msg)
                return (False, 0, err_msg)
            if msg[0] == b"brightness":
                return self._handle_collect_brightness(msg)

            # "mainswitch.state" <bool>
            elif msg[0] == b"mainswitch.state":
                return self._handle_collect_mainswitch_state(msg)

            # "mainswitch.toogle"
            elif msg[0] == b"mainswitch.toggle":
                return self._handle_collect_mainswitch_toggle(msg)

            # "scene.add" <animation_id> <name> <config>
            elif msg[0] == b"scene.add":
                return self._handle_collect_scene_add(msg)

            # "scene.config" <scene_id> <config>
            elif msg[0] == b"scene.config":
                return self._handle_collect_scene_reconfig(msg)

            elif msg[0] == b"scene.color":
                return self._handle_collect_scene_recolor(msg)

            elif msg[0] == b"scene.velocity":
                return self._handle_collect_scene_velocity(msg)

            # "scene.name" <scene_id> <name>
            elif msg[0] == b"scene.name":
                return self._handle_collect_scene_rename(msg)

            # "scene.rm" <scene_id>
            elif msg[0] == b"scene.rm":
                return self._handle_collect_scene_rm(msg)

            # "scene.setactive" <scene_id>
            elif msg[0] == b"scene.setactive":
                return self._handle_collect_scene_setactive(msg)

            else:
                logging.info("Invalid Command: {cmd}".format(cmd=(msg[0].decode('utf-8', 'replace'))))
                return (False, 0, "Invalid Command")

        except Exception as inst:
            logging.error(inst)
            raise

    def _handle_collect_brightness(self, msg):
        try:
            (brightness,) = msgs.MessageParser.brightness(msg)
            return self.app.set_brightness(brightness)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_mainswitch_state(self, msg):
        # "mainswitch.state" <bool>
        try:
            (state,) = msgs.MessageParser.mainswitch_state(msg)
            return self.app.set_mainswitch_state(state)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_mainswitch_toggle(self, msg):
        # "mainswitch.toggle"
        if len(msg) != 1:
            err_msg = "Invalid mainswitch.toggle message. Expected 1 frame"
            logging.info(err_msg)
            return (False, 0, err_msg)
        return self.app.toggle_mainswitch_state()

    def _handle_collect_scene_add(self, msg):
        # "scene.add" <animation_id> <name> <color> <velocity> <config>
        try:
            (animation_id, name, color, velocity, config) = msgs.MessageParser.scene_add(msg)
            return self.app.add_scene(animation_id, name, color, velocity, config)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_scene_recolor(self, msg):
        # "scene.color" <scene_id> <color>
        try:
            (scene_id, color) = msgs.MessageParser.scene_color(msg)
            return self.app.set_scene_color(scene_id, color)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_scene_velocity(self, msg):
        # "scene.velocity" <scene_id> <velocity>
        try:
            (scene_id, velocity) = msgs.MessageParser.scene_velocity(msg)
            return self.app.set_scene_velocity(scene_id, velocity)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_scene_reconfig(self, msg):
        # "scene.config" <scene_id> <config>
        try:
            (scene_id, config) = msgs.MessageParser.scene_config(msg)
            return self.app.set_scene_config(scene_id, config)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_scene_rename(self, msg):
        # "scene.name" <scene_id> <name>
        try:
            (scene_id, name) = msgs.MessageParser.scene_name(msg)
            return self.app.set_scene_name(scene_id, name)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_scene_rm(self, msg):
        # "scene.rm" <scene_id>
        try:
            (scene_id,) = msgs.MessageParser.scene_remove(msg)
            return self.app.remove_scene(scene_id)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)

    def _handle_collect_scene_setactive(self, msg):
        # "scene.setactive" <scene_id>
        try:
            (scene_id,) = msgs.MessageParser.scene_active(msg)
            return self.app.set_scene_active(scene_id)
        except msgs.MessageParserError as err:
            err_msg = str(err)
            logging.info(err_msg)
            return (False, 0, err_msg)
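
A minimal client sketch for the collector above, assuming it is running on its default port 6607; the command frame mirrors the strings matched in _handle_collect, and the reply frames carry the success/sequence/comment triple serialized by msgs.MessageWriter (whose wire format is not reproduced here):

# Hypothetical REQ client toggling the main switch on the collector above.
import zmq

ctx = zmq.Context()
req = ctx.socket(zmq.REQ)
req.connect("tcp://localhost:6607")          # matches the collector's default bind port

req.send_multipart([b"mainswitch.toggle"])   # single-frame command, as required by _handle_collect_mainswitch_toggle
reply = req.recv_multipart()                 # success / sequence number / comment, encoded by msgs.MessageWriter
print(reply)
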
Esempio n. 50
0
class PubSub(BasePubSub):
    """
    This class manages application PUB/SUB logic.
    """

    def __init__(self, application):
        super(PubSub, self).__init__(application)
        self.sub_stream = None

    def initialize(self):

        self.zmq_context = zmq.Context()
        options = self.application.settings["options"]

        self.zmq_pub_sub_proxy = options.zmq_pub_sub_proxy

        # create PUB socket to publish instance events into it
        publish_socket = self.zmq_context.socket(zmq.PUB)

        # do not try to send messages after closing
        publish_socket.setsockopt(zmq.LINGER, 0)

        if self.zmq_pub_sub_proxy:
            # application started with XPUB/XSUB proxy
            self.zmq_xsub = options.zmq_xsub
            publish_socket.connect(self.zmq_xsub)
        else:

            # application started without XPUB/XSUB proxy
            if options.zmq_pub_port_shift:
                # calculate zmq pub port number
                zmq_pub_port = options.port - options.zmq_pub_port_shift
            else:
                zmq_pub_port = options.zmq_pub_port

            self.zmq_pub_port = zmq_pub_port

            publish_socket.bind("tcp://%s:%s" % (options.zmq_pub_listen, str(self.zmq_pub_port)))

        # wrap pub socket into ZeroMQ stream
        self.pub_stream = ZMQStream(publish_socket)

        # create SUB socket listening to all events from all app instances
        subscribe_socket = self.zmq_context.socket(zmq.SUB)

        if self.zmq_pub_sub_proxy:
            # application started with XPUB/XSUB proxy
            self.zmq_xpub = options.zmq_xpub
            subscribe_socket.connect(self.zmq_xpub)
        else:
            # application started without XPUB/XSUB proxy
            self.zmq_sub_address = options.zmq_sub_address
            for address in self.zmq_sub_address:
                subscribe_socket.connect(address)

        subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, six.u(CONTROL_CHANNEL))

        subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, six.u(ADMIN_CHANNEL))

        def listen_socket():
            # wrap sub socket into ZeroMQ stream and set its on_recv callback
            self.sub_stream = ZMQStream(subscribe_socket)
            self.sub_stream.on_recv(self.dispatch_published_message)

        tornado.ioloop.IOLoop.instance().add_callback(listen_socket)

        if self.zmq_pub_sub_proxy:
            logger.info("ZeroMQ XPUB: {0}, XSUB: {1}".format(self.zmq_xpub, self.zmq_xsub))
        else:
            logger.info("ZeroMQ PUB - {0}; subscribed to {1}".format(self.zmq_pub_port, self.zmq_sub_address))

    def publish(self, channel, message, method=None):
        """
        Publish message into channel of stream.
        """
        method = method or self.DEFAULT_PUBLISH_METHOD
        message["message_type"] = method
        message = json_encode(message)
        to_publish = [utf8(channel), utf8(message)]
        self.pub_stream.send_multipart(to_publish)

    @coroutine
    def dispatch_published_message(self, multipart_message):
        """
        Got a message, decide what it is and dispatch it to the right
        application handler.
        """
        channel = multipart_message[0]
        message_data = multipart_message[1]
        if six.PY3:
            channel = channel.decode()
            message_data = message_data.decode()
        if channel == CONTROL_CHANNEL:
            yield self.handle_control_message(message_data)
        elif channel == ADMIN_CHANNEL:
            yield self.handle_admin_message(message_data)
        else:
            yield self.handle_channel_message(channel, message_data)

    def subscribe_key(self, subscription_key):
        self.sub_stream.setsockopt_string(zmq.SUBSCRIBE, six.u(subscription_key))

    def unsubscribe_key(self, subscription_key):
        self.sub_stream.setsockopt_string(zmq.UNSUBSCRIBE, six.u(subscription_key))
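
publish() above sends two frames, the channel name and a JSON body carrying a message_type field. A standalone consumer sketch, assuming an illustrative subscriber address (normally one of options.zmq_sub_address):

# Hypothetical plain SUB consumer for the [channel, json_body] frames sent by publish().
import json
import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:6000")          # illustrative publisher address
sub.setsockopt_string(zmq.SUBSCRIBE, u"")    # receive every channel

channel, body = sub.recv_multipart()
message = json.loads(body.decode("utf-8"))
print(channel, message.get("message_type"))  # "message_type" is the publish method set in publish()
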
Esempio n. 51
0
class ManagementIntegrationTest(unittest.TestCase):


    def setUp(self):
        self._io_loop = IOLoop.instance()
        self._ctx = zmq.Context(1)

        sock = self._ctx.socket(zmq.PUB)
        sock.bind('inproc://master/worker/coordination')
        self._master_pub_sock = sock
        self._master_pub = ZMQStream(sock, self._io_loop)

        self._worker_sub = self._ctx.socket(zmq.SUB)
        self._worker_sub.setsockopt(zmq.SUBSCRIBE, "")
        self._worker_sub.connect('inproc://master/worker/coordination')

        self._worker_pub = self._ctx.socket(zmq.PUB)
        self._worker_pub.bind( 'inproc://worker/master/coordination' )

        sock = self._ctx.socket(zmq.SUB)
        sock.setsockopt(zmq.SUBSCRIBE, "")
        sock.connect( 'inproc://worker/master/coordination' )
        self._master_sub_sock = sock
        self._master_sub = ZMQStream(sock, self._io_loop)

        self._topic = ZMQ_SPYDER_MGMT_WORKER + 'testtopic'

    def tearDown(self):
        self._master_pub.close()
        self._master_pub_sock.close()
        self._worker_sub.close()
        self._worker_pub.close()
        self._master_sub.close()
        self._master_sub_sock.close()
        self._ctx.term()

    def call_me(self, msg):
        self.assertEqual(self._topic, msg.topic)
        self.assertEqual('test'.encode(), msg.data)
        death = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER,
                data=ZMQ_SPYDER_MGMT_WORKER_QUIT)
        self._master_pub.send_multipart(death.serialize())

    def on_end(self, msg):
        self.assertEqual(ZMQ_SPYDER_MGMT_WORKER_QUIT, msg.data)
        self._io_loop.stop()


    def test_simple_mgmt_session(self):
        
        mgmt = ZmqMgmt(self._worker_sub, self._worker_pub, io_loop=self._io_loop)
        mgmt.start()

        self.assertRaises(ValueError, mgmt.add_callback, "test", "test")

        mgmt.add_callback(self._topic, self.call_me)
        mgmt.add_callback(ZMQ_SPYDER_MGMT_WORKER, self.on_end)

        test_msg = MgmtMessage(topic=self._topic, data='test'.encode())
        self._master_pub.send_multipart(test_msg.serialize())

        def assert_correct_mgmt_answer(raw_msg):
            msg = MgmtMessage(raw_msg)
            self.assertEqual(ZMQ_SPYDER_MGMT_WORKER_QUIT_ACK, msg.data)
            mgmt.remove_callback(self._topic, self.call_me)
            mgmt.remove_callback(ZMQ_SPYDER_MGMT_WORKER, self.on_end)
            self.assertEqual({}, mgmt._callbacks)

        self._master_sub.on_recv(assert_correct_mgmt_answer)

        self._io_loop.start()
Esempio n. 52
0
class Master(object):
    def __init__(self,
                 frontier,
                 data_in_sock='ipc:///tmp/robot-data-w2m.sock',
                 data_out_sock='ipc:///tmp/robot-data-m2w.sock',
                 msg_in_sock='ipc:///tmp/robot-msg-w2m.sock',
                 msg_out_sock='ipc:///tmp/robot-msg-m2w.sock',
                 io_loop=None):
        self.identity = 'master:%s:%s' % (socket.gethostname(), os.getpid())

        context = zmq.Context()

        self._io_loop = io_loop or IOLoop.instance()

        self._in_socket = context.socket(zmq.SUB)
        self._in_socket.setsockopt(zmq.SUBSCRIBE, '')
        self._in_socket.bind(data_in_sock)
        self._in_stream = ZMQStream(self._in_socket, io_loop)

        self._out_socket = context.socket(zmq.PUSH)
        self._out_socket.bind(data_out_sock)
        self._out_stream = ZMQStream(self._out_socket, io_loop)

        self._online_workers = set()
        self._running = False

        self._updater = PeriodicCallback(self._send_next, 100, io_loop=io_loop)
        self._reloader = PeriodicCallback(self.reload, 1000, io_loop=io_loop)

        self.frontier = frontier
        self.messenger = ServerMessenger(msg_in_sock, msg_out_sock, context,
                                         io_loop)

    def start(self):
        logging.info('[%s] starting', self.identity)
        self.messenger.add_callback(CTRL_MSG_WORKER, self._on_worker_msg)
        self.messenger.start()

        self._in_stream.on_recv(self._on_receive_processed)
        self._updater.start()
        self._reloader.start()
        self._running = True

    def stop(self):
        self._running = False
        self._reloader.stop()
        self._updater.stop()
        self.messenger.stop()
#        self.messenger.publish(CTRL_MSG_WORKER, self.identity,
#                CTRL_MSG_WORKER_QUIT)

    def close(self):
        self._in_stream.close()
        self._in_socket.close()
        self._out_stream.close()
        self._out_socket.close()
        self.messenger.close()

    def reload(self):
        pass

    def _on_worker_msg(self, msg):
        if msg.data == CTRL_MSG_WORKER_ONLINE:
            self._online_workers.add(msg.identity)
            logging.info('[%s] append [%s]', self.identity, msg.identity)
            self._send_next()


#        if msg.data == CTRL_MSG_WORKER_QUIT_ACK:
#            if msg.identity in self._online_workers:
#                self._online_workers.remove(msg.identity)

    def _send_next(self):
        if not self._running:
            return

        worker_num = len(self._online_workers)

        if self._running and worker_num > 0:
            while self._out_stream._send_queue.qsize() < worker_num * 4:
                request = self.frontier.get_next_request()
                if not request:
                    break

                msg = RequestMessage(self.identity, request)
                self._out_stream.send_multipart(msg.serialize())
                logging.debug('[%s] send request(%s)', self.identity,
                              request.url)

                self.frontier.reload_request(request)

    def _on_receive_processed(self, zmq_msg):
        msg = ResponseMessage.deserialize(zmq_msg)
        request = msg.response.request
        logging.debug('[%s] receive response(%s)', self.identity, request.url)
        self._send_next()
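
The Master above implies a worker with mirrored sockets: a PULL connected to data_out_sock for serialized RequestMessage frames and a PUB connected to data_in_sock for serialized ResponseMessage frames. A rough wiring sketch under those assumptions; the (de)serialization helpers and the ServerMessenger control channel are not shown:

# Hypothetical worker-side wiring for the Master above (frame plumbing only).
import zmq

ctx = zmq.Context()

work_in = ctx.socket(zmq.PULL)                        # requests pushed by the master
work_in.connect('ipc:///tmp/robot-data-m2w.sock')     # default data_out_sock

work_out = ctx.socket(zmq.PUB)                        # responses back to the master's SUB
work_out.connect('ipc:///tmp/robot-data-w2m.sock')    # default data_in_sock

frames = work_in.recv_multipart()                     # one serialized RequestMessage
# ... fetch and process the request here, then build a response ...
work_out.send_multipart(frames)                       # placeholder: a real worker sends ResponseMessage(...).serialize()
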
Esempio n. 53
0
class Forwarder(object):
    def __init__(self, local_host, local_port, remote_host, remote_port,
                 port_type, local_socket_type, remote_socket_type, printout,
                 bind_local, bind_remote):
        self.printout = printout
        self.context = zmq.Context()

        socketi, socketi_port = self._new_socket(port_type, local_socket_type,
                                                 local_host, local_port,
                                                 bind_local)
        socketo, socketo_port = self._new_socket(port_type, remote_socket_type,
                                                 remote_host, remote_port,
                                                 bind_remote)

        self.local_port = socketi_port
        self.remote_port = socketo_port

        self.port_type = port_type
        # don't let ZMQStream create its own IOLoop
        ioloop_instance = ioloop.IOLoop.instance()
        self.streamo = ZMQStream(socketo, io_loop=ioloop_instance)
        self.streami = ZMQStream(socketi, io_loop=ioloop_instance)

        self.streami.on_recv(lambda msg: self.relay_from_server(msg))
        self.streamo.on_recv(lambda msg: self.relay_from_kernel(msg))

    def _new_socket(self, port_type, socket_type, host, port, bind):
        s = self.context.socket(socket_type)
        if socket_type == zmq.SUB:
            s.setsockopt(zmq.SUBSCRIBE, b'')
        if bind == True and port is not None:
            url = 'tcp://%s:%s' % (host, port)
            self.printout("Bind %s on %s" % (port_type, url))
            s.bind(url)
        elif bind == True and port is None:
            url = 'tcp://%s' % host
            self.printout("Bind %s on random port of %s" % (port_type, url))
            port = s.bind_to_random_port(url)
            self.printout("  > picked port %s" % port)
        elif bind == False:
            url = 'tcp://%s:%s' % (host, port)
            self.printout("Connect %s on %s" % (port_type, url))
            s.connect(url)
        else:
            raise Exception("Unexpected socket setup for %s" % port_type)
        return (s, port)

    def dump_msg(self, msg, direction):
        pass
        """
        self.printout(self.port_type + direction)
        msg_str = str(msg)
        if len(msg_str) <= 50:
            self.printout(msg_str)
        else:
            self.printout(msg_str[:50] + "...")
        """

    def relay_from_server(self, msg):
        self.dump_msg(msg, " > ")
        self.streamo.send_multipart(msg)

    def relay_from_kernel(self, msg):
        self.dump_msg(msg, " < ")
        self.streami.send_multipart(msg)
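
A usage sketch for the Forwarder above: republish a remote PUB stream on a local port. The hostnames, ports and the 'iopub' label are placeholders; the loop started at the end is the same ioloop instance the class hands to its ZMQStreams:

# Hypothetical Forwarder usage: remote SUB -> local PUB relay.
import zmq
from zmq.eventloop import ioloop

fw = Forwarder(local_host='127.0.0.1', local_port=7000,
               remote_host='remote-host', remote_port=5556,   # placeholder remote publisher
               port_type='iopub',
               local_socket_type=zmq.PUB,    # bound locally, re-publishes the traffic
               remote_socket_type=zmq.SUB,   # connected to the remote publisher
               printout=print,
               bind_local=True, bind_remote=False)

ioloop.IOLoop.instance().start()             # relay_from_kernel() forwards remote frames to the local PUB
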
Esempio n. 54
0
class MDPBroker(object):

    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    meant to implement additional features (like logging).

    The broker uses ØMQ XREQ sockets to deal with clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers and clients.
    :type main_ep:     str
    :param opt_ep:     is an optional 2nd endpoint.
    :type opt_ep:      str
    :param worker_q:   the class to be used for the worker-queue.
    :type worker_q:    class
    """

    CLIENT_PROTO = b'MDPC01'  #: Client protocol identifier
    WORKER_PROTO = b'MDPW01'  #: Worker protocol identifier


    def __init__(self, context, main_ep, opt_ep=None, worker_q=None):
        """Init MDPBroker instance.
        """
        socket = context.socket(zmq.XREP)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)
        if opt_ep:
            socket = context.socket(zmq.XREP)
            socket.bind(opt_ep)
            self.client_stream = ZMQStream(socket)
            self.client_stream.on_recv(self.on_message)
        else:
            self.client_stream = self.main_stream
        self._workers = {}
        # services contain the worker queue and the request queue
        self._services = {}
        self._worker_cmds = { '\x01': self.on_ready,
                              '\x03': self.on_reply,
                              '\x04': self.on_heartbeat,
                              '\x05': self.on_disconnect,
                              }
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        if wid in self._workers:
            return
        self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service, self.main_stream)
        if service in self._services:
            wq, wr = self._services[service]
            wq.put(wid)
        else:
            q = ServiceQueue()
            q.put(wid)
            self._services[service] = (q, [])
        return

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        wrep.shutdown()
        service = wrep.service
        if service in self._services:
            wq, wr = self._services[service]
            wq.remove(wid)
        del self._workers[wid]
        return

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        to_send = [ wid, self.WORKER_PROTO, b'\x05' ]
        self.main_stream.send_multipart(to_send)
        self.unregister_worker(wid)
        return

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """
        to_send = rp[:]
        to_send.extend([b'', self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        if self.client_stream == self.main_stream:
            self.client_stream = None
        self.main_stream.on_recv(None)
        self.main_stream.socket.setsockopt(zmq.LINGER, 0)
        self.main_stream.socket.close()
        self.main_stream.close()
        self.main_stream = None
        if self.client_stream:
            self.client_stream.on_recv(None)
            self.client_stream.socket.setsockopt(zmq.LINGER, 0)
            self.client_stream.socket.close()
            self.client_stream.close()
            self.client_stream = None
        self._workers = {}
        self._services = {}
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        for wrep in list(self._workers.values()):  # copy: unregister_worker() mutates the dict
            if not wrep.is_alive():
                self.unregister_worker(wrep.id)
        return

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        self.register_worker(ret_id, msg[0])
        return

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        wrep = self._workers[ret_id]
        service = wrep.service
        # make worker available again
        try:
            wq, wr = self._services[service]
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)
            wq.put(wrep.id)
            if wr:
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            # unknown service
            self.disconnect(ret_id)
        return

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        wid = rp[0]
        self.unregister_worker(wid)
        return

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        For now only mmi.service is handled.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the MMI service name requested
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """
        if service == b'mmi.service':
            s = msg[0]
            ret = b'404'
            for wr in self._workers.values():
                if s == wr.service:
                    ret = b'200'
                    break
            self.client_response(rp, service, [ret])
        else:
            self.client_response(rp, service, [b'501'])
        return

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """
##         print 'client message:'
##         pprint(msg)
        service = msg.pop(0)
        if service.startswith(b'mmi.'):
            self.on_mmi(rp, service, msg)
            return
        try:
            wq, wr = self._services[service]
            wid = wq.get()
            if not wid:
                # no worker ready
                # queue message
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return
            wrep = self._workers[wid]
            to_send = [ wrep.id, b'', self.WORKER_PROTO, b'\x02']
            to_send.extend(rp)
            to_send.append(b'')
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)
        except KeyError:
            # unknwon service
            # ignore request
            print('broker has no service "%s"' % service)
        return

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            # ignore unknown command
            # DISCONNECT worker
            self.disconnect(rp[0])
        return

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        rp, msg = split_address(msg)
        # dispatch on first frame after path
        t = msg.pop(0)
        if t.startswith(b'MDPW'):
            self.on_worker(t, rp, msg)
        elif t.startswith(b'MDPC'):
            self.on_client(t, rp, msg)
        else:
            print('Broker unknown Protocol: "%s"' % t)
        return
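
For reference, a sketch of the worker-side frames this broker dispatches in on_worker(): a DEALER socket connected to main_ep announces itself with the READY command (b'\x01') and its service name, and later receives requests prefixed with b'\x02'. The endpoint and service name are illustrative:

# Hypothetical worker handshake for the MDPBroker above.
import zmq

ctx = zmq.Context()
worker = ctx.socket(zmq.DEALER)
worker.connect("tcp://127.0.0.1:5555")       # illustrative main_ep

# empty delimiter, worker protocol id, READY command, service name
worker.send_multipart([b'', b'MDPW01', b'\x01', b'echo'])

frames = worker.recv_multipart()             # b'', b'MDPW01', b'\x02', <client address stack>, b'', <request body>
print(frames)
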
Esempio n. 55
0
class MqAsyncReq(object):

    """Class for the MDP client side.

    Thin asynchronous encapsulation of a zmq.REQ socket.
    Provides a :func:`request` method with optional timeout.

    Objects of this class are meant to be integrated into the
    asynchronous IOLoop of pyzmq.

    :param context:  the ZeroMQ context to create the socket in.
    :type context:   zmq.Context
    :param endpoint: the endpoint to connect to.
    :type endpoint:  str
    :param service:  the service the client should use
    :type service:   str
    """

    _proto_version = b'MDPC01'

    def __init__(self, context, service):
        """Initialize the MDPClient.
        """
        if ("domogik.common.configloader" in sys.modules):
            cfg = Loader('mq').load()
            config = dict(cfg[1])
            self.endpoint = "tcp://{0}:{1}".format(config['ip'], config['req_rep_port'])
        else:
            ip = Parameter.objects.get(key='mq-ip')
            port = Parameter.objects.get(key='mq-req_rep_port')
            self.endpoint = "tcp://{0}:{1}".format(ip.value, port.value)
        socket = ZmqSocket(context, zmq.REQ)
        ioloop = IOLoop.instance()
        self.service = service
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.can_send = True
        self._proto_prefix = [ PROTO_VERSION, service]
        self._tmo = None
        self.timed_out = False
        socket.connect(self.endpoint)
        return

    def shutdown(self):
        """Method to deactivate the client connection completely.

        Will delete the stream and the underlying socket.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        if not self.stream:
            return
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        return

    def request(self, msg, timeout=None):
        """Send the given message.

        :param msg:     message parts to send.
        :type msg:      list of str
        :param timeout: time to wait in milliseconds.
        :type timeout:  int
        
        :rtype: None
        """
        if not self.can_send:
            raise InvalidStateError()
        if type(msg) in (bytes, str):
            msg = [msg]
        # prepare full message
        to_send = self._proto_prefix[:]
        to_send.extend(msg)
        self.stream.send_multipart(to_send)
        self.can_send = False
        if timeout:
            self._start_timeout(timeout)
        return

    def _on_timeout(self):
        """Helper called after timeout.
        """
        self.timed_out = True
        self._tmo = None
        self.on_timeout()
        return

    def _start_timeout(self, timeout):
        """Helper for starting the timeout.

        :param timeout:  the time to wait in milliseconds.
        :type timeout:   int
        """
        self._tmo = DelayedCallback(self._on_timeout, timeout)
        self._tmo.start()
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:   list of message parts.
        :type msg:    list of str
        """
        if self._tmo:
            # disable timeout
            self._tmo.stop()
            self._tmo = None
        # setting state before invoking on_message, so we can request from there
        self.can_send = True
        self.on_message(msg)
        return

    def on_message(self, msg):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """
        pass

    def on_timeout(self):
        """Public method called when a timeout occured.

        .. note:: Does nothing. Should be overloaded!
        """
        pass
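
A usage sketch for the client above, assuming the domogik MQ configuration (or the Parameter model) read by __init__ is available, and that b'some_service' is a placeholder service name:

# Hypothetical subclass wiring on_message/on_timeout into one request cycle.
import zmq
from zmq.eventloop.ioloop import IOLoop

class EchoClient(MqAsyncReq):
    def on_message(self, msg):
        print("reply:", msg)
        IOLoop.instance().stop()

    def on_timeout(self):
        print("request timed out")
        IOLoop.instance().stop()

client = EchoClient(zmq.Context(), b'some_service')  # placeholder service name
client.request(b'ping', timeout=2000)                # 2 s timeout handled by DelayedCallback
IOLoop.instance().start()
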
Esempio n. 56
0
class IpcTornadoChannel(IpcChannel):
    """Inter-process communication channel class for use with Tornado IO Loop.

    """
    CONNECTED = IpcChannel.EVENT_ACCEPTED
    DISCONNECTED = IpcChannel.EVENT_DISCONNECTED

    def __init__(self, channel_type, endpoint=None, context=None, identity=None):
        """Initalise the IpcChannel object.

        :param channel_type: ZeroMQ socket type, using CHANNEL_TYPE_xxx constants
        :param endpoint: URI of channel endpoint, can be specified later
        :param context: ZeroMQ context, will be initialised if not given
        :param identity: channel identity for DEALER type sockets
        """
        super(IpcTornadoChannel, self).__init__(channel_type, endpoint, context, identity)
        self._callback = None
        self._monitor_callback = None
        self._stream = None

    def register_callback(self, callback):
        """Register a callback with this IpcChannel.  This will result in the
        construction of a ZMQStream and the callback will be registered with
        the stream object.

        :param callback: the callback to register for received messages
        """
        self._callback = callback
        if not self._stream:
            self._stream = ZMQStream(self.socket)
        self._stream.on_recv(callback)

    def send(self, data):
        """Send data to the IpcChannel.

        :param data: data to send on channel
        """

        # If the data are unicode (like all Python3 native strings), convert to a
        # byte stream to be sent on the socket
        if isinstance(data, unicode):
            data = cast_bytes(data)

        # If a Stream is registered send the data out on the tornado IO Loop
        if self._stream:
            self._stream.send(data)
        else:
            super(IpcTornadoChannel, self).send(data)

    def send_multipart(self, data):
        """
        Send data to the IpcChannel, in multiple parts.
        :param data: data to send, as an iterable object
        """

        for idx, part in enumerate(data):
            if isinstance(part, unicode):
                data[idx] = cast_bytes(part)

        if self._stream:
            self._stream.send_multipart(data)
        else:
            super(IpcTornadoChannel, self).send_multipart(data)

    def register_monitor(self, callback):
        self._monitor_callback = callback
        self._monitor_socket = self.socket.get_monitor_socket(IpcChannel.EVENT_ACCEPTED | IpcChannel.EVENT_DISCONNECTED)
        # Create the socket
        self._monitor_stream = ZMQStream(self._monitor_socket)
        self._monitor_stream.on_recv(self._internal_monitor_callback)

    def _internal_monitor_callback(self, msg):
        if self._monitor_callback is not None:
            self._monitor_callback(parse_monitor_message(msg))
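
register_monitor() above builds on pyzmq's socket monitoring; a standalone sketch of that mechanism using only pyzmq, with an illustrative ROUTER endpoint:

# Minimal pyzmq socket-monitor sketch: watch accept/disconnect events through a ZMQStream.
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from zmq.utils.monitor import parse_monitor_message

ctx = zmq.Context()
sock = ctx.socket(zmq.ROUTER)
sock.bind("tcp://127.0.0.1:5560")            # illustrative endpoint

monitor_sock = sock.get_monitor_socket(zmq.EVENT_ACCEPTED | zmq.EVENT_DISCONNECTED)
monitor_stream = ZMQStream(monitor_sock)

def on_monitor_event(msg):
    event = parse_monitor_message(msg)       # dict with 'event', 'value' and 'endpoint'
    print("monitor event:", event)

monitor_stream.on_recv(on_monitor_event)
IOLoop.instance().start()                    # deliver monitor events to the callback
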
Esempio n. 57
0
class Server(object):

    ctx = None
    loop = None
    stats = None
    spiders_in = None
    spiders_out = None
    sw_in = None
    sw_out = None
    db_in = None
    db_out = None

    def __init__(self, address, base_port):
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        self.stats = {
            'started': time(),
            'spiders_out_recvd': 0,
            'spiders_in_recvd': 0,
            'db_in_recvd': 0,
            'db_out_recvd': 0,
            'sw_in_recvd': 0,
            'sw_out_recvd': 0
        }

        socket_config = SocketConfig(address, base_port)

        if socket_config.is_ipv6:
            self.ctx.setsockopt(zmq.IPV6, True)

        spiders_in_s = self.ctx.socket(zmq.XPUB)
        spiders_out_s = self.ctx.socket(zmq.XSUB)
        sw_in_s = self.ctx.socket(zmq.XPUB)
        sw_out_s = self.ctx.socket(zmq.XSUB)
        db_in_s = self.ctx.socket(zmq.XPUB)
        db_out_s = self.ctx.socket(zmq.XSUB)

        spiders_in_s.bind(socket_config.spiders_in())
        spiders_out_s.bind(socket_config.spiders_out())
        sw_in_s.bind(socket_config.sw_in())
        sw_out_s.bind(socket_config.sw_out())
        db_in_s.bind(socket_config.db_in())
        db_out_s.bind(socket_config.db_out())

        self.spiders_in = ZMQStream(spiders_in_s)
        self.spiders_out = ZMQStream(spiders_out_s)
        self.sw_in = ZMQStream(sw_in_s)
        self.sw_out = ZMQStream(sw_out_s)
        self.db_in = ZMQStream(db_in_s)
        self.db_out = ZMQStream(db_out_s)

        self.spiders_out.on_recv(self.handle_spiders_out_recv)
        self.sw_out.on_recv(self.handle_sw_out_recv)
        self.db_out.on_recv(self.handle_db_out_recv)

        self.sw_in.on_recv(self.handle_sw_in_recv)
        self.db_in.on_recv(self.handle_db_in_recv)
        self.spiders_in.on_recv(self.handle_spiders_in_recv)
        logging.basicConfig(format="%(asctime)s %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
        self.logger = logging.getLogger("distributed_frontera.messagebus"
                                        ".zeromq.broker.Server")
        self.logger.info("Using socket: {}:{}".format(socket_config.ip_addr,
                                                      socket_config.base_port))

    def start(self):
        self.logger.info("Distributed Frontera ZeroMQ broker is started.")
        self.log_stats()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def log_stats(self):
        self.logger.info(self.stats)
        self.loop.add_timeout(timedelta(seconds=10), self.log_stats)

    def handle_spiders_out_recv(self, msg):
        self.sw_in.send_multipart(msg)
        self.db_in.send_multipart(msg)
        self.stats['spiders_out_recvd'] += 1

    def handle_sw_out_recv(self, msg):
        self.db_in.send_multipart(msg)
        self.stats['sw_out_recvd'] += 1

    def handle_db_out_recv(self, msg):
        self.spiders_in.send_multipart(msg)
        self.stats['db_out_recvd'] += 1

    def handle_db_in_recv(self, msg):
        self.stats['db_in_recvd'] += 1
        if b'\x01' in msg[0] or b'\x00' in msg[0]:
            action, identity, partition_id = self.decode_subscription(msg[0])
            if identity == b'sl':
                self.spiders_out.send_multipart(msg)
                return
            if identity == b'us':
                self.sw_out.send_multipart(msg)
                return
            raise AttributeError('Unknown identity in channel subscription.')

    def handle_sw_in_recv(self, msg):
        if b'\x01' in msg[0] or b'\x00' in msg[0]:
            self.spiders_out.send_multipart(msg)
        self.stats['sw_in_recvd'] += 1

    def handle_spiders_in_recv(self, msg):
        if b'\x01' in msg[0] or b'\x00' in msg[0]:
            self.db_out.send_multipart(msg)
        self.stats['spiders_in_recvd'] += 1

    def decode_subscription(self, msg):
        """

        :param msg: raw subscription frame received from an XPUB socket
        :return: tuple of action, identity, partition_id
        where
        action is 1 - subscription, 0 - unsubscription,
        identity - 2 characters,
        partition_id - 8 bit unsigned integer (None if absent)
        """
        if len(msg) == 4:
            return unpack(">B2sB", msg)
        elif len(msg) == 3:
            action, identity = unpack(">B2s", msg)
            return action, identity, None
        raise ValueError("Can't decode subscription correctly.")
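
A short worked example of the frame format decode_subscription() expects: a subscriber's topic is the two-character identity plus a partition byte, and the XPUB socket prefixes the action byte (\x01 subscribe, \x00 unsubscribe) before the frame reaches handle_db_in_recv():

# Hypothetical round-trip of the subscription frame format decoded above.
from struct import pack, unpack

topic = b'sl' + bytes([0])            # spiders-log identity b'sl', partition 0 (SUB-side subscription topic)
frame = b'\x01' + topic               # what the XPUB delivers for a subscribe action
assert frame == pack(">B2sB", 1, b'sl', 0)
print(unpack(">B2sB", frame))         # (1, b'sl', 0) -> action, identity, partition_id
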
Esempio n. 58
0
class ZmqPubSub(object):
    """
    This class manages application PUB/SUB logic.
    """

    def __init__(self, application):
        self.application = application
        self.subscriptions = {}
        self.sub_stream = None

    def init_sockets(self):
        self.zmq_context = zmq.Context()
        options = self.application.settings["options"]

        self.zmq_pub_sub_proxy = options.zmq_pub_sub_proxy

        # create PUB socket to publish instance events into it
        publish_socket = self.zmq_context.socket(zmq.PUB)

        # do not try to send messages after closing
        publish_socket.setsockopt(zmq.LINGER, 0)

        if self.zmq_pub_sub_proxy:
            # application started with XPUB/XSUB proxy
            self.zmq_xsub = options.zmq_xsub
            publish_socket.connect(self.zmq_xsub)
        else:

            # application started without XPUB/XSUB proxy
            if options.zmq_pub_port_shift:
                # calculate zmq pub port number
                zmq_pub_port = options.port - options.zmq_pub_port_shift
            else:
                zmq_pub_port = options.zmq_pub_port

            self.zmq_pub_port = zmq_pub_port

            publish_socket.bind("tcp://%s:%s" % (options.zmq_pub_listen, str(self.zmq_pub_port)))

        # wrap pub socket into ZeroMQ stream
        self.pub_stream = ZMQStream(publish_socket)

        # create SUB socket listening to all events from all app instances
        subscribe_socket = self.zmq_context.socket(zmq.SUB)

        if self.zmq_pub_sub_proxy:
            # application started with XPUB/XSUB proxy
            self.zmq_xpub = options.zmq_xpub
            subscribe_socket.connect(self.zmq_xpub)
        else:
            # application started without XPUB/XSUB proxy
            self.zmq_sub_address = options.zmq_sub_address
            for address in self.zmq_sub_address:
                subscribe_socket.connect(address)

        subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, six.u(CONTROL_CHANNEL))

        subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, six.u(ADMIN_CHANNEL))

        def listen_socket():
            # wrap sub socket into ZeroMQ stream and set its on_recv callback
            self.sub_stream = ZMQStream(subscribe_socket)
            self.sub_stream.on_recv(self.dispatch_published_message)

        tornado.ioloop.IOLoop.instance().add_callback(listen_socket)

        if self.zmq_pub_sub_proxy:
            logger.info("ZeroMQ XPUB: {0}, XSUB: {1}".format(self.zmq_xpub, self.zmq_xsub))
        else:
            logger.info("ZeroMQ PUB - {0}; subscribed to {1}".format(self.zmq_pub_port, self.zmq_sub_address))

    def publish(self, channel, message, method=None):
        """
        Publish message into channel of stream.
        """
        method = method or DEFAULT_PUBLISH_METHOD
        to_publish = [utf8(channel), utf8(method), utf8(message)]
        self.pub_stream.send_multipart(to_publish)

    def get_subscription_key(self, project_id, namespace, channel):
        """
        Create subscription name to catch messages from specific
        project, namespace and channel.
        """
        return str(CHANNEL_NAME_SEPARATOR.join([project_id, namespace, channel, CHANNEL_SUFFIX]))

    @coroutine
    def dispatch_published_message(self, multipart_message):
        """
        Got a message, decide what it is and dispatch it to the right
        application handler.
        """
        channel = multipart_message[0]
        method = multipart_message[1]
        message_data = multipart_message[2]
        if six.PY3:
            message_data = message_data.decode()
        if channel == CONTROL_CHANNEL:
            yield self.handle_control_message(message_data)
        elif channel == ADMIN_CHANNEL:
            yield self.handle_admin_message(message_data)
        else:
            yield self.handle_channel_message(channel, method, message_data)

    @coroutine
    def handle_admin_message(self, message):
        for uid, connection in six.iteritems(self.application.admin_connections):
            if uid in self.application.admin_connections:
                connection.send(message)

    @coroutine
    def handle_channel_message(self, channel, method, message):
        if channel not in self.subscriptions:
            raise Return((True, None))

        response = Response(method=method, body=message)
        prepared_response = response.as_message()

        for uid, client in six.iteritems(self.subscriptions[channel]):
            if channel in self.subscriptions and uid in self.subscriptions[channel]:
                client.send(prepared_response)

    @coroutine
    def handle_control_message(self, message):
        """
        Handle control message.
        """
        message = json_decode(message)

        app_id = message.get("app_id")
        method = message.get("method")
        params = message.get("params")

        if app_id and app_id == self.application.uid:
            # app_id is set when the sender does not want the control
            # message processed twice for the same application: if it
            # matches this application's uid, the message originated
            # here and has already been handled, so skip it
            raise Return((True, None))

        func = getattr(self.application, "handle_%s" % method, None)
        if not func:
            raise Return((None, "method not found"))

        result, error = yield func(params)
        raise Return((result, error))

    def add_subscription(self, project_id, namespace_name, channel, client):
        """
        Subscribe application on channel if necessary and register client
        to receive messages from that channel.
        """
        subscription_key = self.get_subscription_key(project_id, namespace_name, channel)
        self.sub_stream.setsockopt_string(zmq.SUBSCRIBE, six.u(subscription_key))

        if subscription_key not in self.subscriptions:
            self.subscriptions[subscription_key] = {}

        self.subscriptions[subscription_key][client.uid] = client

    def remove_subscription(self, project_id, namespace_name, channel, client):
        """
        Unsubscribe application from channel if necessary and unregister client
        from receiving messages from that channel.
        """
        subscription_key = self.get_subscription_key(project_id, namespace_name, channel)

        try:
            del self.subscriptions[subscription_key][client.uid]
        except KeyError:
            pass

        try:
            if not self.subscriptions[subscription_key]:
                self.sub_stream.setsockopt_string(zmq.UNSUBSCRIBE, six.u(subscription_key))
                del self.subscriptions[subscription_key]
        except KeyError:
            pass
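
Unlike the PubSub variant earlier, publish() here sends three frames: channel, method and message body. A consumer-side sketch with an illustrative endpoint; the channel filter would normally be a key built by get_subscription_key():

# Hypothetical plain SUB consumer for the [channel, method, message] frames sent by ZmqPubSub.publish().
import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:6000")           # illustrative publisher address
sub.setsockopt_string(zmq.SUBSCRIBE, u"")     # or a key from get_subscription_key()

channel, method, message = sub.recv_multipart()
print(channel, method, message)
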