class InternalRouter(object):
    """Owns the internal ROUTER socket and dispatches incoming frames."""

    def __init__(self, context):
        # Create the ROUTER socket; LINGER=0 drops pending messages on close.
        sock = context.socket(zmq.ROUTER)
        sock.setsockopt(zmq.LINGER, 0)
        sock.identity = 'router'.encode('ascii')

        # Look up the bind endpoint from the routing table.
        db = Session()
        endpoint = (db.query(RoutingDefiniton.route)
                      .filter_by(name="internal_router")
                      .scalar())
        db.close()
        sock.bind(endpoint)

        # Wrap the socket in a stream on the shared IOLoop and start receiving.
        self.internal_router = ZMQStream(sock, IOLoop.instance())
        self.internal_router.on_recv(callback=self.router_recv)

    def shutdown(self):
        """Detach the receive callback and drop the stream."""
        self.internal_router.on_recv(callback=None)
        self.internal_router = None

    def router_recv(self, msg):
        """Handle one incoming multipart message (currently a no-op)."""
        return
Esempio n. 2
0
class SoundServer(object):
    """REP server listening on CONTROL_PORT that dispatches incoming
    commands to ``_handler_<command>`` methods."""

    def __init__(self):
        self._context = zmq.Context()
        self._socket = self._context.socket(zmq.REP)
        self._socket.bind('tcp://127.0.0.1:{0}'.format(CONTROL_PORT))
        self._stream = ZMQStream(self._socket)
        self._stream.on_recv(self._handle_msg)

    def start(self):
        """Run the event loop (blocks)."""
        IOLoop.instance().start()

    def _handle_msg(self, msg):
        """Dispatch one command frame; exit(1) on an unknown command."""
        method = '_handler_{0}'.format(msg[0].decode("utf-8"))
        print("Trying method {0}".format(method))
        # Resolve the handler before calling it.  The original wrapped the
        # lookup *and* the call in ``except AttributeError``, so an
        # AttributeError raised inside a handler was mistaken for an
        # unknown command and killed the process.
        handler = getattr(self, method, None)
        if handler is None:
            sys.exit(1)
        handler()

    def _handler_hi(self):
        self._socket.send_string('sup?')

    def _handler_exit(self):
        pass
def Run():
    """Entry point for the RCP web server.

    Wires a ZMQ SUB socket into the sessions manager, builds the Tornado
    application (static assets, temp files, index pages, stream tree and
    websocket endpoints), starts the stop-request poller and blocks in the
    IO loop.  The bare ``except`` logs any failure via traceback.
    """
    try:
        print "Running web server on port: "+str(Config["Web server"]["Port"])
        sessionsManager = SessionsManager()
        
        # Receive every message published on the incoming ZMQ port and feed
        # it to the sessions manager (empty SUBSCRIBE prefix = everything).
        socket = zmq.Context.instance().socket(zmq.SUB)
        socket.bind("tcp://*:"+str(Config["Web server"]["IncomingZmqPort"]))
        socket.setsockopt(zmq.SUBSCRIBE, "")
        stream = ZMQStream(socket)
        stream.on_recv(sessionsManager.ProcessZmqMessages)
        
        settings = {
            "debug" : False,
            "template_path":os.path.join(os.path.dirname(__file__), "../Frontend"),
        }

        app = tornado.web.Application([
            (r'/Static/(.*)', tornado.web.StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), "../Frontend/Static")}),
            (r'/Tmp/(.*)', tornado.web.StaticFileHandler, {'path': Config["Web server"]["Temporary files folder"]}),
            (r'/RCP', IndexHandler),
            (r'/RCP/', IndexHandler),
            (r'/StreamsTree/(.*)', StreamsTreeRequestHandler, dict(sessionsManager=sessionsManager)),
            (r'/WebSockets/', WebSocketHandler, dict(sessionsManager=sessionsManager)),
            
        ], **settings)
        
        # Poll for shutdown requests twice a second.
        periodic = ioloop.PeriodicCallback(CheckServerStopRequests, 500)
        periodic.start()
        
        app.listen(Config["Web server"]["Port"])
        tornado.ioloop.IOLoop.instance().start() 
    except:
        print traceback.format_exc()
Esempio n. 4
0
class MonitorEvents(SockJSConnection):
    """SockJS connection that relays monitor events from a local ZMQ feed,
    filtered by this connection's ``monitor`` id (or 'All')."""

    def _zmq_msg(self, msg):
        """Forward one ZMQ frame to the client if it matches our monitor."""
        try:
            payload = json.loads(msg[0])
            logging.debug(payload)
            if self.monitor == 'All':
                self.send(payload)
            elif 'mon_id' in payload and payload['mon_id'] == self.monitor:
                self.send(payload)
        except Exception as ex:
            logging.error(ex)

    def on_open(self, info):
        """Subscribe to the local ZMQ endpoint when the client connects."""
        logging.debug("Monitor ticker open: "+self.monitor)
        sub = zmq.Context.instance().socket(zmq.SUB)
        sub.connect(zmq_local_endpoint)
        sub.setsockopt(zmq.SUBSCRIBE, '')
        self.stream = ZMQStream(sub)
        self.stream.on_recv(self._zmq_msg)

    def on_close(self):
        """Stop receiving ZMQ messages when the client disconnects."""
        logging.debug("Monitor ticker close: "+self.monitor)
        self.stream.stop_on_recv()
Esempio n. 5
0
class Client(object):
    """Blocking-style RPC client over a ZMQ REQ socket.

    Attribute access is turned into remote calls: ``client.foo(1, 2)``
    sends the msgpack-encoded tuple ``('foo', 1, 2)`` and spins the IO
    loop until the reply arrives.
    """

    def __init__(self):
        self.stream = None   # ZMQStream, set by connect()
        self.result = None   # last successful RPC result

    def connect(self, port):
        """Connect the REQ socket to *port* (a full ZMQ endpoint string)."""
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect(port)
        self.stream = ZMQStream(socket)

    def __getattr__(self, item):
        # Any unknown attribute becomes a remote-method proxy.
        def wrapper(*args):
            # Build the request tuple directly instead of appending each
            # argument to a list and converting at the end.
            return self._run((item,) + args)
        return wrapper

    def _run(self, request):
        """Send *request*, run the IO loop until the reply, return the result.

        Raises Exception with the server-supplied message on an 'ERR' reply.
        """
        def on_response(message):
            response = msgpack.unpackb(message[0], use_list=False)
            if response[0] == 'OK':
                self.result = response[1]
            elif response[0] == 'ERR':
                raise Exception(response[2])
            ZMQIOLoop.instance().stop()

        self.stream.send(msgpack.packb(request))
        self.stream.on_recv(on_response)
        ZMQIOLoop.instance().start()
        return self.result

    def disconnect(self):
        """Close the underlying stream and its socket."""
        self.stream.close()
0
class SwysSearchRequestHandler(BaseHandler):
    """Handler that forwards an uploaded image to a search worker over ZMQ."""

    def initialize(self):
        socket = context.socket(zmq.REQ)
        socket.connect(conf.SEARCH_WORKER_ZMQ_ENDPOINT)

        self._zmq_stream = ZMQStream(socket)
        self._zmq_stream.on_recv(self._recv_result, copy=True)

    @tornado.web.asynchronous
    def handle_request_async(self, *args, **kwargs):
        """Persist the uploaded image to a temp file and ask the worker to
        process it by filename.

        Raises Exception when the request carries no 'image' file.
        """
        files = self.request.files.get('image', [])

        if len(files) == 0:
            raise Exception("there is no file attached")

        file = files[0]

        # Close the file before handing its name to the worker: the worker
        # reads the file by path, so the data must be flushed to disk.  The
        # original left the handle open, risking an incomplete read and
        # leaking a file descriptor per request.
        temp_file = tempfile.NamedTemporaryFile('wb', delete=False)
        try:
            temp_file.write(file.body)
        finally:
            temp_file.close()

        self._zmq_stream.send_json({'filename': temp_file.name})

    def _recv_result(self, msg):
        """Decode the worker's JSON reply and hand its 'data' field onward."""
        result_str = "".join(part.decode('utf-8') for part in msg)
        result = json.loads(result_str)['data']

        return self.on_complete(result)
Esempio n. 7
0
    def start(self):
        """Subscribe to the signal socket and schedule *self.job* for each
        received event, replacing any previous listener registered under
        this identifier.
        """
        # Captured here because the callback below runs in the event-loop
        # thread, where instance/ZODB state must not be touched.
        identifier = self.identifier
        job = self.job
        def execute_next(msg):
            # We can't use zodb object from outside here because
            # this code is executed in another thread (eventloop)
            # We don't have site or interaction, so the job must be created
            # before.
            # we can't use push_event_callback_after_commit here because
            # it will never commit in this thread (eventloop)
            if identifier in callbacks:
                callbacks[identifier].close()
                del callbacks[identifier]

            job.args = (msg, )
            # wait 2s that the throw event transaction has committed
            dc = DelayedCallback(job, 2000)
            dc.start()

        ctx = get_zmq_context()
        s = ctx.socket(zmq.SUB)
        s.setsockopt_string(zmq.SUBSCRIBE, u'')
        s.connect(get_signal_socket_url())
        stream = ZMQStream(s)
        # Register the stream so a later event (or restart) can close it.
        callbacks[identifier] = stream
        stream.on_recv(execute_next)
Esempio n. 8
0
    def enable_depth_frames(self, kinect_id):
        """Enable streaming of depth frames. *kinect_id* is the id of the
        device which should have streaming enabled.

        :raises ValueError: if *kinect_id* does not correspond to a connected device

        """
        try:
            record = self._kinect_records[kinect_id]
        except KeyError:
            raise ValueError('Kinect id "{0}" does not correspond to a connected device'.format(
                kinect_id))

        # Subscribe to everything on the device's depth endpoint.
        sub_socket = self._zmq_ctx.socket(zmq.SUB)
        sub_socket.connect(record.endpoints[EndpointType.depth])
        sub_socket.setsockopt_string(zmq.SUBSCRIBE, u'')
        depth_stream = ZMQStream(sub_socket, self._io_loop)
        record.streams[EndpointType.depth] = depth_stream

        def _forward(msg, kinect_id=kinect_id):
            # TODO: decompress frame
            self.on_depth_frame.send(self, kinect_id=kinect_id, depth_frame=msg)

        # Wire the signal dispatch to incoming depth frames.
        depth_stream.on_recv(_forward)
Esempio n. 9
0
class ZBus(object):
    """Lazily-created singleton wrapper around an XREQ ZMQ stream.

    Responses are matched to the callbacks registered in send() by their
    ``seed_id``.
    """

    def __init__(self):
        self._context = zmq.Context()
        self._callback = {}   # seed_id -> response callback
        self._zstream = None

    @staticmethod
    def instance():
        """Return the process-wide ZBus, creating it on first use."""
        if not hasattr(ZBus, '_instance'):
            ZBus._instance = ZBus()
        return ZBus._instance

    @staticmethod
    def initialized():
        """Return True once instance() has been called."""
        return hasattr(ZBus, '_instance')

    def connect(self, dist):
        """(Re)connect to ``tcp://<dist>``, replacing any open stream."""
        if self._zstream:
            self._zstream.close()
        self._zsock = self._context.socket(zmq.XREQ)
        self._zsock.connect('tcp://{dist}'.format(dist=dist))
        self._zstream = ZMQStream(self._zsock)
        self._zstream.on_recv(self.on_recv)

    def send(self, request, callback):
        """Send *request* and remember *callback* for its seed_id."""
        self._callback[request.seed_id] = callback
        self._zstream.send_multipart(request.box())

    def on_recv(self, frame):
        """Invoke the callback registered for this response, if any."""
        response = ZResponse(frame)
        # Single pop with a default instead of the original get()-then-pop()
        # pair: one dict lookup, and no window between test and removal.
        callback = self._callback.pop(response.seed_id, None)
        if callback and callable(callback):
            callback(response)
Esempio n. 10
0
class UartzStream(object):
    """Subscribes to a uartz PUB endpoint and parses "<dev>:<payload>" frames."""

    def __init__(self, addr, io_loop=None):
        self.addr = addr
        self.ioloop = io_loop

        self.stream = None
        self.reset_stream()

    def reset_stream(self):
        """(Re)create the SUB socket, subscribe to everything, start receiving."""
        sock = zmq.Context.instance().socket(zmq.SUB)
        sock.connect(self.addr)
        sock.setsockopt(zmq.SUBSCRIBE, b'')
        self.stream = ZMQStream(sock, self.ioloop)
        self.stream.on_recv(self._handle_msg)

    def _handle_msg(self, msg):
        """Split a single-frame message at the first ':' and dispatch it."""
        assert len(msg) == 1
        frame = msg[0]
        sep = frame.index(b":")
        assert sep > 0
        self.handle_msg(UartzMsg(dev=frame[:sep], msg=frame[sep + 1:]))
Esempio n. 11
0
def main(pat):
    """Tail the IOPub channel of the kernel whose connection file matches *pat*.

    Logs every received message via log_msg and prints the time every five
    minutes.  Blocks in the IO loop.
    """
    fname = find_connection_file(pat)
    with open(fname) as f:
        cfg = json.load(f)

    url = "%s://%s:%s" % (cfg.get('transport', 'tcp'), cfg['ip'], cfg['iopub_port'])

    # The Session signs/authenticates messages with the kernel's key.
    session = Session(key=cfg['key'])

    ctx = zmq.Context.instance()
    sub = ctx.socket(zmq.SUB)
    sub.subscribe = b''   # empty prefix: subscribe to every topic
    sub.connect(url)
    # (commented-out IPython debugging scaffold removed)

    stream = ZMQStream(sub)
    stream.on_recv(lambda msg_list: log_msg(session, msg_list))

    pc = PeriodicCallback(print_time, 5 * 60 * 1000)
    pc.start()
    IOLoop.instance().start()
Esempio n. 12
0
class Subscriber(object):
    """SUB-socket wrapper that delivers every message to subscriber_recv."""

    def __init__(self, context, sub_address, sub_topics):
        self.context = context
        self.subscriber_address = sub_address
        self.subscriber_topics = sub_topics

        stream = ZMQStream(self.context.socket(zmq.SUB), IOLoop.instance())
        stream.setsockopt(zmq.LINGER, 0)
        stream.on_recv(callback=self.subscriber_recv)
        # NOTE(review): the subscription topic here is "" (str);  pyzmq on
        # Python 3 expects bytes for setsockopt -- confirm the target runtime.
        stream.setsockopt(zmq.SUBSCRIBE, "")
        stream.connect(self.subscriber_address)
        self.subscriber = stream

        return

    def shutdown(self):
        """Detach the callback and disconnect from the publisher."""
        self.subscriber.on_recv(callback=None)
        self.subscriber.socket.disconnect(self.subscriber_address)
        self.subscriber = None

        return

    def subscriber_recv(self, msg):
        """Handle one incoming message (no-op placeholder)."""
        return
Esempio n. 13
0
class LRUQueue(object):
    """Least-recently-used broker between client and worker sockets.

    Workers announce readiness on the backend; frontend requests are handed
    to the most recently freed worker.  The loop is stopped one second
    after the last expected client reply has been forwarded.
    """

    def __init__(self, backend_socket, frontend_socket, clients, workers):
        # Count of idle workers and their addresses (used LIFO).
        self.avaliable_workers = 0
        self.workers = []
        self.worker_num = workers    # total workers expected
        self.client_num = clients    # replies still owed to clients
        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        self.backend.on_recv(self.handle_backend)
        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        """Handle a worker frame: either a READY announcement or a reply
        to be routed back to the requesting client."""
        worker_addr, empty, client_addr = msg[:3]
        assert self.avaliable_workers < self.worker_num
        # Either way the worker is now idle again.
        self.avaliable_workers += 1
        self.workers.append(worker_addr)
        assert empty == ""
        # Anything other than READY is a reply addressed to a client.
        if client_addr != "READY":
            empty, reply = msg[3:]
            assert empty == ""
            self.frontend.send_multipart([client_addr, "", reply])
            self.client_num -= 1
            if 0 == self.client_num:
                # All clients served: stop the loop shortly.
                self.loop.add_timeout(time.time() + 1, self.loop.stop)
        # First idle worker: start accepting frontend requests.
        if self.avaliable_workers == 1:
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        """Forward a client request to the last-freed worker."""
        client_addr, empty, request = msg
        assert empty == ""
        self.avaliable_workers -= 1
        worker_id = self.workers.pop()
        self.backend.send_multipart([worker_id, "", client_addr, "", request])
        # No idle workers left: stop taking frontend traffic.
        if self.avaliable_workers == 0:
            self.frontend.stop_on_recv()
Esempio n. 14
0
class Connection(object):
	"""The base class for the connection between node and  master
	"""

	def __init__(self, endpoint):
		# Target master endpoint this node talks to.
		self.endpoint = endpoint
		# init zeromq
		self.context = zmq.Context()
		self.socket = self.context.socket(zmq.REQ)
		self.stream = ZMQStream(self.socket)
		self.stream.on_recv(self.OnRecvMsg)
		# get local endpoint
		# NOTE(review): binding to "tcp://eth0:*" assumes an interface named
		# eth0 exists on the host -- confirm on the deployment targets.
		self.socket.bind("tcp://eth0:*")
		self.local_endpoint = str(self.socket.getsockopt(zmq.LAST_ENDPOINT))
		print "Local endpoint [%s]" % self.local_endpoint
		# connect to target
		self.socket.connect(endpoint)
		print "Connected to [%s]" % endpoint

	def SendMsg(self, msg):
		"""Send *msg* on the REQ socket without copying; the reply is
		delivered asynchronously via OnRecvMsg."""
		self.socket.send(msg, copy=False)
		print "Sending message [%s]" % msg
		#msg_rsp = self.socket.recv( copy = False )
		#print "Receiving message [%s]" % msg_rsp

	def OnRecvMsg(self, msg):
		"""Stream callback: log the received multipart message."""
		#msg_rsp = self.socket.recv( copy = False )
		#print "Receiving message [%s]" % msg_rsp
		print "Receiving message ========== [%s]" % msg

	def GetLocalEndpoint(self):
		"""Return the locally bound endpoint (resolved during __init__)."""
		return self.local_endpoint

	def GetEndpoint(self):
		"""Return the master endpoint passed to __init__."""
		return self.endpoint
Esempio n. 15
0
class SocketConnection(sockjs.tornado.SockJSConnection):
    """SockJS connection bridging browser clients and a ZMQ pub/sub bus."""

    # Every currently-open connection (shared across instances).
    clients = set()

    def on_open(self, request):
        """Register the client and give it its own SUB socket on the bus."""
        self.clients.add(self)

        sub = context.socket(zmq.SUB)
        sub.connect("tcp://localhost:%s" % str(ZMQ_PORT))
        sub.setsockopt(zmq.SUBSCRIBE, '')
        self.subscribe_stream = ZMQStream(sub)
        self.subscribe_stream.on_recv(self.on_message_published)

    def on_message(self, message):
        """Publish an incoming browser message to every subscriber."""
        logging.info(
            'message received, publish it to %d clients' % len(self.clients)
        )
        publish_stream.send_unicode(message)

    def on_message_published(self, message):
        """Relay a message from the ZMQ bus back to this client."""
        logging.info('client received new published message')
        self.send(message)

    def on_close(self):
        """Unregister the client and release its ZMQ resources."""
        self.clients.remove(self)
        # Properly close ZMQ socket
        self.subscribe_stream.close()
Esempio n. 16
0
class Broker (object):
    """Event broker: receives events on a bound SUB socket and republishes
    the validated form on a bound PUB socket.

    *patterns* is an optional list of subscription prefixes; with none
    given, nothing is subscribed until subscribe() is called.
    """

    def __init__(self,
            pub_uri=defaults.broker_pub_uri,
            sub_uri=defaults.broker_sub_uri,
            patterns=None,
            ):

        self.pub_uri = pub_uri
        self.sub_uri = sub_uri
        
        # Avoid a mutable default argument; copy-in the caller's patterns.
        if patterns:
            self.patterns = patterns
        else:
            self.patterns = []
        
        self.setup_logging()
        self.setup_zmq()
        self.setup_sockets()
        self.setup_subscriptions()
        self.setup_events()

    def setup_logging(self):
        # Dedicated logger for broker traffic.
        self.log = logging.getLogger('zmqevt.broker')

    def setup_zmq(self):
        self.context = zmq.Context()

    def setup_sockets(self):
        # Events come in on the bound SUB socket...
        self.sub = ZMQStream(self.context.socket(zmq.SUB))
        self.sub.bind(self.sub_uri)

        # ...and validated copies go out on the PUB socket.
        self.pub = ZMQStream(self.context.socket(zmq.PUB))
        self.pub.bind(self.pub_uri)

    def setup_subscriptions(self):
        if self.patterns:
            for p in self.patterns:
                self.subscribe(p)

    def subscribe(self, pattern):
        """Subscribe the SUB socket to *pattern* (a topic prefix)."""
        self.log.debug('Subcribe to "%s".' % pattern)
        self.sub.setsockopt(zmq.SUBSCRIBE, pattern)

    def setup_events(self):
        # Every incoming event is republished through publish().
        self.sub.on_recv(self.publish)

    def publish(self, msg):
        """Validate an incoming two-frame event and republish it."""
        assert len(msg) == 2, 'Received invalid message.'

        # This regenerates the event to ensure that we don't
        # pass on invalid data.
        try:
            evt = event.Event.load(msg)
        except Exception, detail:
            self.log.error('Error processing message: %s' % detail)
            return

        self.log.debug('Event: %s' % (str(evt.dump())))
        self.pub.send_multipart(evt.dump())
Esempio n. 17
0
 def connect(self):
     """Open the SUB socket to self.socket_addr and start receiving
     (idempotent: returns immediately if already connected)."""
     if self.connected:
         return
     # zmq.SUB is the socket *type*.  The original passed zmq.SUBSCRIBE,
     # which is a socket-option constant, so ctx.socket() created the
     # wrong kind of socket entirely.
     self.socket = ctx.socket(zmq.SUB)
     self.socket.connect(self.socket_addr)
     stream = ZMQStream(self.socket, self.io_loop)
     stream.on_recv(self.on_message)
     self.connected = True
Esempio n. 18
0
class SubQueue(BaseQueue):
    """PULL-based queue that hands every received message to *callback*."""

    def __init__(self, connect, callback, ioloop=None):
        super().__init__(zmq.PULL)
        self.socket.connect(connect)
        self.stream = ZMQStream(self.socket, ioloop)
        self.stream.on_recv(callback)
        # Deliver anything already queued on the socket.
        self.stream.flush()

    def call(self, message, callback):
        """Send *message* on a fresh DEALER socket; *callback* receives the reply."""
        ctx = zmq.Context()
        dealer = ctx.socket(zmq.DEALER)
        dealer.connect(self.endpoint)
        reply_stream = ZMQStream(dealer)
        reply_stream.on_recv(callback)

        dealer.send(message)
Esempio n. 20
0
class zmq_bonjour_bind_wrapper(object):
    context = None
    socket = None
    stream = None
    heartbeat_timer = None
    method_callbacks = {}


    def _hearbeat(self):
        #print "Sending heartbeat"
        self.stream.send_multipart(("HEARTBEAT", "1"))

    def __init__(self, socket_type, service_name, service_port=None, service_type=None):
        self.context = zmq.Context()
        self.socket = self.context.socket(socket_type)
        if not service_port:
            service_port = self.socket.bind_to_random_port('tcp://*', min_port=49152, max_port=65535, max_tries=100)
        else:
            self.socket.bind("tcp://*:%d" % service_port)
        print "Bound to port %d" % service_port

        self.stream = ZMQStream(self.socket)
        if not service_type:
            service_type = socket_type_to_service(socket_type)

        if socket_type == zmq.PUB:
            # TODO: how to handle this with ROUTER/DEALER combinations...
            self.heartbeat_timer = ioloop.PeriodicCallback(self._hearbeat, 1000)
            self.heartbeat_timer.start()

        if socket_type == zmq.ROUTER:
            self.stream.on_recv(self._method_callback_wrapper)


        bonjour_utilities.register_ioloop(ioloop.IOLoop.instance(), service_type, service_name, service_port)

    def _method_callback_wrapper(self, datalist):
        #print "_method_callback_wrapper called: %s" % repr(datalist)
        if len(datalist) < 2:
            return
        client_id = datalist[0]
        method = datalist[1]
        args = datalist[2:]
        #print "DEBUG: _method_callback_wrapper(%s, %s)" % (method, repr(args))
        if not self.method_callbacks.has_key(method):
            print "No such method: %s" % method
            print "Methods: %s" % self.method_callbacks.keys()
            return
        for f in self.method_callbacks[method]:
            resp = zmq_client_response(client_id, self.stream)
            # TODO: make a wrapper object for sending responses and pass that instead of the client_id
            #print "Calling f(resp, %s)" % repr(args)
            f(resp, *args)

    def register_method(self, name, callback):
        if not self.method_callbacks.has_key(name):
            self.method_callbacks[name] = []
        self.method_callbacks[name].append(callback)
Esempio n. 21
0
def run():
    """ main method """
    # Announce our endpoints over UDP broadcast and relay every message
    # arriving on the upstream SUB socket to the downstream PUB socket.
    hostname = socket.gethostname()

    context = zmq.Context()

    # Upstream: subscribe to "buildlight" messages on a random port.
    upstream = context.socket(zmq.SUB)
    upstream_port = upstream.bind_to_random_port('tcp://*')
    upstream_url = "tcp://%s:%d" % (hostname, upstream_port)
    log.info("Upstream bound to %s" % upstream_url)
    upstream.setsockopt_string(zmq.SUBSCRIBE, u"buildlight")

    # Downstream: publish on another random port.
    downstream = context.socket(zmq.PUB)
    downstream_port = downstream.bind_to_random_port('tcp://*')
    downstream_url = "tcp://%s:%d" % (hostname, downstream_port)
    log.info("Downstream bound to %s" % downstream_url)

    # UDP socket used to broadcast our endpoints to the LAN.
    ping_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    ping_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    ping_socket.bind(('', 0))

    loop = ioloop.IOLoop.instance()

    def ping(serial_number=0):
        # Broadcast both endpoint URLs plus an increasing serial number.
        data = json.dumps({
            'upstream': upstream_url,
            'downstream': downstream_url,
            'serial_number': serial_number
        })
        message = "%s\n%s" % (MAGIC_WORD, data)
        ping_socket.sendto(message.encode(), ('<broadcast>', 6969))

        # Schedule the next announcement.
        loop.add_timeout(datetime.timedelta(seconds=PING_FREQUENCY),
                         lambda: ping(serial_number=serial_number + 1))
        log.info("Ping: %s" % message)

    loop.add_callback(ping)

    def receive(msg):
        # Forward every upstream frame to downstream subscribers.
        for frame in msg:
            downstream.send(frame)
            log.info('Proxied: %s' % (frame))

    ZMQStream(upstream).on_recv(receive)

    try:
        loop.start()
    finally:
        upstream.close()
        downstream.close()
Esempio n. 22
0
class MQAsyncSub():
    """Async MQ subscriber: connects a SUB socket according to the 'mq'
    config and feeds each event through _on_message to the overridable
    on_message hook."""

    def __init__(self, context, caller_id, category_filters):
        cfg = Loader('mq').load()
        self.cfg_mq = dict(cfg[1])
        # "*" in the config means "this host's address".
        if self.cfg_mq['ip'].strip() == "*":
            self.cfg_mq['ip'] = get_ip()
        sub_addr = "tcp://{0}:{1}".format(self.cfg_mq['ip'], self.cfg_mq['sub_port'])
        self.caller_id = caller_id
        self.s_recv = context.socket(zmq.SUB)
        self.s_recv.connect(sub_addr)

        # No filters means subscribe to everything.
        if not category_filters:
            self.s_recv.setsockopt_string(zmq.SUBSCRIBE, u'')
        else:
            for category_filter in category_filters:
                self.s_recv.setsockopt_string(zmq.SUBSCRIBE, u"{0}".format(category_filter))
        self.stream_asyncmq = ZMQStream(self.s_recv, IOLoop.instance())
        self.stream_asyncmq.on_recv(self._on_message)

    def __del__(self):
        # Not sure this is really mandatory
        self.s_recv.close()

    def _on_message(self, msg):
        """Received an event: strip the version and timestamp suffixes from
        the id and hand the decoded payload to on_message.

        :param: msg = the message received
        """
        if len(msg) < 2:
            # this sometimes happens, no idea why, probably a bug in pyzmq
            return
        parts = msg[0].decode().split('.')
        if len(parts) < 3:
            # this sometimes happens, no idea why, probably a bug in pyzmq
            return
        # Drop message version and timestamp, keep the id.
        mid = '.'.join(parts[:-2])
        try:
            self.on_message(mid, json.loads(msg[1].decode()))
        except ValueError:
            pass

    def on_message(self, msg_id, content):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """
        pass
Esempio n. 23
0
class RouterPubSubProxy:
    '''
    This is a proxy that has one front end socket, and two backend sockets. The
    front end socket is a router that passes the messages to backend Pub. Pub
    broadcasts them to all subscribers, which respond with results to backend
    Sub. All communications on this proxy are done through IPC.
    '''
    def __init__(self,
                 front,
                 back_out,
                 back_in,
                 loop):
        '''
        Initializes the instance of RouterPubSubProxy.

        @param front - channel name to be the routing stream
        @param back_out - channel name of the publishing stream
        @param back_in - channel name of result receiving stream
        @param loop - IOLoop
        '''
        self._loop = loop
        ctx = zmq.Context.instance()

        # Front end: ROUTER that raises on unroutable peers.
        self._front_stream = ZMQStream(ctx.socket(zmq.ROUTER), io_loop=loop)
        self._front_stream.setsockopt(zmq.ROUTER_MANDATORY, 1)
        self._front_stream.bind(ZmqAddress(chan_name=front).zmq_url())

        # Back end out: PUB broadcasting to all subscribers.
        self._back_out_stream = ZMQStream(ctx.socket(zmq.PUB), io_loop=loop)
        self._back_out_stream.bind(ZmqAddress(chan_name=back_out).zmq_url())

        # Back end in: SUB collecting every result.
        self._back_in_stream = ZMQStream(ctx.socket(zmq.SUB), io_loop=loop)
        self._back_in_stream.setsockopt(zmq.SUBSCRIBE, b'')
        self._back_in_stream.bind(ZmqAddress(chan_name=back_in).zmq_url())

        def forward(from_name, to_name, zmq_stream, msgs):
            # Log and immediately flush so routing latency stays low.
            log.info("Routing from {0} to {1} messages {2}"
                     .format(from_name, to_name, msgs))
            zmq_stream.send_multipart(msgs)
            zmq_stream.flush()

        self._front_stream.on_recv(
            lambda msgs: forward(front, back_out, self._back_out_stream, msgs))
        self._back_in_stream.on_recv(
            lambda msgs: forward(back_in, front, self._front_stream, msgs))

    def start(self):
        '''
        Start this proxy.
        '''
        self._loop.start()
Esempio n. 24
0
def recv(msg):
    	global pair_socket, pair_stream
    	print msg
    	port = int(msg[0])
    	print port
    	pair_socket = ctx.socket(zmq.PAIR)
    	pair_socket.connect("tcp://176.31.243.99:%d" % port)
    	pair_stream = ZMQStream(pair_socket, command_stream.io_loop)
    	pair_stream.on_recv(pair_recv)
    	send()
Esempio n. 25
0
 def start(self):
     """Wire the UDP beacon and control pipe into the loop, start the
     periodic ping/reap tasks, and run the loop (blocks)."""
     loop = self.loop
     loop.add_handler(self.udp.handle.fileno(), self.handle_beacon, loop.READ)
     ZMQStream(self.pipe, loop).on_recv(self.control_message)
     # Both housekeeping tasks run on the same interval.
     for task in (self.send_ping, self.reap_peers):
         PeriodicCallback(task, PING_INTERVAL * 1000, loop).start()
     loop.start()
Esempio n. 26
0
        def worker(vent_address,sink_address,sz,dtype):
            """Worker-process body: pull parameter matrices from the vent,
            compute a learning-rate-scaled gradient step, and push it to the
            sink.  *sz*/*dtype* describe the serialized A matrix.
            """
            import zmq
            import theano
            from zmq.eventloop import ioloop
            ioloop.install()
            from zmq.eventloop.zmqstream import ZMQStream

            # Context
            context = zmq.Context()

            # Socket to receive messages on
            receiver = context.socket(zmq.PULL)
            receiver.connect(vent_address)
            receiver_stream = ZMQStream(receiver)

            # Socket to send messages to
            sender = context.socket(zmq.PUSH)
            sender.connect(sink_address)

            def _worker(msg_list, sz=sz, dtype=dtype, sender=sender):
                """Handle one frame: rebuild A, take a gradient step on a
                fresh data batch, adapt eta, send dA back."""
                import theano
                import numpy as np

                msg = msg_list[0]

                # NOTE: buffer() is the Python 2 zero-copy view builtin.
                # if normalize_A does any inplace operation, we need to .copy() here:
                new_A = np.frombuffer(buffer(msg), dtype=dtype).reshape(sz).copy()
                new_A = l.model.normalize_A(new_A)

                l.model.A.set_value(new_A.astype(theano.config.floatX))

                x = l.get_databatch()
                dA = l.model.gradient(x)['dA']
                dA *= l.eta

                # Largest per-column update relative to the largest parameter,
                # used to keep the step size in check.
                param_max = np.max(np.abs(l.model.A.get_value()), axis=0)
                update_max = np.max(np.abs(dA), axis=0)
                update_max = np.max(update_max / param_max)

                l._adapt_eta(update_max)

                # no subset selection:
                sender.send(dA,copy=False)

                # subset selection:
                #inds = np.argwhere(dA.sum(0) != 0.).ravel()
                #subset_dA = dA[:, inds]
                #sender.send_pyobj(dict(inds=inds, subset_dA=subset_dA))

            receiver_stream.on_recv(_worker,copy=False)
            iolooper = ioloop.IOLoop.instance()
            iolooper.start()

            return
Esempio n. 27
0
class MQAsyncSub():
    """Async MQ subscriber (Django-aware variant): resolves the broker
    address from the domogik config loader when available, otherwise from
    the Parameter table, then dispatches events to on_message."""

    def __init__(self, context, caller_id, category_filters):
        if ("domogik.common.configloader" in sys.modules):
            cfg = Loader('mq').load()
            self.cfg_mq = dict(cfg[1])
            sub_addr = "tcp://{0}:{1}".format(self.cfg_mq['ip'], self.cfg_mq['sub_port'])
        else:
            ipaddr = Parameter.objects.get(key='mq-ip')
            port = Parameter.objects.get(key='mq-sub_port')
            sub_addr = "tcp://{0}:{1}".format(ipaddr.value, port.value)
        self.caller_id = caller_id
        self.s_recv = context.socket(zmq.SUB)
        self.s_recv.connect(sub_addr)

        # No filters means subscribe to everything.
        if not category_filters:
            self.s_recv.setsockopt(zmq.SUBSCRIBE, '')
        else:
            for category_filter in category_filters:
                self.s_recv.setsockopt(zmq.SUBSCRIBE, category_filter)
        self.stream = ZMQStream(self.s_recv, IOLoop.instance())
        self.stream.on_recv(self._on_message)

    def __del__(self):
        # Not sure this is really mandatory
        self.s_recv.close()

    def _on_message(self, msg):
        """Received an event: strip the version and timestamp suffixes from
        the id and hand the decoded payload to on_message.

        :param: msg = the message received
        """
        mid = msg[0].split('.')
        del mid[-1]   # message version
        del mid[-1]   # timestamp
        mid = '.'.join(mid)

        try:
            self.on_message(mid, json.loads(msg[1]))
        except ValueError:
            pass

    def on_message(self, msg_id, content):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """
        pass
Esempio n. 28
0
class Worker(object):
    """Fetch worker: pulls crawl requests from the master, publishes
    results, and announces itself over the control channel."""

    def __init__(self, robot,
            data_in_sock='ipc:///tmp/robot-data-m2w.sock',
            data_out_sock='ipc:///tmp/robot-data-w2m.sock',
            msg_in_sock='ipc:///tmp/robot-msg-m2w.sock',
            msg_out_sock='ipc:///tmp/robot-msg-w2m.sock',
            io_loop=None):
        # Unique worker identity: host + pid.
        self.identity = 'worker:%s:%s' % (socket.gethostname(), os.getpid())

        context = zmq.Context()

        self._io_loop = io_loop or IOLoop.instance()

        # PULL stream for incoming requests from the master.
        self._in_socket = context.socket(zmq.PULL)
        self._in_socket.connect(data_in_sock)
        self._in_stream = ZMQStream(self._in_socket, io_loop)

        # PUB stream for outgoing results.
        self._out_socket = context.socket(zmq.PUB)
        self._out_socket.connect(data_out_sock)
        self._out_stream = ZMQStream(self._out_socket, io_loop)

        self._running = False

        self.robot = robot
        self.robot.set_worker_identity(self.identity)
        self.messenger = ClientMessenger(msg_in_sock, msg_out_sock,
                context, io_loop)

    def start(self):
        """Announce this worker on the control channel and begin
        consuming requests."""
        logging.info('[%s] starting', self.identity)
        self.messenger.start()
        self.messenger.publish(CTRL_MSG_WORKER, self.identity,
                CTRL_MSG_WORKER_ONLINE)

        self._in_stream.on_recv(self._on_receive_request)
        self._running = True

    def stop(self):
        """Stop consuming and shut the messenger down."""
        self._running = False
        self.messenger.stop()

    def close(self):
        """Release every stream, socket and the messenger."""
        for closable in (self._in_stream, self._in_socket,
                         self._out_stream, self._out_socket,
                         self.messenger):
            closable.close()

    def _on_receive_request(self, zmq_msg):
        """Deserialize one request message and hand it to the robot."""
        msg = RequestMessage.deserialize(zmq_msg)
        request = msg.request
        logging.debug('[%s] receive request(%s)', self.identity, request.url)
        self.robot.fetch(request)
Esempio n. 29
0
class MainHandler(websocket.WebSocketHandler):
    _first = True

    @property
    def ref(self):
        return id(self)

    def initialize(self):
        print "WebSocket initialize"
        self.push_socket = ctx.socket(zmq.PUSH)
        self.sub_socket = ctx.socket(zmq.SUB)

        self.push_socket.connect("ipc:///tmp/ws_push")
        #self.sub_socket.connect("ipc:///tmp/ws_sub")
        self.sub_socket.bind("ipc:///tmp/ws_sub")
        self.sub_socket.setsockopt(zmq.SUBSCRIBE, "")

        self.zmq_stream = ZMQStream(self.sub_socket)
        self.zmq_stream.on_recv(self.zmq_msg_recv)

    def open(self, *args, **kwargs):
        print "WebSocket opened", args, kwargs

    def on_message(self, message):
        print "WebSocket on_message"
        if self._first:
            msg = {'message': message, 'id':self.ref, 'action':'connect'}
            self._first = False

        else:
            msg = {'message': message, 'id':self.ref, 'action':'message'}

        self.push_socket.send_pyobj(msg)

    def on_close(self):
        print "WebSocket closed"
        msg = {'message': '', 'id': id(self), 'action': 'close'}
        self.push_socket.send_pyobj(msg)
        self.zmq_stream.close()
        self.sub_socket.close()
        self.push_socket.close()

    def zmq_msg_recv(self, data):
        print "zmq_msg_recv: %s" % repr(data)
        for message in data:
            message = pickle.loads(message)
            #_id, _msg = message['id'], message['message']

            print ' = ', repr(message)
            #if _id != self.ref:
            #    continue

            #self.write_message(_msg)
            self.write_message(json.dumps(message, indent=4))
Esempio n. 30
0
def setupZmqSubscriber():
    ctx = zmq.Context()
    s = ctx.socket(zmq.SUB)
    s.connect('tcp://127.0.0.1:5000')
    s.setsockopt(zmq.SUBSCRIBE, "")

    stream = ZMQStream(s)
    def echo(product):
    	print "receiving message: %s" % product
        for socket in socket_connections:
            socket.write_message(product[0])
    stream.on_recv(echo)
Esempio n. 31
0
File: zmq.py Progetto: tomzhang/jina
class ZmqStreamlet(Zmqlet):
    """A :class:`ZmqStreamlet` object can send/receive data to/from ZeroMQ stream and invoke callback function. It
    has three sockets for input, output and control.

    .. warning::
        Starting from v0.3.6, :class:`ZmqStreamlet` replaces :class:`Zmqlet` as one of the key components in :class:`jina.peapods.pea.BasePea`.
        It requires :mod:`tornado` and :mod:`uvloop` to be installed.
    """
    def register_pollin(self):
        """Wrap the in/out/ctrl sockets in :class:`ZMQStream` objects driven by a fresh tornado IOLoop."""
        use_uvloop()
        import asyncio
        # A new event loop per streamlet: these objects typically live in
        # their own process/thread.
        asyncio.set_event_loop(asyncio.new_event_loop())
        try:
            import tornado.ioloop
            self.io_loop = tornado.ioloop.IOLoop.current()
        except (ModuleNotFoundError, ImportError):
            self.logger.error(
                'Since v0.3.6 Jina requires "tornado" as a base dependency, '
                'we use its I/O event loop for non-blocking sockets. '
                'Please try reinstall via "pip install -U jina" to include this dependency'
            )
            raise
        self.in_sock = ZMQStream(self.in_sock, self.io_loop)
        self.out_sock = ZMQStream(self.out_sock, self.io_loop)
        self.ctrl_sock = ZMQStream(self.ctrl_sock, self.io_loop)
        # Input stays paused until start() installs the receive callback.
        self.in_sock.stop_on_recv()

    def close(self):
        """Close all sockets and shutdown the ZMQ context associated to this `Zmqlet`. """
        if not self.is_closed:
            # wait until the close signal is received
            time.sleep(.01)
            # Flush messages still queued on the streams before closing.
            for s in self.opened_socks:
                s.flush()
            super().close()
            try:
                self.io_loop.stop()
                # Replace handle events function, to skip
                # None event after sockets are closed.
                if hasattr(self.in_sock, '_handle_events'):
                    self.in_sock._handle_events = lambda *args, **kwargs: None
                if hasattr(self.out_sock, '_handle_events'):
                    self.out_sock._handle_events = lambda *args, **kwargs: None
                if hasattr(self.ctrl_sock, '_handle_events'):
                    self.ctrl_sock._handle_events = lambda *args, **kwargs: None
            except AttributeError as e:
                self.logger.error(f'failed to stop. {e}')

    def pause_pollin(self):
        """Remove :attr:`in_sock` from the poller """
        self.in_sock.stop_on_recv()

    def resume_pollin(self):
        """Put :attr:`in_sock` back to the poller """
        self.in_sock.on_recv(self._in_sock_callback)

    def start(self, callback: Callable[['jina_pb2.Message'], None]):
        """Install receive callbacks on all sockets and run the IOLoop until it is stopped.

        :param callback: Invoked for every received message; a truthy
            return value is sent back out via ``send_message``.
        """
        def _callback(msg, sock_type):
            msg, num_bytes = _prepare_recv_msg(sock_type, msg,
                                               self.args.check_version)
            self.bytes_recv += num_bytes
            self.msg_recv += 1

            msg = callback(msg)

            if msg:
                self.send_message(msg)

        # Keep a reference so resume_pollin() can re-install it later.
        self._in_sock_callback = lambda x: _callback(x, self.in_sock_type)
        self.in_sock.on_recv(self._in_sock_callback)
        self.ctrl_sock.on_recv(lambda x: _callback(x, self.ctrl_sock_type))
        if self.out_sock_type == zmq.ROUTER:
            self.out_sock.on_recv(lambda x: _callback(x, self.out_sock_type))
        self.io_loop.start()
        # start() returns only after the loop stops; tear it down fully.
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
Esempio n. 32
0
class CloneServer(object):
    """Clone-pattern key-value server (ZeroMQ guide style): serves state
    snapshots over ROUTER, collects updates over PULL and republishes
    them to all clients over PUB."""

    # Our server is defined by these properties
    ctx = None  # Context wrapper
    kvmap = None  # Key-value store
    loop = None  # IOLoop reactor
    port = None  # Main port we're working on
    sequence = 0  # How many updates we're at
    snapshot = None  # Handle snapshot requests
    publisher = None  # Publish updates to clients
    collector = None  # Collect updates from clients

    def __init__(self, port=5556):
        # Ports used: port (snapshot/ROUTER), port+1 (publish/PUB),
        # port+2 (collect/PULL).
        self.port = port
        self.ctx = zmq.Context()
        self.kvmap = {}
        self.loop = IOLoop.instance()

        # Set up our clone server sockets
        self.snapshot = self.ctx.socket(zmq.ROUTER)
        self.publisher = self.ctx.socket(zmq.PUB)
        self.collector = self.ctx.socket(zmq.PULL)
        self.snapshot.bind("tcp://*:%d" % self.port)
        self.publisher.bind("tcp://*:%d" % (self.port + 1))
        self.collector.bind("tcp://*:%d" % (self.port + 2))

        # Wrap sockets in ZMQStreams for IOLoop handlers
        self.snapshot = ZMQStream(self.snapshot)
        self.publisher = ZMQStream(self.publisher)
        self.collector = ZMQStream(self.collector)

        # Register our handlers with reactor
        self.snapshot.on_recv(self.handle_snapshot)
        self.collector.on_recv(self.handle_collect)
        # Expire ttl-carrying entries once a second.
        self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)

        # basic log formatting:
        logging.basicConfig(format="%(asctime)s %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S",
                            level=logging.INFO)

    def start(self):
        # Run reactor until process interrupted
        self.flush_callback.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def handle_snapshot(self, msg):
        """snapshot requests"""
        # Expected frames: [client identity, "ICANHAZ?", subtree prefix]
        if len(msg) != 3 or msg[1] != "ICANHAZ?":
            print "E: bad request, aborting"
            dump(msg)
            self.loop.stop()
            return
        identity, request, subtree = msg
        if subtree:
            # Send state snapshot to client
            route = Route(self.snapshot, identity, subtree)

            # For each entry in kvmap, send kvmsg to client
            for k, v in self.kvmap.items():
                send_single(k, v, route)

            # Now send END message with sequence number
            logging.info("I: Sending state shapshot=%d" % self.sequence)
            self.snapshot.send(identity, zmq.SNDMORE)
            kvmsg = KVMsg(self.sequence)
            kvmsg.key = "KTHXBAI"
            kvmsg.body = subtree
            kvmsg.send(self.snapshot)

    def handle_collect(self, msg):
        """Collect updates from clients"""
        kvmsg = KVMsg.from_msg(msg)
        # Stamp each update with a monotonically increasing sequence.
        self.sequence += 1
        kvmsg.sequence = self.sequence
        kvmsg.send(self.publisher)
        ttl = kvmsg.get('ttl')
        if ttl is not None:
            # Convert relative ttl to an absolute expiry timestamp.
            kvmsg['ttl'] = time.time() + ttl
        kvmsg.store(self.kvmap)
        logging.info("I: publishing update=%d", self.sequence)

    def flush_ttl(self):
        """Purge ephemeral values that have expired"""
        # NOTE(review): flush_single may delete from kvmap; this is safe
        # only because Python 2 items() returns a list copy. Under
        # Python 3 this would mutate the dict during iteration.
        for key, kvmsg in self.kvmap.items():
            self.flush_single(kvmsg)

    def flush_single(self, kvmsg):
        """If key-value pair has expired, delete it and publish the fact
        to listening clients."""
        # NOTE(review): the default of 0 means entries that never carried
        # a 'ttl' property also satisfy this test — confirm only
        # ttl-carrying entries reach here, or they will be purged too.
        if kvmsg.get('ttl', 0) <= time.time():
            # Publish a delete (empty body) so clients drop the key too.
            kvmsg.body = ""
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            del self.kvmap[kvmsg.key]
            logging.info("I: publishing delete=%d", self.sequence)
Esempio n. 33
0
class TagHandler(object):
    """Handles parsed XMPP stanzas for one client connection.

    Performs SASL authentication, resource binding and session setup, and
    bridges stanzas between the client's XML stream and the ZMQ
    publisher / per-client pull socket.
    """

    def __init__(self, connection):
        """
        :param connection: The client connection this handler serves; must
            expose send_element/send_string and the underlying stream.
        """
        super(TagHandler, self).__init__()
        self.connection = connection
        self.send_element = connection.send_element
        self.send_string = connection.send_string

        self.jid = None
        self.hostname = None

        self.authenticated = False
        self.session_active = False
        self.publisher = get_publisher()
        self.pull_url = None
        self.pull_socket = None

    def close(self):
        """Is called when the client connection is closed to do cleanup work"""

        # unregister from forwarder
        if self.pull_socket is not None:
            reg_msg = ZMQForwarder_message('UNREGISTER')
            reg_msg.attributes = self.pull_url
            self.publisher.send_pyobj(reg_msg)
            self.processed_stream.close()
            self.pull_socket.close()
            self.pull_socket = None

    def contenthandler(self, tree):
        """Handles an incomming content tree"""

        # set/replace the from attribute in stanzas as required
        # by RFC 6120 Section 8.1.2.1
        if self.authenticated:
            tree.set("from", str(self.jid))

        try:
            if tree.tag == "auth":
                if self.authenticated:
                    raise NotAllowedError
                self.authenticate(tree)

            elif tree.tag == "iq":
                if not self.authenticated:
                    raise NotAuthorizedError
                if self.jid.resource is None:
                    self.set_resource(tree)
                else:
                    if not self.session_active:
                        first_element = tree[0]
                        if first_element.tag == "session" and \
                                first_element.get("xmlns") == SESSION_NS:
                            response_element = ET.Element("iq")
                            response_element.set("type", "result")
                            response_element.set("id", tree.get("id"))
                            session_element = ET.SubElement(
                                response_element, "session")
                            session_element.set("xmlns", SESSION_NS)
                            self.send_element(response_element)
                            log.debug("Sent empty session element")
                            # Session established: switch to direct
                            # delivery of queued stanzas.
                            self.processed_stream.stop_on_recv()
                            self.processed_stream.on_recv(
                                self.send_list, False)
                        else:
                            self.publish_stanza(tree)
                    else:
                        self.publish_stanza(tree)

            elif tree.tag in ["message", "presence"]:
                if not self.authenticated:
                    raise NotAuthorizedError
                self.publish_stanza(tree)

        except StreamError as e:
            # BUGFIX: was "except (StreamError, e):", which evaluates the
            # unbound name e and raises NameError instead of handling the
            # stream error.
            self.send_string(unicode(e))
            self.connection.stop_connection()

    def publish_stanza(self, tree):
        """Publish a stanza to the ZMQ forwarder for routing."""
        log.debug("Publishing Stanza %s" % (ET.tostring(tree)))
        self.publisher.send(cPickle.dumps(tree))

    def masked_send_list(self, msgs):
        """Unmark waiting for a session element if we received another stanza response"""

        self.session_active = True
        # From now on deliver directly via send_list.
        self.processed_stream.stop_on_recv()
        self.processed_stream.on_recv(self.send_list, False)
        self.send_list(msgs)

    def send_list(self, msgs):
        """Deliver queued stanzas addressed to this client's JID."""
        try:
            for msg in msgs:
                tmp = cPickle.loads(msg.bytes)
                # Accept both full-JID and bare-JID addressing.
                if tmp.get("to") == str(
                        self.jid) or tmp.get("to") == self.jid.bare:
                    self.send_element(tmp)
        except IOError:
            self.connection.stop_connection()

    def set_resource(self, tree):
        """Set a resource on our JID"""

        bind_element = tree[0]
        if tree.get("type") != "set" or \
                bind_element.tag != 'bind' or\
                bind_element.get("xmlns") != BIND_NS:
            raise NotAuthorizedError

        resource_element = bind_element.find("resource")
        if resource_element is None:
            # No prefered resource was set, generate one
            self.jid.resource = uuid.uuid4().hex
        else:
            self.jid.resource = resource_element.text
        if not self.jid.validate():
            raise BadRequestError

        # Check if given resource is already in use
        known_jids = get_known_jids()
        try:
            known_jids.append(self.jid)
        except ValueError:
            raise ConflictError
        log.info("Bound connection as %s" % str(self.jid))

        # Connect to forwarder to receive stanzas sent back to client
        log.debug('Registering Client at forwarder..')
        self.pull_socket = zmq.Context().socket(zmq.PULL)
        self.processed_stream = ZMQStream(self.pull_socket,
                                          self.connection.stream.io_loop)
        self.processed_stream.on_recv(self.masked_send_list, False)
        port = self.pull_socket.bind_to_random_port('tcp://127.0.0.1')
        self.pull_url = 'tcp://127.0.0.1:' + str(port)

        reg_msg = ZMQForwarder_message('REGISTER')
        reg_msg.attributes = (config.get('ipc',
                                         'password'), self.pull_url, self.jid)
        self.publisher.send_pyobj(reg_msg)

        # Send registered resource back to client
        response_element = ET.Element("iq")
        response_element.set("type", "result")
        response_element.set("id", tree.get("id"))
        bind_element = ET.SubElement(response_element, "bind")
        bind_element.set("xmlns", BIND_NS)
        jid_element = ET.SubElement(bind_element, "jid")
        jid_element.text = str(self.jid)
        self.send_element(response_element)

    def authenticate(self, tree):
        """Authenticates user for session"""

        # Currently RFC specifies only SASL as supported way of auth'ing
        handler = SASLAuthHandler()
        if tree.get('xmlns') != handler.namespace:
            raise MalformedRequestError
        handler.process(tree)
        # Parser reset: the client restarts its stream after SASL success.
        self.connection.parser.reset()
        self.jid = JID("@".join([handler.authenticated_user, self.hostname]))
        self.authenticated = True
        response_element = ET.Element("success")
        response_element.set("xmlns", handler.namespace)
        self.send_element(response_element)

    def add_auth_options(self, feature_element):
        """Add supported auth mechanisms to feature element"""

        handler = SASLAuthHandler()
        mechtype_element = ET.SubElement(feature_element, "mechanisms")
        mechtype_element.set("xmlns", handler.namespace)
        for mech in handler.supported_mechs:
            mech_element = ET.SubElement(mechtype_element, 'mechanism')
            mech_element.text = mech

    def add_server_features(self, feature_element):
        """Advertise post-authentication stream features (bind, session)."""
        bind = ET.SubElement(feature_element, "bind")
        bind.set("xmlns", "urn:ietf:params:xml:ns:xmpp-bind")

        # Session establishment is deprecated in RFC6121 but Appendix E
        # suggests to still advertise it as feature for compatibility.
        session = ET.SubElement(feature_element, "session")
        session.set("xmlns", "urn:ietf:params:xml:ns:xmpp-session")

    def streamhandler(self, attrs):
        """Handles a stream start"""

        if attrs == {}:
            # </stream:stream> received
            self.connection.stop_connection()
        else:
            # check if we are responsible for this stream
            self.hostname = attrs.getValue("to")
            if self.hostname not in config.getlist("listeners", "domains"):
                raise HostUnknownError

            # Stream restart
            stream = ET.Element("stream:stream")
            stream.set("xmlns", attrs.getValue("xmlns"))
            stream.set("from", self.hostname)
            stream.set("id", uuid.uuid4().hex)
            stream.set("xml:lang", "en")
            stream.set("xmlns:stream", "http://etherx.jabber.org/streams")

            # only include version in response if client sent its max supported
            # version (RFC6120 Section 4.7.5)
            try:
                if attrs.getValue("version") != "1.0":
                    raise UnsupportedVersionError
                stream.set("version", "1.0")
            except KeyError:
                pass

            try:
                from_jid = JID(attrs.getValue("from"))
                stream.set("to", unicode(from_jid))
            except ValueError:
                raise InvalidFromError
            except KeyError:
                pass

            start_stream = """<?xml version="1.0"?>""" + ET.tostring(stream)
            # Element has subitems but are added later to the stream,
            # so don't mark it a single element
            self.send_string(start_stream.replace("/>", ">"))

            # Make a list of supported features
            features = ET.Element("stream:features")
            if not self.authenticated:
                self.add_auth_options(features)
            else:
                self.add_server_features(features)

            self.send_element(features)
Esempio n. 34
0
class Emperor(object):
    """Wrapper around uwsgi-emperor, responsible for starting it and managing its vassals.

    NOTE(review): the module-level helper ``log_message(...)`` called in
    ``__init__``/``stop`` is shadowed by the instance method
    :meth:`log_message` (the ZMQ log callback) — confirm the intended
    target at each call site.
    """
    def __init__(self, root_dir):
        """Initialize uwsgi-emperor, reusing an already-running instance when possible.

        :param root_dir: Full path to the uwsgi-emperor root directory
        :type root_dir: str
        """
        self.__root_dir__ = root_dir

        if not os.path.exists(self.vassal_dir):
            log_message("Vassal directory does not exist, creating one",
                        component="Emperor")
            os.mkdir(self.vassal_dir)

        emperor_pid = 0

        # If a pidfile exists and points at a live process, reuse that
        # emperor; otherwise discard the stale pidfile.
        if os.path.exists(self.pidfile):
            with open(self.pidfile) as pid_file:
                try:
                    emperor_pid = int(pid_file.read())
                    psutil.Process(emperor_pid)

                    log_message("Found running emperor server",
                                component="Emperor")
                except (ValueError, psutil.NoSuchProcess):
                    os.remove(self.pidfile)

        if not emperor_pid:
            # Daemonized emperor, logging over zeromq to port 5123 and
            # exposing stats on port 1777.
            emperor = subprocess.Popen([
                self.uwsgi_binary, "--plugins-dir", self.binary_dir,
                "--emperor", self.vassal_dir, "--pidfile", self.pidfile,
                "--logger", "zeromq:tcp://127.0.0.1:5123", "--daemonize",
                "/dev/null", "--emperor-stats", "127.0.0.1:1777",
                "--emperor-required-heartbeat", "40", "--emperor-throttle",
                "10000", "--vassal-set", "plugins-dir={}".format(
                    self.binary_dir)
            ],
                                       bufsize=1,
                                       close_fds=True)
            code = emperor.wait()

            assert code == 0, "Error starting emperor server"
            log_message("Started emperor server", component="Emperor")

        self.vassals = {}

        # Receive the emperor's zeromq log output and route it through
        # log_message() to track vassal state transitions.
        ctx = zmq.Context()
        s = ctx.socket(zmq.PULL)
        s.bind('tcp://127.0.0.1:5123')
        self.stream = ZMQStream(s)
        self.stream.on_recv(self.log_message)

    @property
    def root_dir(self):
        """uwsgi-emperor root directory.

        :returns: Full path to the root directory
        :rtype: str
        """
        return self.__root_dir__

    @property
    def binary_dir(self):
        """Directory with the uwsgi-emperor executables and plugins.

        :returns: Full path to the binaries directory
        :rtype: str
        """
        return os.path.join(self.root_dir, "bin")

    @property
    def uwsgi_binary(self):
        """Main uwsgi executable.

        :returns: Full path to the uwsgi executable
        :rtype: str
        """
        return os.path.join(self.binary_dir, "uwsgi")

    @property
    def vassal_dir(self):
        """Directory holding the uwsgi vassal configurations.

        :returns: Full path to the vassal directory
        :rtype: str
        """
        return os.path.join(self.root_dir, "vassals")

    @property
    def pidfile(self):
        """uwsgi-emperor pid file.

        :returns: Full path to the pid file
        :rtype: str
        """
        return os.path.join(self.root_dir, "emperor.pid")

    @property
    def vassal_names(self):
        """Names of the vassals active right now, without the file extension.

        :returns: List of active vassal names
        :rtype: list
        """
        raw_names = os.listdir(self.vassal_dir)
        # Strip the 4-character ".ini" suffix.
        return [name[:-4] for name in raw_names]

    @coroutine
    def call_vassal_rpc(self, vassal, *args):
        """Call an RPC function of a vassal over its uwsgi RPC socket.

        :param vassal: Vassal name
        :type vassal: str
        :returns: Result of the call
        :rtype: dict
        """
        stats = self.stats(vassal)
        try:
            assert "pid" in stats

            process = psutil.Process(stats["pid"])
            host, port = process.connections()[0].laddr

            # uwsgi packet header: modifier1=173 (RPC), payload size,
            # modifier2=0; each argument is length-prefixed.
            client = yield TCPClient().connect(host, port)
            yield client.write(
                pack('<BHB', 173, sum(2 + len(arg) for arg in args), 0))

            for arg in args:
                yield client.write(pack('<H', len(arg)) + arg)

            data = yield client.read_bytes(4)
            modifier1, datasize, modifier2 = unpack("<BHB", data)

            data = yield client.read_bytes(datasize)
            raise Return({"result": "success", "data": data})

        except (AssertionError, psutil.NoSuchProcess):
            raise Return({"result": "failure", "message": "Not running"})
        except StreamClosedError:
            raise Return({"result": "failure", "message": "Call failure"})

    def stop(self):
        """Stop uwsgi-emperor and clear the vassal directory."""
        log_message("Stopping uwsgi emperor", component="Emperor")
        subprocess.call([self.uwsgi_binary, "--stop", self.pidfile])
        os.remove(self.pidfile)

        for name in os.listdir(self.vassal_dir):
            os.remove(os.path.join(self.vassal_dir, name))

    def start_vassal(self, vassal):
        """Start the given vassal by writing its ini config.

        :param vassal: Vassal to start
        :type vassal: Vassal
        """
        cfg_path = os.path.join(self.vassal_dir, "{}.ini".format(vassal.id))

        self.vassals[str(vassal.id)] = vassal

        # Rewriting an existing config file triggers the emperor to
        # restart the vassal; skip the write when nothing changed.
        if os.path.exists(cfg_path):
            with open(cfg_path, "r") as cfg:
                data = cfg.read()

            if data == vassal.get_config():
                return

            log_message(
                "Leaf {} have stale configuration, will restart".format(
                    vassal.id))

        with open(cfg_path, "w") as cfg:
            cfg.write(vassal.get_config())

    def stop_vassal(self, vassal):
        """Stop the given vassal by removing its ini config.

        :param vassal: Vassal to stop
        :type vassal: Vassal
        """
        cfg_path = os.path.join(self.vassal_dir, "{}.ini".format(vassal.id))

        if str(vassal.id) in self.vassals:
            del self.vassals[str(vassal.id)]

        if os.path.exists(cfg_path):
            os.remove(cfg_path)

    def soft_restart_vassal(self, vassal):
        """Perform a graceful restart of a vassal.

        :param vassal: Vassal to restart
        :type vassal: Vassal
        """
        cfg_path = os.path.join(self.vassal_dir, "{}.ini".format(vassal.id))

        # Touching the config file makes the emperor reload the vassal.
        if os.path.exists(cfg_path):
            os.utime(cfg_path, None)

    def stats(self, vassal):
        """Return statistics for the given vassal.

        :param vassal: Vassal name
        :type vassal: str
        :returns: Statistics for the vassal (empty dict when not found)
        :rtype: dict
        """
        for l in self.__stats__()["vassals"]:
            if l["id"] == "{}.ini".format(vassal):
                return l

        return {}

    def __stats__(self):
        """Return the internal uwsgi-emperor statistics.

        :returns: Dictionary with statistics
        :rtype: dict
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", 1777))

        data = ""

        # The stats server streams JSON until it closes the connection.
        while True:
            new_data = s.recv(4096)
            if len(new_data) < 1:
                break
            data += new_data.decode('utf8')

        return json.loads(data)

    @coroutine
    def log_message(self, message):
        """Handle an incoming uwsgi-emperor log message.

        :param message: Incoming ZeroMQ message (list of frames)
        :type message: list
        """
        for m in (_.strip() for _ in message if _.strip()):
            data = logparse_emperor(m)

            # Track vassal lifecycle transitions reported by the emperor.
            if data.get("log_type") == "emperor_vassal_ready":
                vassal_id = data.get("vassal")
                if vassal_id in self.vassals:
                    self.vassals[vassal_id].status = "Running"
            elif data.get("log_type") == "emperor_vassal_removed":
                vassal_id = data.get("vassal")
                if vassal_id in self.vassals:
                    # A vassal removed while still starting (or already
                    # failed) is considered failed, not stopped.
                    if self.vassals[vassal_id].status in ("Started", "Failed"):
                        self.vassals[vassal_id].status = "Failed"
                    else:
                        self.vassals[vassal_id].status = "Stopped"
Esempio n. 35
0
class UniWorker(object):
    """
    Implementation of "simple" ZeroMQ Paranoid Pirate communication scheme.  This class is the DEALER, and performs the
    "reply" in RPC calls.  By design, only supports one remote client (ROUTER) in order to keep example simple.
    Supports a very basic RPC interface, using MessagePack for encoding/decoding.
    """

    __metaclass__ = ABCMeta

    def __init__(self, endpoint, context=None):
        # type: (str, zmq.Context) -> None
        """
        Initialize the worker.
        :param endpoint: ZeroMQ endpoint to connect to.
        :param context: ZeroMQ Context
        """
        self._context = context or zmq.Context.instance()
        self._endpoint = endpoint
        self._stream = None  # type: Optional[ZMQStream]
        self._tmo = None
        self._need_handshake = True  # True until the client handshake completes
        self._ticker = None  # type: Optional[PeriodicCallback]
        self._delayed_cb = None
        self._connected_event = Event()  # set while a client connection is up
        self._lock = Lock()  # guards stop()/shutdown() against concurrent use

        self._create_stream()

        # Countdown of heartbeat intervals before the client is considered lost.
        self._curr_liveness = HB_LIVENESS
        self._keep_running = True

    def _create_stream(self):
        # type: () -> None
        """
        Helper function to create the ZMQ stream, configure callbacks.
        """
        self.on_log_event("uniworker.connect", "Trying to connect to client")
        socket = self._context.socket(zmq.DEALER)

        # Each worker owns its IOLoop so run() can block independently.
        self._stream = ZMQStream(socket, IOLoop())
        self._stream.on_recv(self._on_message)
        # LINGER 0: drop unsent messages immediately when the socket closes.
        self._stream.socket.setsockopt(zmq.LINGER, 0)
        self._stream.connect(self._endpoint)

        self._ticker = PeriodicCallback(self._tick, HB_INTERVAL)
        # Announce ourselves before the heartbeat ticker starts.
        self._send_ready()
        self._ticker.start()

    def run(self):
        # type: () -> None
        """
        Start the IOLoop, a blocking call to send/recv ZMQ messsages until the IOLoop is stopped.
        Note: The name of this function needs to stay the same so UniWorkerThread's run() is overridden with this function.
        """
        # Guard: a worker already stopped must not (re)start the loop.
        if self._keep_running:
            self._stream.io_loop.start()

    def stop(self):
        # type: () -> None
        """
        Stop the IOLoop.
        """
        with self._lock:
            self._keep_running = False
            if self._stream is not None:
                self._stream.io_loop.stop()
            else:
                # shutdown() already released the stream; nothing to stop.
                logger.warning("Can't stop worker-has shutdown() been called?")

    def shutdown(self):
        # type: () -> None
        """
        Close the stream/socket.  This should be called with the final flag when closing the connection for the last time.
        """

        with self._lock:
            if self._ticker:
                self._ticker.stop()
                self._ticker = None
            if not self._stream:
                # Already shut down; nothing to release.
                return

            # Detach the receive callback before telling the client we are
            # going away, then release the stream for good.
            self._stream.on_recv(None)
            self._send_disconnect()
            self._stream.close()
            self._stream = None
            self._need_handshake = True

    def wait_for_client(self, timeout):
        # type: (float) -> None
        """
        Wait for the worker to establish a connection with the remote client.
        Will return immediately if already connected.
        Typically, the worker provides a service/responds to requests, so this is really only used for unit testing.
        :param timeout: Max time, in seconds, to wait for the connection to establish.
        :raises LostRemoteError: If no client connected within *timeout*.
        """
        event_status = self._connected_event.wait(timeout)
        if not event_status:
            raise LostRemoteError("No worker is connected.")

    def is_connected(self):
        # type: () -> bool
        """
        Returns whether worker is connected to a client.
        :return: A boolean flag to indicate whether a connection to a client is established.
        """
        return not self._need_handshake

    def send_reply(self, msg, partial=False, exception=False):
        # type: (Any, bool, bool) -> None
        """
        Send a ZeroMQ message in reply to a client request.
        This should only be called out of the overridden do_work method.

        :param msg: The message to be sent out.
        :param partial: Flag indicating whether the response is a partial or final ZMQ message.
        :param exception: Flag indicating the reply carries an exception;
            takes precedence over *partial*.
        """

        msg = msgpack.Packer(default=XeroSerializer.encoder).pack(msg)
        if exception:
            to_send = [WORKER_EXCEPTION]
        elif partial:
            to_send = [WORKER_PARTIAL_REPLY]
        else:
            to_send = [WORKER_FINAL_REPLY]
        # Empty delimiter frame between header and payload.
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)

        self._stream.send_multipart(to_send, track=True, copy=False)

    def emit(self, msg):
        # type: (Any) -> None
        """
        Push an unsolicited message to the connected client.

        :param msg: The payload to send; packed with MessagePack.
        :raises LostRemoteError: If no client is currently connected.
        """
        if not self.is_connected():
            raise LostRemoteError("No client is connected.")
        msg = msgpack.Packer(default=XeroSerializer.encoder).pack(msg)
        to_send = [WORKER_EMIT]
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        # Hop onto the stream's IOLoop thread: ZMQ sockets are not
        # thread-safe, so the send must happen on the loop.
        self._stream.io_loop.add_callback(
            lambda x: self._stream.send_multipart(x, track=True, copy=False),
            to_send)

    def _tick(self):
        # type: () -> None
        """
        Periodic callback to check connectivity to client.
        """
        if self._curr_liveness >= 0:
            self._curr_liveness -= 1

        if self._curr_liveness > 0:
            self._send_heartbeat()
        elif self._curr_liveness == 0:
            # Connection died, close on our side.
            self.on_log_event(
                "uniworker.tick",
                "Connection to uniclient timed out, disconnecting")
            # NOTE(review): only the connected event is cleared here;
            # _need_handshake is not reset, so is_connected() may still
            # report True after a timeout — confirm this is intended.
            self._connected_event.clear()
        else:
            # Liveness exhausted (< 0): keep re-sending READY so the
            # connection can be re-established when a client appears.
            self._send_ready()

    def _send_heartbeat(self):
        # type: () -> None
        """
        Send a heartbeat message to the client.
        """
        # Heartbeats should go out immediately, if a lot of messages to be emitted are queued up heartbeats should
        # still be sent out regularly.  Therefore, send it out via the stream's socket, rather than the stream itself
        # See https://pyzmq.readthedocs.io/en/latest/eventloop.html#send
        self._stream.socket.send_multipart([WORKER_HEARTBEAT])

    def _send_disconnect(self):
        # type: () -> None
        """
        Send a disconnect message to the client.
        """
        # Send out via the socket (not the stream's queue), this message takes
        # priority over any pending stream sends.
        self._stream.socket.send_multipart([WORKER_DISCONNECT])

    def _send_ready(self):
        # type: () -> None
        """
        Advertise worker readiness to the client.
        """
        self.on_log_event("uniworker.ready", "Sending ready to client.")
        frames = [WORKER_READY]
        self._stream.send_multipart(frames)

    def _on_message(self, msg):
        # type: (List[bytes]) -> None
        """
        Dispatch one received ZeroMQ message from the client.

        :param msg: Frames laid out as
            [protocol header, message type, payload frames...].
        """
        # First remaining frame is the protocol version; ignore messages
        # speaking an unknown or outdated protocol.
        header = msg.pop(0)
        if header != UNI_CLIENT_HEADER:
            logger.error(
                "Message doesn't start with {}".format(UNI_CLIENT_HEADER))
            return
        # Next frame is the message type.
        kind = msg.pop(0)
        # Any traffic from the client proves liveness and completes the handshake.
        self._need_handshake = False
        self._connected_event.set()
        self._curr_liveness = HB_LIVENESS
        if kind == WORKER_DISCONNECT:
            # Drop liveness to zero; reconnect will be triggered by the hb timer.
            self._curr_liveness = 0
        elif kind == WORKER_REQUEST:
            # The remaining frames form the user request.
            self._on_request(msg)
        elif kind == WORKER_HEARTBEAT:
            # Heartbeat only refreshes liveness, which was done above.
            pass
        else:
            logger.error("Uniworker received unrecognized message")

    def _on_request(self, message):
        # type: (List[bytes]) -> None
        """
        Decode an incoming RPC request and hand it to do_work().

        :param message: Frames [call name, packed args, packed kwargs].
        """
        def _unpack(raw):
            # Both args and kwargs use the same msgpack/XeroSerializer decoding.
            return msgpack.unpackb(raw,
                                   object_hook=XeroSerializer.decoder,
                                   raw=False)

        name = str(message[0], 'utf-8')
        self.do_work(name, _unpack(message[1]), _unpack(message[2]))

    def on_log_event(self, event, message):
        # type: (str, str) -> None
        """
        Hook invoked on internal loggable events.  Designed for override.

        :param event: The event type.
        :param message: Loggable message.
        """
        text = "{}: {}".format(event, message)
        logger.debug(text)

    @abstractmethod
    def do_work(self, name, args, kwargs):
        # type: (str, List[Any], Dict[Any,Any]) -> None
        """
        Worker-specific request handler; subclasses must override this.

        :param name: The 'name' of the function/rpc call.
        :param args: Function call arguments.
        :param kwargs: Function call key arguments.
        """
        raise NotImplementedError()
Esempio n. 36
0
class MainHandler(websocket.WebSocketHandler):
    """Tornado websocket handler bridging browser clients and a ZMQ SUB socket.

    Each connection creates its own ZMQ context/SUB socket wrapped in a
    ZMQStream; frames received from the PUB side are forwarded to the
    websocket client.  (Python 2 syntax: uses print statements.)
    """

    # True until the first websocket message is handled; the first message is
    # tagged with action 'connect', subsequent ones with 'message'.
    _first = True

    @property
    def ref(self):
        # Per-connection identifier; simply this handler object's id().
        return id(self)

    def initialize(self):
        """Create a per-connection ZMQ SUB socket and hook it into the IO loop."""
        print 'Initializing tornado websocket'

        self.context = zmq.Context()
        self.sub_socket = self.context.socket(zmq.SUB)
        self.sub_socket.connect(PUB_ADDRESS)
        self.zmq_stream = ZMQStream(self.sub_socket)
        self.zmq_stream.on_recv(self.recv_func)

    def check_origin(self, origin):
        # Allow websocket connections from any origin (no filtering).
        return True

    def open(self, *args, **kwargs):
        """Acknowledge the websocket and subscribe to the agent-specific topic."""
        self.write_message("Open_Success")
        # args[0] is the agent id captured from the URL route.
        self.zmq_subscribe(args[0])

    def on_message(self, message):
        """Echo an incoming websocket message back, tagged with an action."""
        if self._first:
            msg = {'message': message, 'id': self.ref, 'action': 'connect'}
            print 'in if part - tornado server'
            self._first = False

        else:
            msg = {'message': message, 'id': self.ref, 'action': 'message'}
            print 'in else part - tornado server'
            print msg

        self.write_message(msg)

    def on_close(self):
        """Tear down the ZMQ socket, stream and context for this connection."""
        print("WebSocket closed")
        self.sub_socket.setsockopt(zmq.LINGER, 0)
        self.sub_socket.close()
        self.zmq_stream.close()
        self.context.term()

    def recv_func(self, message):
        """Forward a ZMQ-published frame to the websocket client.

        Expects a single frame of the form '<topic><ZMQ_SEPARATOR><json>'.
        """
        #self.process_data(message)
        try:
            message = message[0]
            topic, messagedata = message.split(ZMQ_SEPARATOR)
            messagedata = json.loads(messagedata)
            # self.vip.pubsub.publish('pubsub',topic,message=messagedata)
            print topic, messagedata
            self.write_message({'message': messagedata, 'topic': topic})
        except Exception as er:
            # Best-effort forwarding: malformed frames are printed and dropped.
            print er
            pass

    def process_data(self, data):
        """Alternative (currently unused) parser for multi-frame ZMQ messages."""
        print data
        zmessage = {'topic': '', 'headers': {}, 'message': ''}
        for item in data:
            if '/agent/ui' in item or '/app/ui' in item:
                zmessage['topic'] = item
            elif 'Date' in str(item):
                mesg = json.loads(item)
                zmessage['headers'] = mesg
            else:
                if '[' in item:
                    # NOTE(review): eval() on data received off the wire is
                    # unsafe unless the publisher is fully trusted -- consider
                    # json.loads / ast.literal_eval instead.
                    item = eval(item)
                    print type(item)
                    item = item[0]
                    if item[0] == '{':
                        item = json.loads(item)
                    zmessage['message'] = item
                else:
                    zmessage['message'] = item

        self.write_message(zmessage)

    def zmq_subscribe(self, agent_id):
        """Subscribe the SUB socket to messages addressed to this agent's UI."""
        #self.sub_socket.setsockopt(zmq.SUBSCRIBE, "")
        # NOTE(review): topicfilter is unused; the subscription below uses an
        # explicit "to/ui/from/<agent_id>" prefix instead.
        topicfilter = "to/ui/"
        self.sub_socket.setsockopt_string(zmq.SUBSCRIBE,
                                          "to/ui/from/" + agent_id)
Esempio n. 37
0
class BinaryStar(object):
    """Binary Star reactor: pairs a primary and a backup server and drives a
    finite state machine from heartbeat state exchanged over PUB/SUB sockets.
    (Python 2 syntax: uses print statements.)
    """

    ctx = None  # Our private context
    loop = None  # Reactor loop
    statepub = None  # State publisher
    statesub = None  # State subscriber
    state = None  # Current state
    event = None  # Current event
    peer_expiry = 0  # When peer is considered 'dead'
    voter_callback = None  # Voting socket handler
    master_callback = None  # Call when become master
    slave_callback = None  # Call when become slave
    heartbeat = None  # PeriodicCallback for publishing our state

    def __init__(self, primary, local, remote):
        """Set up the state PUB/SUB sockets and the heartbeat callback.

        :param primary: True for the primary server, False for the backup.
        :param local: endpoint to bind the state publisher to.
        :param remote: endpoint of the peer's state publisher.
        """
        # initialize the Binary Star
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        self.state = STATE_PRIMARY if primary else STATE_BACKUP

        # Create publisher for state going to peer
        self.statepub = self.ctx.socket(zmq.PUB)
        self.statepub.bind(local)

        # Create subscriber for state coming from peer
        self.statesub = self.ctx.socket(zmq.SUB)
        self.statesub.setsockopt(zmq.SUBSCRIBE, '')
        self.statesub.connect(remote)

        # wrap statesub in ZMQStream for event triggers
        self.statesub = ZMQStream(self.statesub, self.loop)

        # setup basic reactor events
        self.heartbeat = PeriodicCallback(self.send_state, HEARTBEAT,
                                          self.loop)
        self.statesub.on_recv(self.recv_state)

        # setup log formatter (placeholder)

    def update_peer_expiry(self):
        """Update peer expiry time to be 2 heartbeats from now."""
        # HEARTBEAT is presumably in milliseconds (2e-3 converts two
        # heartbeats to seconds) -- TODO confirm against HEARTBEAT's unit.
        self.peer_expiry = time.time() + 2e-3 * HEARTBEAT

    def start(self):
        """Start heartbeating and enter the reactor loop (blocks)."""
        self.update_peer_expiry()
        self.heartbeat.start()
        return self.loop.start()

    def execute_fsm(self):
        """Binary Star finite state machine (applies event to state)

        returns True if connections should be accepted, False otherwise.
        """
        accept = True
        if (self.state == STATE_PRIMARY):
            # Primary server is waiting for peer to connect
            # Accepts CLIENT_REQUEST events in this state
            if (self.event == PEER_BACKUP):
                print("I: connected to backup (slave), ready as master")
                self.state = STATE_ACTIVE
                if (self.master_callback):
                    self.loop.add_callback(self.master_callback)
            elif (self.event == PEER_ACTIVE):
                print("I: connected to backup (master), ready as slave")
                self.state = STATE_PASSIVE
                if (self.slave_callback):
                    self.loop.add_callback(self.slave_callback)
            elif (self.event == CLIENT_REQUEST):
                if (time.time() >= self.peer_expiry):
                    print("I: request from client, ready as master")
                    self.state = STATE_ACTIVE
                    if (self.master_callback):
                        self.loop.add_callback(self.master_callback)
                else:
                    # don't respond to clients yet - we don't know if
                    # the backup is currently Active as a result of
                    # a successful failover
                    accept = False
        elif (self.state == STATE_BACKUP):
            # Backup server is waiting for peer to connect
            # Rejects CLIENT_REQUEST events in this state
            if (self.event == PEER_ACTIVE):
                print("I: connected to primary (master), ready as slave")
                self.state = STATE_PASSIVE
                if (self.slave_callback):
                    self.loop.add_callback(self.slave_callback)
            elif (self.event == CLIENT_REQUEST):
                accept = False
        elif (self.state == STATE_ACTIVE):
            # Server is active
            # Accepts CLIENT_REQUEST events in this state
            # The only way out of ACTIVE is death
            if (self.event == PEER_ACTIVE):
                # Two masters would mean split-brain
                print("E: fatal error - dual masters, aborting")
                raise FSMError("Dual Masters")
        elif (self.state == STATE_PASSIVE):
            # Server is passive
            # CLIENT_REQUEST events can trigger failover if peer looks dead
            if (self.event == PEER_PRIMARY):
                # Peer is restarting - become active, peer will go passive
                print("I: primary (slave) is restarting, ready as master")
                self.state = STATE_ACTIVE
            elif (self.event == PEER_BACKUP):
                # Peer is restarting - become active, peer will go passive
                print("I: backup (slave) is restarting, ready as master")
                self.state = STATE_ACTIVE
            elif (self.event == PEER_PASSIVE):
                # Two passives would mean cluster would be non-responsive
                print("E: fatal error - dual slaves, aborting")
                raise FSMError("Dual slaves")
            elif (self.event == CLIENT_REQUEST):
                # Peer becomes master if timeout has passed
                # It's the client request that triggers the failover
                assert (self.peer_expiry > 0)
                if (time.time() >= self.peer_expiry):
                    # If peer is dead, switch to the active state
                    print("I: failover successful, ready as master")
                    self.state = STATE_ACTIVE
                else:
                    # If peer is alive, reject connections
                    accept = False
            # Call state change handler if necessary
            if (self.state == STATE_ACTIVE and self.master_callback):
                self.loop.add_callback(self.master_callback)
        return accept

    # ---------------------------------------------------------------------
    # Reactor event handlers...

    def send_state(self):
        """Publish our state to peer"""
        self.statepub.send("%d" % self.state)

    def recv_state(self, msg):
        """Receive state from peer, execute finite state machine"""
        state = msg[0]
        if state:
            self.event = int(state)
            self.update_peer_expiry()
        self.execute_fsm()

    def voter_ready(self, msg):
        """Application wants to speak to us, see if it's possible"""
        # If server can accept input now, call appl handler
        self.event = CLIENT_REQUEST
        if self.execute_fsm():
            print "CLIENT REQUEST"
            self.voter_callback(self.voter_socket, msg)
        else:
            # Message will be ignored
            pass

    # -------------------------------------------------------------------------
    #

    def register_voter(self, endpoint, type, handler):
        """Create socket, bind to local endpoint, and register as reader for
        voting. The socket will only be available if the Binary Star state
        machine allows it. Input on the socket will act as a "vote" in the
        Binary Star scheme.  We require exactly one voter per bstar instance.

        handler will always be called with two arguments: (socket,msg)
        where socket is the one we are creating here, and msg is the message
        that triggered the POLLIN event.
        """
        assert self.voter_callback is None

        socket = self.ctx.socket(type)
        socket.bind(endpoint)
        self.voter_socket = socket
        self.voter_callback = handler

        stream = ZMQStream(socket, self.loop)
        stream.on_recv(self.voter_ready)
Esempio n. 38
0
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread
    
    prevents a blocking main thread
    
    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """
    def __init__(self, socket, pipe=False):
        """Wrap *socket* so its sends run on a private IOLoop thread.

        :param socket: the zmq socket all sends are funneled through.
        :param pipe: if True, also listen on a local PULL socket so forked
            subprocesses can pipe their output back to this (master) process.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # Remember the creating pid so forks can be detected later.
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop()
        if pipe:
            self._setup_pipe_in()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.start()
        self.io_loop.close()

    def _setup_pipe_in(self):
        """setup listening pipe for subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = uuid.uuid4().bytes

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0
        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            # Binding failed: disable the pipe feature rather than crash.
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                          "\nsubprocess output will be unavailable.")
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        # First frame must carry the shared UUID, authenticating the sender.
        if msg[0] != self._pipe_uuid:
            # NOTE(review): print does not %-format its arguments; msg is
            # printed as a separate value here.
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        """Create a fresh context + PUSH socket for a forked child to send through."""
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000  # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        # True while running in the process that created this object.
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        # Ask the loop (running in the background thread) to stop itself,
        # then wait for the thread to finish.
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()

    def close(self):
        """Close the underlying socket; the object is unusable afterwards."""
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        # True once close() has been called.
        return self.socket is None

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.
        
        If my thread isn't running (e.g. forked process), send immediately.
        """

        if self.thread.is_alive():
            self.io_loop.add_callback(
                lambda: self._really_send(*args, **kwargs))
        else:
            self._really_send(*args, **kwargs)

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
class Test_MDPWorker(unittest.TestCase):
    """Tests an MDP worker against a fake in-process broker (XREP socket).
    (Python 2 syntax: uses print statements.)
    """

    endpoint = b'tcp://127.0.0.1:7777'
    service = b'test'

    def setUp(self):
        """Create a fresh zmq context; the broker is started per-test."""
        print 'set up'
        sys.stdout.flush()
        self.context = zmq.Context()
        self.broker = None
        self._msgs = []
        return

    def tearDown(self):
        """Stop the fake broker and drop the context."""
        print 'tear down'
        sys.stdout.flush()
        if self.broker:
            self._stop_broker()
        self.broker = None
        ##         self.context.term()
        self.context = None
        return

    def _on_msg(self, msg):
        """Broker-side receive handler; records the worker address and reacts
        to the MDP command byte (msg[1] after popping the sender frame)."""
        if _do_print:
            print 'broker received:',
            pprint(msg)
        self.target = msg.pop(0)
        if msg[1] == chr(1):  # ready
            print 'READY'
            self.target = msg[0]
            return
        if msg[1] == chr(4):  # heartbeat
            print 'HB'
            return
        if msg[1] == chr(3):  # reply
            # Reply received -- end the test by stopping the loop.
            IOLoop.instance().stop()
            return
        return

    def _start_broker(self, do_reply=False):
        """Helper activating a fake broker in the ioloop.
        """
        socket = self.context.socket(zmq.XREP)
        self.broker = ZMQStream(socket)
        self.broker.socket.setsockopt(zmq.LINGER, 0)
        self.broker.bind(self.endpoint)
        self.broker.on_recv(self._on_msg)
        self.broker.do_reply = do_reply
        self.broker.ticker = PeriodicCallback(self._tick, MyWorker.HB_INTERVAL)
        self.broker.ticker.start()
        self.target = None
        return

    def _stop_broker(self):
        """Tear down the fake broker's ticker, socket and stream."""
        if self.broker:
            self.broker.ticker.stop()
            self.broker.ticker = None
            self.broker.socket.close()
            self.broker.close()
            self.broker = None
        return

    def _tick(self):
        """Periodic heartbeat (command chr(4)) from broker to worker."""
        if self.broker and self.target:
            msg = [self.target, b'MPDW01', chr(4)]
            self.broker.send_multipart(msg)
        return

    def send_req(self):
        """Send a request (command chr(2)) with a two-part payload to the worker."""
        data = ['AA', 'bb']
        msg = [self.target, b'MPDW01', chr(2), self.target, b''] + data
        print 'borker sending:',
        pprint(msg)
        self.broker.send_multipart(msg)
        return

    # tests follow

    def test_01_simple_01(self):
        """Test MDPWorker simple req/reply.
        """
        self._start_broker()
        time.sleep(0.2)
        worker = MyWorker(self.context, self.endpoint, self.service)
        sender = DelayedCallback(self.send_req, 1000)
        sender.start()
        IOLoop.instance().start()
        worker.shutdown()
        self._stop_broker()
        return
Esempio n. 40
0
class MarketDataSubscriber(object):
    """Subscribes to the market data feed for one symbol and mirrors it.

    Maintains an in-memory order book (buy_side/sell_side), cumulative traded
    volume per currency, and persists trades through the application's
    database session.
    """
    @classmethod
    def instance(cls):
        """Return the shared class-level instance, creating it on first use."""
        if not hasattr(cls, "_instance"):
            cls._instance = cls()
        return cls._instance

    def __init__(self, symbol="ALL", application=None):
        self.symbol = str(symbol)
        self.buy_side = []  # resting bids, best price first
        self.sell_side = []  # resting asks, best price first
        self.volume_dict = {}  # cumulative traded volume keyed by currency
        self.inst_status = InstrumentStatusHelper(symbol)
        self.md_pub_socket = None
        self.md_pub_socket_stream = None
        # Trades arriving before ready() is called are buffered in process_later.
        self.is_ready = False
        self.process_later = []
        self.application = application
        self.db_session = application.db_session

    def subscribe(self, zmq_context, trade_pub_connection_string,
                  trade_client):
        """Connect a SUB socket to the trade publisher and request market data.

        Subscribes to full refresh, trade and incremental topics for this
        symbol, then sends a FIX-style market data subscription ('V') message
        through trade_client.
        """
        self.md_pub_socket = zmq_context.socket(zmq.SUB)
        self.md_pub_socket.connect(trade_pub_connection_string)
        self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                      "^MD_FULL_REFRESH_" + self.symbol + '$')
        self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                      "^MD_TRADE_" + self.symbol + '$')
        self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                      "^MD_INCREMENTAL_" + self.symbol + ".0$")
        self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                      "^MD_INCREMENTAL_" + self.symbol + ".1$")

        self.md_pub_socket_stream = ZMQStream(self.md_pub_socket)
        self.md_pub_socket_stream.on_recv(self.on_md_publish)

        md_subscription_msg = {
            'MsgType': 'V',
            'MDReqID': '0',  # not important.
            'SubscriptionRequestType': '0',
            'MarketDepth': 0,
            'TradeDate': time.strftime("%Y%m%d", time.localtime()),
            'MDUpdateType': '0',
            'MDEntryTypes': ['0', '1', '2'],
            'Instruments': [self.symbol]
        }

        self.application.log('DEBUG', 'MARKET_DATA_SUBSCRIBER', 'SUBSCRIBE')

        return trade_client.sendJSON(md_subscription_msg)

    def ready(self):
        """Mark the subscriber ready and flush trades buffered before readiness."""
        self.is_ready = True
        for trade in self.process_later:
            self.on_trade(trade)

        self.process_later = []

    @staticmethod
    def get(symbol, application=None):
        """Return the cached subscriber for *symbol*, creating it if needed."""
        global MDSUBSCRIBEDICT
        if symbol not in MDSUBSCRIBEDICT:
            MDSUBSCRIBEDICT[symbol] = MarketDataSubscriber(symbol, application)
        return MDSUBSCRIBEDICT[symbol]

    def get_last_trades(self):
        """Return the most recent trades from the database."""
        return Trade.get_last_trades(self.db_session)

    def get_trades(self, symbol, since):
        """Return trades for *symbol* since the given point from the database."""
        return Trade.get_trades(self.db_session, symbol, since)

    def on_md_publish(self, publish_msg):
        """Dispatch a published [topic, json] frame pair and log the latency."""
        start = datetime.datetime.now()

        topic = publish_msg[0]
        raw_message = publish_msg[1]

        msg = JsonMessage(raw_message)

        if msg.type == 'W':  # Full Refresh
            self.on_md_full_refresh(msg)

        elif msg.type == 'X':  # Incremental
            self.on_md_incremental(msg)

        finish = datetime.datetime.now()
        self.application.log(
            "DEBUG", "PERF",
            str([(finish - start).total_seconds(),
                 "MarketDataSubscriber.on_md_publish", "1",
                 [topic, raw_message]]))

    def on_md_full_refresh(self, msg):
        """Rebuild the order book and trade state from a full refresh ('W')."""
        # TODO: Check if our current order book is sync with the full refresh
        if msg.get('MarketDepth') != 1:  # Has Market Depth
            self.on_book_clear()
            self.on_trade_clear()

            group = msg.get('MDFullGrp')
            for entry in group:
                entry_type = entry.get('MDEntryType')

                if entry_type == '0' or entry_type == '1':
                    self.on_book_new_order(entry)
                elif entry_type == '2':
                    self.on_trade(entry)

    def on_md_incremental(self, msg):
        """Apply an incremental update ('X') to the order book and trades."""
        if msg.get('MDBkTyp') == '3':  # Order Depth
            group = msg.get('MDIncGrp')

            for entry in group:
                entry_type = entry.get('MDEntryType')

                signal_order_depth_entry(self.symbol + '.3.' + entry_type,
                                         entry)

                if entry_type == '0' or entry_type == '1':
                    # MDUpdateAction: 0=new, 1=update, 2=delete, 3=delete thru
                    update_action = entry.get('MDUpdateAction')
                    if update_action == '0':
                        self.on_book_new_order(entry)
                    elif update_action == '1':
                        self.on_book_update_order(entry)
                    elif update_action == '2':
                        self.on_book_delete_order(entry)
                    elif update_action == '3':
                        self.on_book_delete_orders_thru(entry)
                elif entry_type == '2':
                    self.on_trade(entry)
            signal_publish_md_order_depth_incremental(self.symbol + '.3',
                                                      {"MsgType": "X"})

    def on_book_clear(self):
        """Drop all resting orders from both sides of the book."""
        self.buy_side = []
        self.sell_side = []

    def on_trade_clear(self):
        """Reset the cumulative volume counters."""
        self.volume_dict = {}

    def on_book_delete_orders_thru(self, msg):
        """Delete all orders up to MDEntryPositionNo on one side of the book."""
        index = msg.get('MDEntryPositionNo')
        side = msg.get('MDEntryType')
        if side == '0':
            self.buy_side = self.buy_side[index:]

            if self.buy_side:
                self.inst_status.set_best_bid(self.buy_side[0]['price'])
            else:
                self.inst_status.set_best_bid(None)

        elif side == '1':
            self.sell_side = self.sell_side[index:]

            if self.sell_side:
                self.inst_status.set_best_ask(self.sell_side[0]['price'])
            else:
                self.inst_status.set_best_ask(None)

    def on_book_delete_order(self, msg):
        """Delete one order; refresh best bid/ask if the top of book changed."""
        # MDEntryPositionNo is 1-based.
        index = msg.get('MDEntryPositionNo') - 1
        side = msg.get('MDEntryType')

        if side == '0':
            del self.buy_side[index]
            if index == 0:
                if self.buy_side:
                    self.inst_status.set_best_bid(self.buy_side[0]['price'])
                else:
                    self.inst_status.set_best_bid(None)

        elif side == '1':
            del self.sell_side[index]
            if index == 0:
                if self.sell_side:
                    self.inst_status.set_best_ask(self.sell_side[0]['price'])
                else:
                    self.inst_status.set_best_ask(None)

    def on_book_new_order(self, msg):
        """Insert a new order at its book position; update best bid/ask at top."""
        index = msg.get('MDEntryPositionNo') - 1
        order = {
            'price': msg.get('MDEntryPx'),
            'qty': msg.get('MDEntrySize'),
            'username': msg.get('Username'),
            'user_id': msg.get('UserID'),
            'broker': msg.get('Broker'),
            'order_id': msg.get('OrderID'),
            'side': msg.get('MDEntryType'),
            'order_time': msg.get('MDEntryTime'),
            'order_date': msg.get('MDEntryDate')
        }

        if msg.get('MDEntryType') == '0':  # buy
            self.buy_side.insert(index, order)
            if index == 0:
                self.inst_status.set_best_bid(msg.get('MDEntryPx'))

        elif msg.get('MDEntryType') == '1':  # sell
            self.sell_side.insert(index, order)
            if index == 0:
                self.inst_status.set_best_ask(msg.get('MDEntryPx'))

    def on_book_update_order(self, msg):
        """Replace an order in place; update best bid/ask at the top of book."""
        index = msg.get('MDEntryPositionNo') - 1
        order = {
            'price': msg.get('MDEntryPx'),
            'qty': msg.get('MDEntrySize'),
            'username': msg.get('Username'),
            'user_id': msg.get('UserID'),
            'broker': msg.get('Broker'),
            'order_id': msg.get('OrderID'),
            'side': msg.get('MDEntryType'),
            'order_time': msg.get('MDEntryTime'),
            'order_date': msg.get('MDEntryDate')
        }
        if msg.get('MDEntryType') == '0':  # buy
            self.buy_side[index] = order
            if index == 0:
                self.inst_status.set_best_bid(msg.get('MDEntryPx'))

        elif msg.get('MDEntryType') == '1':  # sell
            self.sell_side[index] = order
            if index == 0:
                self.inst_status.set_best_ask(msg.get('MDEntryPx'))

    def on_trade(self, msg):
        """Persist a trade, accumulate per-currency volume, and publish status.

        Trades received before ready() are buffered for later processing.
        """
        if not self.is_ready:
            self.process_later.append(msg)
            return

        trade = {
            "price": msg.get('MDEntryPx'),
            "symbol": msg.get('Symbol'),
            "size": msg.get('MDEntrySize'),
            "trade_date": msg.get('MDEntryDate'),
            "trade_time": msg.get('MDEntryTime'),
            "order_id": msg.get('OrderID'),
            "side": msg.get('Side'),
            "counter_order_id": msg.get('SecondaryOrderID'),
            "id": msg.get('TradeID'),
            "buyer_id": msg.get('MDEntryBuyerID'),
            "seller_id": msg.get('MDEntrySellerID'),
            "buyer_username": msg.get('MDEntryBuyer'),
            "seller_username": msg.get('MDEntrySeller'),
        }

        Trade.create(self.db_session, trade)

        # Symbol is e.g. 'BTCBRL': first 3 chars are the size currency,
        # the remainder is the price currency.
        price_currency = self.symbol[3:]
        size_currency = self.symbol[:3]
        if price_currency not in self.volume_dict:
            self.volume_dict[price_currency] = 0
        if size_currency not in self.volume_dict:
            self.volume_dict[size_currency] = 0

        # Prices/sizes appear to be fixed-point with 1e8 scale -- TODO confirm.
        volume_price = int(
            msg.get('MDEntryPx') * msg.get('MDEntrySize') / 1.e8)

        volume_size = msg.get('MDEntrySize')
        self.volume_dict[price_currency] += volume_price
        self.volume_dict[size_currency] += volume_size

        self.volume_dict['MDEntryType'] = '4'
        signal_publish_md_status('MD_STATUS', self.volume_dict)

        self.inst_status.push_trade(trade)
Esempio n. 41
0
class MDPBroker(object):

    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    meant to implement additional features (like logging).

    The broker uses ZMQ ROUTER sockets to deal with clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers.
    :type main_ep:     str
    :param client_ep:  the clients endpoint
    :type client_ep:   str
    :param hb_ep:      the heart beat endpoint for workers.
    :type hb_ep:       str
    :param service_q:  the class to be used for the service worker-queue.
    :type service_q:   class
    """

    CLIENT_PROTO = C_CLIENT  #: Client protocol identifier
    WORKER_PROTO = W_WORKER  #: Worker protocol identifier


    def __init__(self, context, main_ep, client_ep, hb_ep, service_q=None):
        """Init MDPBroker instance.
        """

        if service_q is None:
            self.service_q = ServiceQueue
        else:
            self.service_q = service_q

        #
        # Setup the zmq sockets.
        #
        socket = context.socket(zmq.ROUTER)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)

        socket = context.socket(zmq.ROUTER)
        socket.bind(client_ep)
        self.client_stream = ZMQStream(socket)
        self.client_stream.on_recv(self.on_message)

        socket = context.socket(zmq.ROUTER)
        socket.bind(hb_ep)
        self.hb_stream = ZMQStream(socket)
        self.hb_stream.on_recv(self.on_message)

        self._workers = {}

        #
        # services contain the service queue and the request queue
        #
        self._services = {}

        #
        # Mapping of worker commands and callbacks.
        #
        self._worker_cmds = {
            W_READY: self.on_ready,
            W_REPLY: self.on_reply,
            W_HEARTBEAT: self.on_heartbeat,
            W_DISCONNECT: self.on_disconnect,
        }

        #
        # 'Cleanup' timer for workers without heartbeat.
        #
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """

        if wid in self._workers:
            logging.info('Worker {} already registered'.format(service))
            return

        logging.info('Registering new worker {}'.format(service))

        self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service, self.main_stream)

        if service in self._services:
            wq, wr = self._services[service]
            wq.put(wid)
        else:
            q = self.service_q()
            q.put(wid)
            self._services[service] = (q, [])

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """

        try:
            wrep = self._workers[wid]
        except KeyError:
            #
            # Not registered, ignore
            #
            return

        logging.info('Unregistering worker {}'.format(wrep.service))

        wrep.shutdown()

        service = wrep.service
        if service in self._services:
            wq, wr = self._services[service]
            wq.remove(wid)

        del self._workers[wid]

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """

        try:
            wrep = self._workers[wid]
        except KeyError:
            #
            # Not registered, ignore
            #
            return

        logging.info('Disconnecting worker {}'.format(wrep.service))

        to_send = [wid, self.WORKER_PROTO, W_DISCONNECT]
        self.main_stream.send_multipart(to_send)

        self.unregister_worker(wid)

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """

        if service == MMI_SERVICE:
            logging.debug('Send reply to client from worker {}'.format(service))
        else:
            logging.info('Send reply to client from worker {}'.format(service))

        to_send = rp[:]
        to_send.extend([EMPTY_FRAME, self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """

        logging.debug('Shutting down')

        self.main_stream.on_recv(None)
        self.main_stream.socket.setsockopt(zmq.LINGER, 0)
        self.main_stream.socket.close()
        self.main_stream.close()
        self.main_stream = None

        self.client_stream.on_recv(None)
        self.client_stream.socket.setsockopt(zmq.LINGER, 0)
        self.client_stream.socket.close()
        self.client_stream.close()
        self.client_stream = None

        self.hb_stream.on_recv(None)
        self.hb_stream.socket.setsockopt(zmq.LINGER, 0)
        self.hb_stream.socket.close()
        self.hb_stream.close()
        self.hb_stream = None

        self._workers = {}
        self._services = {}

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """

        #
        #  Remove 'dead' (not responding to heartbeats) workers.
        #
        # Iterate over a snapshot: unregister_worker() deletes entries from
        # self._workers, and mutating a dict while iterating its live
        # .values() view raises RuntimeError on Python 3.
        #
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.unregister_worker(wrep.id)

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        ret_id = rp[0]
        logging.debug('Worker sent ready msg: {} ,{}'.format(rp, msg))
        self.register_worker(ret_id, msg[0])

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        ret_id = rp[0]
        wrep = self._workers.get(ret_id)

        if not wrep:
            #
            # worker not found, ignore message
            #
            logging.error(
                "Worker with return id {} not found. Ignore message.".format(
                    ret_id))
            return

        service = wrep.service
        logging.info("Worker {} sent reply.".format(service))

        try:
            wq, wr = self._services[service]

            #
            # Send response to client
            #
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)

            #
            # make worker available again
            #
            wq.put(wrep.id)

            if wr:
                logging.info("Sending queued message to worker {}".format(service))
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            #
            # unknown service
            #
            self.disconnect(ret_id)

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        #
        # Note:
        # The modified heartbeat of the worker is sent over a separate socket
        # stream (self.hb_stream). Therefore the ret_id is wrong. Instead the
        # worker sends its id in the message.
        #
        if len(msg) > 0:
            ret_id = msg[0]
        else:
            ret_id = rp[0]

        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            #
            # Ignore HB for unknown worker
            #
            pass

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        wid = rp[0]
        self.unregister_worker(wid)

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        mmi.service is used for querying if a specific service is available.
        mmi.services is used for querying the list of services available.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the protocol id sent
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """

        if service == MMI_SERVICE:
            s = msg[0]
            ret = [UNKNOWN_SERVICE]

            for wr in self._workers.values():
                if s == wr.service:
                    ret = [KNOWN_SERVICE]
                    break

        elif service == MMI_SERVICES:
            #
            # Return list of services
            #
            ret = [wr.service for wr in self._workers.values()]

        elif service == MMI_TUNNELS:
            #
            # Read the tunnel files, and send back the network info.
            #
            tunnel_paths = glob.glob(os.path.expanduser("~/tunnel_port_*.txt"))
            tunnels_data = {}
            for path in tunnel_paths:
                filename = os.path.split(path)[-1]
                service_name = filename[-7:-4]
                with open(path, 'r') as f:
                    tunnels_data[service_name] = json.load(f)
            ret = [cPickle.dumps(tunnels_data)]
        else:
            #
            # Unknown command.
            #
            ret = [UNKNOWN_COMMAND]

        self.client_response(rp, service, ret)

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """

        service = msg.pop(0)

        if service.startswith(b'mmi.'):
            logging.debug("Got MMI message from client.")
            self.on_mmi(rp, service, msg)
            return

        logging.info("Client sends message (possibly queued) to worker {}".format(service))

        try:
            wq, wr = self._services[service]
            wid = wq.get()

            if not wid:
                #
                # No worker ready. Queue message
                #
                logging.info("Worker {} missing. Queuing message.".format(service))
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return

            wrep = self._workers[wid]
            to_send = [wrep.id, EMPTY_FRAME, self.WORKER_PROTO, W_REQUEST]
            to_send.extend(rp)
            to_send.append(EMPTY_FRAME)
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)

        except KeyError:
            #
            # Unknown service. Ignore request.
            #
            logging.info('broker has no service "{}"'.format(service))

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            #
            # Ignore unknown command. Disconnect worker.
            #
            logging.error("Unknown worker command: {}".format(cmd))
            self.disconnect(rp[0])

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """

        rp, msg = split_address(msg)

        try:
            #
            # Dispatch on first frame after path
            #
            t = msg.pop(0)
            if t.startswith(b'MDPW'):
                logging.debug('Recieved message from worker {}'.format(rp))
                self.on_worker(t, rp, msg)
            elif t.startswith(b'MDPC'):
                logging.debug('Recieved message from client {}'.format(rp))
                self.on_client(t, rp, msg)
            else:
                logging.error('Broker unknown Protocol: "{}"'.format(t))
        except Exception:
            #
            # Catch Exception (not a bare except) so SystemExit and
            # KeyboardInterrupt still propagate to the IOLoop.
            #
            logging.error(
                "An error occured while trying to process message: rp: {}, msg: {}\n{}".format(
                    rp, msg, traceback.format_exc()
                )
            )
Esempio n. 42
0
class ZmqSubscriber(object):
    """SUB-side helper that routes messages from a central PUB endpoint.

    Handlers are registered per topic prefix; each incoming message is
    expected to look like ``<topic>:<body>`` and is dispatched to every
    handler whose prefix matches the topic.

    NOTE(review): this is Python 2 code (``iteritems``/``iterkeys``/``print``
    statements) -- keep that in mind before porting.
    """

    def __init__(self,
                 moduleName,
                 centralHost=SUBSCRIBER_OPT_DEFAULTS['centralHost'],
                 context=None,
                 centralPublishEndpoint=SUBSCRIBER_OPT_DEFAULTS[
                     'centralPublishEndpoint'],
                 replay=None):
        """Store config; no sockets are created until start() is called."""
        self.moduleName = moduleName
        self.centralHost = centralHost

        # Share the process-wide context unless the caller supplies one.
        if context is None:
            context = zmq.Context.instance()
        self.context = context

        self.centralPublishEndpoint = parseEndpoint(
            centralPublishEndpoint,
            defaultPort=DEFAULT_CENTRAL_PUBLISH_PORT,
            centralHost=self.centralHost)
        self.replayPaths = replay
        if self.replayPaths is None:
            self.replayPaths = []

        # Maps topicPrefix -> {counter: handler}; counter gives each
        # subscription a unique id so it can be unsubscribed individually.
        self.handlers = {}
        self.counter = 0
        self.deserializer = serializers.get_deserializer('json')
        # ZMQStream; remains None until start() is called.
        self.stream = None

    @classmethod
    def addOptions(cls, parser, defaultModuleName):
        """Add the subscriber's command-line options to an optparse parser.

        Each option is only added if not already present, so multiple
        components can share one parser.
        """
        if not parser.has_option('--centralHost'):
            parser.add_option('--centralHost',
                              default=SUBSCRIBER_OPT_DEFAULTS['centralHost'],
                              help='Host where central runs [%default]')
        if not parser.has_option('--moduleName'):
            parser.add_option('--moduleName',
                              default=defaultModuleName,
                              help='Name to use for this module [%default]')
        if not parser.has_option('--centralPublishEndpoint'):
            parser.add_option(
                '--centralPublishEndpoint',
                default=SUBSCRIBER_OPT_DEFAULTS['centralPublishEndpoint'],
                help='Endpoint where central publishes messages [%default]')
        if not parser.has_option('--replay'):
            parser.add_option(
                '--replay',
                action='append',
                help=
                'Replay specified message log (can specify multiple times), or use - to read from stdin'
            )

    @classmethod
    def getOptionValues(cls, opts):
        """Extract the subscriber-related options that were actually set.

        Returns a dict suitable for passing as keyword args to __init__.
        """
        result = {}
        for key in SUBSCRIBER_OPT_DEFAULTS.iterkeys():
            val = getattr(opts, key, None)
            if val is not None:
                result[key] = val
        return result

    def start(self):
        """Create the SUB socket, connect to central and start receiving."""
        sock = self.context.socket(zmq.SUB)
        self.stream = ZMQStream(sock)
        # causes problems with multiple instances
        #self.stream.setsockopt(zmq.IDENTITY, self.moduleName)
        self.stream.connect(self.centralPublishEndpoint)
        logging.info('zmq.subscriber: connected to central at %s',
                     self.centralPublishEndpoint)
        self.stream.on_recv(self.routeMessages)

    def routeMessages(self, messages):
        """ZMQStream on_recv callback: route each received frame."""
        for msg in messages:
            self.routeMessage(msg)

    def routeMessage(self, msg):
        """Dispatch one '<topic>:<body>' message to all matching handlers.

        Returns 1 if at least one handler matched, else 0.
        """
        colonIndex = msg.find(':')
        # topic keeps the trailing ':' so prefix matching works; handlers
        # receive the topic with the colon stripped off again below.
        topic = msg[:(colonIndex + 1)]
        body = msg[(colonIndex + 1):]

        handled = 0
        for topicPrefix, registry in self.handlers.iteritems():
            if topic.startswith(topicPrefix):
                for handler in registry.itervalues():
                    handler(topic[:-1], body)
                    handled = 1

        return handled

    def subscribeRaw(self, topicPrefix, handler):
        """Register *handler* for messages whose topic starts with *topicPrefix*.

        Returns an opaque handlerId usable with unsubscribe().
        """
        topicRegistry = self.handlers.setdefault(topicPrefix, {})
        # Only issue the zmq SUBSCRIBE the first time this prefix is used.
        if not topicRegistry:
            logging.info('zmq.subscriber: subscribe %s', topicPrefix)
            self.stream.setsockopt(zmq.SUBSCRIBE, topicPrefix)
        handlerId = (topicPrefix, self.counter)
        topicRegistry[self.counter] = handler
        self.counter += 1
        return handlerId

    def subscribeJson(self, topicPrefix, handler):
        """Like subscribeRaw, but the body is JSON-decoded into a DotDict."""
        def jsonHandler(topicPrefix, body):
            return handler(topicPrefix,
                           convertToDotDictRecurse(json.loads(body)))

        return self.subscribeRaw(topicPrefix, jsonHandler)

    def subscribeDjango(self, topicPrefix, handler):
        """Like subscribeRaw, but the body's 'data' field is deserialized
        into a Django model instance before being passed to *handler*."""
        def djangoHandler(topicPrefix, body):
            obj = json.loads(body)
            dataText = json.dumps([obj['data']])
            modelInstance = list(self.deserializer(dataText))[0]
            return handler(topicPrefix, modelInstance.object)

        return self.subscribeRaw(topicPrefix, djangoHandler)

    def unsubscribe(self, handlerId):
        """Remove a handler; drops the zmq subscription when none remain."""
        topicPrefix, index = handlerId
        topicRegistry = self.handlers[topicPrefix]
        del topicRegistry[index]
        if not topicRegistry:
            logging.info('zmq.subscriber: unsubscribe %s', topicPrefix)
            self.stream.setsockopt(zmq.UNSUBSCRIBE, topicPrefix)

    def connect(self, endpoint):
        """Connect the underlying stream to an additional endpoint."""
        self.stream.connect(endpoint)

    def replay(self):
        """Re-route messages from the configured replay logs ('-' = stdin)."""
        numReplayed = 0
        numHandled = 0
        for replayPath in self.replayPaths:
            print '=== replaying messages from %s' % replayPath
            if replayPath == '-':
                replayFile = sys.stdin
            else:
                replayFile = open(replayPath, 'rb')
            stream = LogParser(replayFile)
            for rec in stream:
                numReplayed += 1
                numHandled += self.routeMessage(rec.msg)

                if numReplayed % 10000 == 0:
                    print 'replayed %d messages, %d handled' % (numReplayed,
                                                                numHandled)
Esempio n. 43
0
# Results collected so far, keyed by task uuid.
result_dict = {}


def update_result(msg_list):
    """Collect results received over the PULL stream.

    msg_list - list of JSON strings, each carrying a 'uuid' key.

    A single callback invocation may deliver several messages, hence
    the loop over the whole list.
    """
    for raw_msg in msg_list:
        decoded = json.loads(raw_msg)
        result_dict[decoded['uuid']] = decoded

# Register update_result as the on-receive callback for the PULL stream so
# task results are folded into result_dict as soon as they arrive.
zstream_pull.on_recv(update_result)


class MDHandler(tornado.web.RequestHandler):
    reult = None

    @asynchronous
    @gen.engine
    def post(self):
        # key for this task
        self.result_key = str(uuid())
        self.result = None

        # pack the task
        src = self.get_argument('src', None)
        msg = {
Esempio n. 44
0
class MDPClient(object):

    """Class for the MDP client side.

    Thin asynchronous encapsulation of a zmq.REQ socket.
    Provides a :func:`request` method with optional timeout.

    Objects of this class are meant to be integrated into the
    asynchronous IOLoop of pyzmq.

    :param context:  the ZeroMQ context to create the socket in.
    :type context:   zmq.Context
    :param endpoint: the endpoint to connect to.
    :type endpoint:  str
    :param service:  the service the client should use
    :type service:   str
    """

    _proto_version = C_CLIENT

    def __init__(self, context, endpoint):
        """Initialize the MDPClient.
        """

        self.context = context
        self.endpoint = endpoint
        # Initialized here so shutdown() is safe to call even if start()
        # was never invoked (previously this raised AttributeError).
        self.stream = None

    def start(self):
        """
        Initialize the zmq sockets on a ioloop stream.
        The separation of this part from the init is useful if
        we start the client on a separate thread with a new ioloop
        (for example to enable use in an ipython notebook)
        """
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self._proto_prefix = [EMPTY_FRAME, self._proto_version]
        self._delayed_timeout = None
        self.timed_out = False
        socket.connect(self.endpoint)

    def shutdown(self):
        """Method to deactivate the client connection completely.

        Will delete the stream and the underlying socket.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """

        if not self.stream:
            return

        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None

    def request(self, service, msg, msg_extra=STANDARD, timeout=None):
        """Send the given message.

        :param msg:     message parts to send.
        :type msg:      list of str
        :param msg_extra: Extra message flags (e.g. STANDARD or BROADCAST)
        :type msg_extra: int
        :param timeout: time to wait in milliseconds.
        :type timeout:  int

        :rtype None:
        """
        # isinstance (rather than an exact type() check) also accepts
        # subclasses of bytes/unicode.
        if isinstance(msg, (bytes, unicode)):
            msg = [msg]

        #
        # prepare full message
        #
        to_send = [chr(msg_extra)] + self._proto_prefix[:]
        to_send.extend([service])
        to_send.extend(msg)

        self.stream.send_multipart(to_send)

        if timeout:
            self._start_timeout(timeout)

    def _on_timeout(self):
        """Helper called after timeout.
        """

        self.timed_out = True
        self._delayed_timeout = None
        self.on_timeout()

    def _start_timeout(self, timeout):
        """Helper for starting the timeout.

        :param timeout:  the time to wait in milliseconds.
        :type timeout:   int
        """

        self._delayed_timeout = DelayedCallback(self._on_timeout, timeout)
        self._delayed_timeout.start()

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:   list of message parts.
        :type msg:    list of str
        """

        if self._delayed_timeout:
            #
            # disable timeout
            #
            self._delayed_timeout.stop()
            self._delayed_timeout = None

        self.on_message(msg)

    def on_message(self, msg):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """

        raise NotImplementedError('on_message must be implemented by the subclass.')

    def on_timeout(self):
        """Public method called when a timeout occurred.

        .. note:: Does nothing. Should be overloaded!
        """
        raise NotImplementedError('on_timeout must be implemented by the subclass.')
Esempio n. 45
0
class Master(object):
    """Crawl master: hands out requests to workers over ZMQ and collects
    processed responses.

    Workers push processed data to ``data_in_sock`` (SUB side here) and
    receive new requests from ``data_out_sock`` (PUSH side here); control
    messages travel via a ServerMessenger on the msg sockets.
    """

    def __init__(self,
                 frontier,
                 data_in_sock='ipc:///tmp/robot-data-w2m.sock',
                 data_out_sock='ipc:///tmp/robot-data-m2w.sock',
                 msg_in_sock='ipc:///tmp/robot-msg-w2m.sock',
                 msg_out_sock='ipc:///tmp/robot-msg-m2w.sock',
                 io_loop=None):
        self.identity = 'master:%s:%s' % (socket.gethostname(), os.getpid())

        context = zmq.Context()

        # Resolve the loop once and use it consistently below.  Previously
        # self._io_loop was computed but never used: the streams and timers
        # received the raw -- possibly None -- io_loop argument instead.
        self._io_loop = io_loop or IOLoop.instance()

        # Incoming processed data from workers.
        self._in_socket = context.socket(zmq.SUB)
        self._in_socket.setsockopt(zmq.SUBSCRIBE, '')
        self._in_socket.bind(data_in_sock)
        self._in_stream = ZMQStream(self._in_socket, self._io_loop)

        # Outgoing requests to workers.
        self._out_socket = context.socket(zmq.PUSH)
        self._out_socket.bind(data_out_sock)
        self._out_stream = ZMQStream(self._out_socket, self._io_loop)

        self._online_workers = set()
        self._running = False

        self._updater = PeriodicCallback(self._send_next, 100,
                                         io_loop=self._io_loop)
        self._reloader = PeriodicCallback(self.reload, 1000,
                                          io_loop=self._io_loop)

        self.frontier = frontier
        self.messenger = ServerMessenger(msg_in_sock, msg_out_sock, context,
                                         self._io_loop)

    def start(self):
        """Begin serving: listen for worker control messages and data."""
        logging.info('[%s] starting', self.identity)
        self.messenger.add_callback(CTRL_MSG_WORKER, self._on_worker_msg)
        self.messenger.start()

        self._in_stream.on_recv(self._on_receive_processed)
        self._updater.start()
        self._reloader.start()
        self._running = True

    def stop(self):
        """Stop timers and the messenger; sockets stay open until close()."""
        self._running = False
        self._reloader.stop()
        self._updater.stop()
        self.messenger.stop()
#        self.messenger.publish(CTRL_MSG_WORKER, self.identity,
#                CTRL_MSG_WORKER_QUIT)

    def close(self):
        """Release all streams and sockets."""
        self._in_stream.close()
        self._in_socket.close()
        self._out_stream.close()
        self._out_socket.close()
        self.messenger.close()

    def reload(self):
        """Periodic hook; intentionally a no-op in the base class."""
        pass

    def _on_worker_msg(self, msg):
        """Track workers that announce themselves online."""
        if msg.data == CTRL_MSG_WORKER_ONLINE:
            self._online_workers.add(msg.identity)
            logging.info('[%s] append [%s]', self.identity, msg.identity)
            self._send_next()


#        if msg.data == CTRL_MSG_WORKER_QUIT_ACK:
#            if msg.identity in self._online_workers:
#                self._online_workers.remove(msg.identity)

    def _send_next(self):
        """Top up the outgoing queue with requests from the frontier.

        Keeps roughly 4 requests in flight per online worker.
        NOTE(review): peeks at the stream's private _send_queue -- there is
        no public queue-depth API in ZMQStream.
        """
        if not self._running:
            return

        worker_num = len(self._online_workers)

        if self._running and worker_num > 0:
            while self._out_stream._send_queue.qsize() < worker_num * 4:
                request = self.frontier.get_next_request()
                if not request:
                    break

                msg = RequestMessage(self.identity, request)
                self._out_stream.send_multipart(msg.serialize())
                logging.debug('[%s] send request(%s)', self.identity,
                              request.url)

                self.frontier.reload_request(request)

    def _on_receive_processed(self, zmq_msg):
        """Handle a processed response from a worker and refill the queue."""
        msg = ResponseMessage.deserialize(zmq_msg)
        request = msg.response.request
        logging.debug('[%s] receive response(%s)', self.identity, request.url)
        self._send_next()
Esempio n. 46
0
class ZMQ(Client):
    """ZeroMQ transport client.

    Sends framed messages (via ``Msg``) over a REQ socket by default, or a
    DEALER socket for fire-and-forget (``nowait``) and batched "fireball"
    submissions.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):

        # do our best to clean up potentially leaky FDs
        if hasattr(self, 'stream') and not self.stream.closed():
            self.stream.close()

        if hasattr(self, 'loop'):
            try:
                self.loop.close()
            except KeyError:
                pass

        self.socket.close()
        self.context.term()

    def __init__(self, remote, token, **kwargs):
        """
        :param remote: endpoint to connect to, e.g. ``tcp://host:port``
        :param token: auth token attached to every outgoing message
        :param kwargs: ``nowait`` (bool) -- fire-and-forget DEALER mode;
                       ``autoclose`` (bool) -- close socket after a nowait send
        """
        super(ZMQ, self).__init__(remote, token)

        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.socket.RCVTIMEO = RCVTIMEO
        self.socket.SNDTIMEO = SNDTIMEO
        self.socket.setsockopt(zmq.LINGER, LINGER)
        # original assigned autoclose twice; kept a single assignment
        self.nowait = kwargs.get('nowait', False)
        self.autoclose = kwargs.get('autoclose', True)
        if self.nowait:
            # Fire-and-forget mode uses a DEALER socket instead.  Close the
            # REQ socket first so its FD is not leaked (the original simply
            # dropped the reference).
            # NOTE(review): the DEALER socket does not inherit the
            # RCVTIMEO/SNDTIMEO/LINGER options set above -- confirm intended.
            self.socket.close()
            self.socket = self.context.socket(zmq.DEALER)

    def _handle_message_fireball(self, m):
        """on_recv callback for fireball mode: collect one batch response."""
        logger.debug('message received')

        m = json.loads(m[2].decode('utf-8'))
        self.response.append(m)

        self.num_responses -= 1
        logger.debug('num responses remaining: %i' % self.num_responses)

        if self.num_responses == 0:
            logger.debug('finishing up...')
            self.loop.stop()

    def _fireball_timeout(self):
        """Give up waiting for outstanding fireball responses."""
        logger.info('fireball timeout')
        self.loop.stop()

    def _send_fireball(self, mtype, data, f_size):
        """Submit indicators in batches of ``f_size`` over a DEALER socket
        and collect one response per batch via the IOLoop.

        :param mtype: message type (unused here; batches are always sent
                      as Msg.INDICATORS_CREATE, matching the original)
        :param data: JSON string (or bytes) holding one indicator or a list
        :param f_size: batch size
        :return: list of decoded batch responses (possibly empty)
        """
        if len(data) < 3:
            logger.error('no data to send')
            return []

        # Original used IOLoop().instance(), which constructs a throwaway
        # loop object just to reach the class-level singleton accessor.
        self.loop = IOLoop.instance()
        self.socket.close()

        self.socket = self.context.socket(zmq.DEALER)
        self.socket.connect(self.remote)

        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self._handle_message_fireball)

        # safety net: stop the loop if responses never arrive
        self.stream.io_loop.call_later(SNDTIMEO, self._fireball_timeout)

        self.response = []

        if PYVERSION == 3:
            if isinstance(data, bytes):
                data = data.decode('utf-8')

        data = json.loads(data)

        if not isinstance(data, list):
            data = [data]

        # one response is expected per full or partial batch
        if (len(data) % f_size) == 0:
            self.num_responses = int((len(data) / f_size))
        else:
            self.num_responses = int((len(data) / f_size)) + 1

        logger.debug('responses expected: %i' % self.num_responses)

        batch = []
        for d in data:
            batch.append(d)
            if len(batch) == f_size:
                Msg(mtype=Msg.INDICATORS_CREATE, token=self.token, data=batch).send(self.socket)
                batch = []

        # flush the final partial batch
        if len(batch):
            Msg(mtype=Msg.INDICATORS_CREATE, token=self.token, data=batch).send(self.socket)

        logger.debug("starting loop to receive")
        self.loop.start()

        # clean up FDs
        self.loop.close()
        self.stream.close()
        self.socket.close()
        return self.response

    def _recv(self, decode=True, close=True):
        """Receive one reply and optionally decode/validate it.

        :param decode: if False, return the raw payload
        :param close: close the socket after receiving
        :raises AuthError: server said 'unauthorized'
        :raises CIFBusy: server said 'busy'
        :raises InvalidSearch: server said 'invalid search'
        :raises RuntimeError: non-success status or missing data
        """
        mtype, data = Msg().recv(self.socket)
        if close:
            self.socket.close()

        if not decode:
            return data

        data = json.loads(data)

        if data.get('message') == 'unauthorized':
            raise AuthError()

        if data.get('message') == 'busy':
            raise CIFBusy()

        if data.get('message') == 'invalid search':
            raise InvalidSearch()

        if data.get('status') != 'success':
            raise RuntimeError(data.get('message'))

        if data.get('data') is None:
            raise RuntimeError('invalid response')

        if isinstance(data.get('data'), bool):
            return data['data']

        # is this a straight up elasticsearch string?
        if data['data'] == '{}':
            return []

        # NOTE(review): ``basestring`` is a Python 2 name; assumes a py3
        # compat shim is defined at module level -- confirm.
        if isinstance(data['data'], basestring) and data['data'].startswith('{"hits":{"hits":[{"_source":'):
            data['data'] = json.loads(data['data'])
            data['data'] = [r['_source'] for r in data['data']['hits']['hits']]

        # payload may be zlib-compressed; fall through untouched otherwise
        try:
            data['data'] = zlib.decompress(data['data'])
        except (zlib.error, TypeError):
            pass

        return data.get('data')

    def _send(self, mtype, data='[]', nowait=False, decode=True):
        """Connect, send one message, and (unless nowait) return the reply."""
        self.socket.connect(self.remote)

        if isinstance(data, str):
            data = data.encode('utf-8')

        Msg(mtype=mtype, token=self.token, data=data).send(self.socket)

        if self.nowait or nowait:
            if self.autoclose:
                self.socket.close()
            return

        rv = self._recv(decode=decode)
        return rv

    def ping(self):
        """Round-trip liveness check; raises TimeoutError on no reply."""
        try:
            return self._send(Msg.PING)
        except zmq.error.Again:
            raise TimeoutError

    def ping_write(self):
        """Liveness check for the write path; raises TimeoutError on no reply."""
        try:
            return self._send(Msg.PING_WRITE)
        except zmq.error.Again:
            raise TimeoutError

    def indicators_search(self, filters, decode=True):
        """Search indicators matching ``filters`` (dict)."""
        return self._send(Msg.INDICATORS_SEARCH, json.dumps(filters), decode=decode)

    def graph_search(self, filters, decode=True):
        """Search the graph store matching ``filters`` (dict)."""
        return self._send(Msg.GRAPH_SEARCH, json.dumps(filters), decode=decode)

    def stats_search(self, filters, decode=True):
        """Search stats matching ``filters`` (dict)."""
        return self._send(Msg.STATS_SEARCH, json.dumps(filters), decode=decode)

    def indicators_create(self, data, nowait=False, fireball=False, f_size=FIREBALL_SIZE):
        """Create indicators; ``fireball`` batches them asynchronously."""
        if isinstance(data, dict):
            data = self._kv_to_indicator(data)

        if isinstance(data, Indicator):
            data = str(data)

        if fireball:
            return self._send_fireball(Msg.INDICATORS_CREATE, data, f_size)

        return self._send(Msg.INDICATORS_CREATE, data, nowait=nowait)

    def indicators_delete(self, data):
        """Delete indicators described by ``data`` (dict/Indicator/str)."""
        if isinstance(data, dict):
            data = self._kv_to_indicator(data)

        if isinstance(data, Indicator):
            data = str(data)

        return self._send(Msg.INDICATORS_DELETE, data)

    def tokens_search(self, filters=None):
        """Search tokens; ``filters`` defaults to no filtering.

        The original used a mutable default argument (``filters={}``);
        behavior is unchanged.
        """
        return self._send(Msg.TOKENS_SEARCH, json.dumps(filters or {}))

    def tokens_create(self, data):
        """Create a token from the given serialized data."""
        return self._send(Msg.TOKENS_CREATE, data)

    def tokens_delete(self, data):
        """Delete a token described by the given serialized data."""
        return self._send(Msg.TOKENS_DELETE, data)

    def tokens_edit(self, data):
        """Edit a token with the given serialized data."""
        return self._send(Msg.TOKENS_EDIT, data)
Esempio n. 47
0
class ZmqCentral(object):
    """Central message hub.

    Bridges module pub/sub traffic through a FORWARDER device, logs every
    message (spilling attachments to disk), tracks module liveness from
    heartbeat messages, and serves a small JSON-RPC interface.
    """

    def __init__(self, opts):
        self.opts = opts
        # moduleName -> latest heartbeat params (augmented with 'timeout')
        self.info = {}

    def announceConnect(self, moduleName, params):
        """Broadcast that *moduleName* has (re)connected."""
        logging.info('module %s connected', moduleName)
        self.injectStream.send('central.connect.%s:%s' %
                               (moduleName, json.dumps(params)))

    def announceDisconnect(self, moduleName):
        """Broadcast that *moduleName* has disconnected."""
        logging.info('module %s disconnected', moduleName)
        self.injectStream.send(
            'central.disconnect.%s:%s' %
            (moduleName, json.dumps({'timestamp': str(getTimestamp())})))

    def logMessage(self, msg, posixTime=None, attachmentDir='-'):
        """Append one record to the message log.

        Record format: '@@@ <timestamp> <length> <attachmentDir> <msg>';
        attachmentDir is '-' when the message had no attachments.
        """
        mlog = self.messageLog
        mlog.write('@@@ %d %d %s ' %
                   (getTimestamp(posixTime), len(msg), attachmentDir))
        mlog.write(msg)
        mlog.write('\n')

    def logMessageWithAttachments0(self, msg):
        """Write a message's attachments to disk, then log the message with
        a pointer to the attachment directory."""
        parsed = parseMessage(msg)
        posixTime = time.time()

        # construct attachment directory (date/time/topic/random suffix)
        dt = datetime.datetime.utcfromtimestamp(posixTime)
        dateText = dt.strftime('%Y-%m-%d')
        timeText = dt.strftime('%H-%M-%S') + '.%06d' % dt.microsecond
        uniq = '%08x' % random.getrandbits(32)
        attachmentSuffix = os.path.join('attachments', dateText, timeText,
                                        parsed['topic'], uniq)
        attachmentPath = os.path.join(self.logDir, attachmentSuffix)
        os.makedirs(attachmentPath)

        # write attachments to attachment directory
        for attachment in parsed['attachments']:
            fullName = os.path.join(attachmentPath, attachment.get_filename())
            open(fullName, 'wb').write(attachment.get_payload())

        # log message with a pointer to the attachment directory
        self.logMessage(':'.join((parsed['topic'], parsed['json'])), posixTime,
                        attachmentSuffix)

    def logMessageWithAttachments(self, msg):
        """Best-effort wrapper: never let attachment logging kill the hub."""
        try:
            return self.logMessageWithAttachments0(msg)
        except:  # pylint: disable=W0702
            self.logException('logging message with attachments')

    def handleHeartbeat(self, params):
        """Record a module heartbeat; announce connect/disconnect when the
        module is new or its pub endpoint changed. Returns 'ok'."""
        moduleName = params['module'].encode('utf-8')
        now = getTimestamp()

        oldInfo = self.info.get(moduleName, None)
        if oldInfo:
            if oldInfo.get('pub', None) != params.get('pub', None):
                # pub endpoint changed: treat as a reconnect
                self.announceDisconnect(moduleName)
                self.announceConnect(moduleName, params)
        else:
            self.announceConnect(moduleName, params)

        self.info[moduleName] = params
        keepalive = params.get('keepalive', DEFAULT_KEEPALIVE_US)
        params['timeout'] = now + keepalive
        return 'ok'

    def handleInfo(self):
        """RPC: return the module liveness table."""
        return self.info

    def logException(self, whileClause):
        """Log the current exception with traceback and context."""
        errClass, errObject, errTB = sys.exc_info()[:3]
        errText = '%s.%s: %s' % (errClass.__module__, errClass.__name__,
                                 str(errObject))
        logging.warning(''.join(traceback.format_tb(errTB)))
        logging.warning(errText)
        logging.warning('[error while %s at time %s]', whileClause,
                        getTimestamp())

    def handleMessages(self, messages):
        """Log every forwarded message; process heartbeats inline."""
        for msg in messages:
            if hasAttachments(msg):
                self.logMessageWithAttachments(msg)
            else:
                self.logMessage(msg)
            if msg.startswith('central.heartbeat.'):
                try:
                    _topic, body = msg.split(':', 1)
                    self.handleHeartbeat(json.loads(body))
                except:  # pylint: disable=W0702
                    self.logException('handling heartbeat')

    def handleRpcCall(self, messages):
        """Serve JSON-RPC requests arriving on the REP stream."""
        for msg in messages:
            try:
                call = json.loads(msg)
                callId = call['id']
            except:  # pylint: disable=W0702
                self.rpcStream.send(
                    json.dumps({
                        'result': None,
                        'error': 'malformed request'
                    }))
                # Fix: the original fell through into the block below and
                # referenced the undefined 'call'/'callId'; skip instead.
                continue

            try:
                method = call['method']
                _params = call['params']
                if method == 'info':
                    result = self.handleInfo()
                else:
                    raise ValueError('unknown method %s' % method)
                self.rpcStream.send(
                    json.dumps({
                        'result': result,
                        'error': None,
                        'id': callId
                    }))
            except:  # pylint: disable=W0702
                self.logException('handling rpc message')
                errClass, errObject = sys.exc_info()[:2]
                errText = '%s.%s: %s' % (errClass.__module__,
                                         errClass.__name__, str(errObject))
                self.rpcStream.send(
                    json.dumps({
                        'result': None,
                        'error': errText,
                        'id': callId
                    }))

    def handleDisconnectTimer(self):
        """Periodic sweep: announce and drop modules whose keepalive expired."""
        now = getTimestamp()
        disconnectModules = []
        for moduleName, entry in self.info.iteritems():
            timeout = entry.get('timeout', None)
            if timeout is not None and now > timeout:
                disconnectModules.append(moduleName)
        for moduleName in disconnectModules:
            self.announceDisconnect(moduleName)
            del self.info[moduleName]

    def readyLog(self, pathTemplate, timestamp):
        """Resolve a log file path under logDir.

        If the template contains '%s' it is filled with a timestamp and a
        '<template> % latest' symlink is maintained beside it.
        """
        if '%s' in pathTemplate:
            timeText = timestamp.strftime('%Y-%m-%d-%H-%M-%S')
            logFile = pathTemplate % timeText
        else:
            logFile = pathTemplate
        if not os.path.exists(self.logDir):
            os.makedirs(self.logDir)
        logPath = os.path.join(self.logDir, logFile)
        if '%s' in pathTemplate:
            latestPath = os.path.join(self.logDir, pathTemplate % 'latest')
            if os.path.islink(latestPath):
                os.unlink(latestPath)
            # Fix: symlink moved inside this branch -- latestPath is only
            # defined for timestamped templates (was a NameError otherwise).
            os.symlink(logFile, latestPath)
        return logPath

    def start(self):
        """Open logs, optionally daemonize, and wire up all ZMQ plumbing."""
        # open log files
        now = datetime.datetime.utcnow()
        self.logDir = os.path.abspath(self.opts.logDir)
        self.messageLogPath = self.readyLog(self.opts.messageLog, now)
        self.messageLog = open(self.messageLogPath, 'a')
        self.consoleLogPath = self.readyLog(self.opts.consoleLog, now)

        rootLogger = logging.getLogger()
        rootLogger.setLevel(logging.DEBUG)
        fmt = logging.Formatter('%(asctime)s - %(levelname)-7s - %(message)s')
        fmt.converter = time.gmtime
        fh = logging.FileHandler(self.consoleLogPath)
        fh.setFormatter(fmt)
        fh.setLevel(logging.DEBUG)
        rootLogger.addHandler(fh)
        if self.opts.foreground:
            ch = logging.StreamHandler()
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(fmt)
            rootLogger.addHandler(ch)

        # daemonize (classic double-fork, detach stdio)
        if self.opts.foreground:
            logging.info('staying in foreground')
        else:
            logging.info('daemonizing')
            pid = os.fork()
            if pid != 0:
                os._exit(0)
            os.setsid()
            pid = os.fork()
            if pid != 0:
                os._exit(0)
            os.chdir('/')
            os.close(1)
            os.close(2)
            nullFd = os.open('/dev/null', os.O_RDWR)
            os.dup2(nullFd, 1)
            os.dup2(nullFd, 2)

        try:
            # set up zmq
            self.context = zmq.Context.instance()
            self.rpcStream = ZMQStream(self.context.socket(zmq.REP))
            self.rpcStream.bind(self.opts.rpcEndpoint)
            self.rpcStream.on_recv(self.handleRpcCall)

            self.forwarder = ThreadDevice(zmq.FORWARDER, zmq.SUB, zmq.PUB)
            self.forwarder.setsockopt_in(zmq.IDENTITY, THIS_MODULE)
            self.forwarder.setsockopt_out(zmq.IDENTITY, THIS_MODULE)
            self.forwarder.setsockopt_in(zmq.SUBSCRIBE, '')
            self.forwarder.setsockopt_out(zmq.HWM, self.opts.highWaterMark)
            self.forwarder.bind_in(self.opts.subscribeEndpoint)
            self.forwarder.bind_in(INJECT_ENDPOINT)
            self.forwarder.bind_out(self.opts.publishEndpoint)
            self.forwarder.bind_out(MONITOR_ENDPOINT)
            for entry in self.opts.subscribeTo:
                try:
                    moduleName, endpoint = entry.split('@')
                    endpoint = parseEndpoint(endpoint)
                except ValueError:
                    raise ValueError(
                        '--subscribeTo argument "%s" is not in the format "<moduleName>@<endpoint>"'
                        % entry)
                self.forwarder.connect_in(endpoint)
                self.info[moduleName] = {'module': moduleName, 'pub': endpoint}
            self.forwarder.start()
            time.sleep(0.1)  # wait for forwarder to bind sockets

            # tap the forwarder's monitor output so every message is logged
            self.monStream = ZMQStream(self.context.socket(zmq.SUB))
            self.monStream.setsockopt(zmq.SUBSCRIBE, '')
            self.monStream.connect(MONITOR_ENDPOINT)
            self.monStream.on_recv(self.handleMessages)

            # injection point for central.* announcements
            self.injectStream = ZMQStream(self.context.socket(zmq.PUB))
            self.injectStream.connect(INJECT_ENDPOINT)

            self.disconnectTimer = ioloop.PeriodicCallback(
                self.handleDisconnectTimer, 5000)
            self.disconnectTimer.start()

        except:  # pylint: disable=W0702
            errClass, errObject, errTB = sys.exc_info()[:3]
            errText = '%s.%s: %s' % (errClass.__module__, errClass.__name__,
                                     str(errObject))
            logging.error(''.join(traceback.format_tb(errTB)))
            logging.error(errText)
            logging.error('[error during startup -- exiting]')
            sys.exit(1)

    def shutdown(self):
        """Flush the message log before exit."""
        self.messageLog.flush()
Esempio n. 48
0
class CloneServer(object):
    """Clone-pattern key-value server with Binary Star failover."""

    # Our server is defined by these properties
    ctx = None  # Context wrapper
    kvmap = None  # Key-value store
    bstar = None  # Binary Star
    sequence = 0  # How many updates so far
    port = None  # Main port we're working on
    peer = None  # Main port of our peer
    publisher = None  # Publish updates and hugz
    collector = None  # Collect updates from clients
    subscriber = None  # Get updates from peer
    pending = None  # Pending updates from client
    primary = False  # True if we're primary
    master = False  # True if we're master
    slave = False  # True if we're slave

    def __init__(self, primary=True, ports=(5556, 5566)):
        """
        :param primary: True for the primary server, False for the backup
        :param ports: (primary snapshot port, backup snapshot port)
        """
        self.primary = primary
        if primary:
            self.port, self.peer = ports
            frontend = "tcp://*:5003"
            backend = "tcp://localhost:5004"
            self.kvmap = {}
        else:
            self.peer, self.port = ports
            frontend = "tcp://*:5004"
            backend = "tcp://localhost:5003"

        self.ctx = zmq.Context.instance()
        self.pending = []
        self.bstar = BinaryStar(primary, frontend, backend)

        # snapshot requests arrive on the voter socket
        self.bstar.register_voter("tcp://*:%i" % self.port, zmq.ROUTER,
                                  self.handle_snapshot)

        # Set up our clone server sockets
        self.publisher = self.ctx.socket(zmq.PUB)
        self.collector = self.ctx.socket(zmq.SUB)
        self.collector.setsockopt(zmq.SUBSCRIBE, b'')
        self.publisher.bind("tcp://*:%d" % (self.port + 1))
        self.collector.bind("tcp://*:%d" % (self.port + 2))

        # Set up our own clone client interface to peer
        self.subscriber = self.ctx.socket(zmq.SUB)
        self.subscriber.setsockopt(zmq.SUBSCRIBE, b'')
        self.subscriber.connect("tcp://localhost:%d" % (self.peer + 1))

        # Register state change handlers
        self.bstar.master_callback = self.become_master
        self.bstar.slave_callback = self.become_slave

        # Wrap sockets in ZMQStreams for IOLoop handlers
        self.publisher = ZMQStream(self.publisher)
        self.subscriber = ZMQStream(self.subscriber)
        self.collector = ZMQStream(self.collector)

        # Register our handlers with reactor
        self.collector.on_recv(self.handle_collect)
        self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)
        self.hugz_callback = PeriodicCallback(self.send_hugz, 1000)

        # basic log formatting:
        logging.basicConfig(format="%(asctime)s %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S",
                            level=logging.INFO)

    def start(self):
        """Start periodic callbacks and run the Binary Star reactor."""
        self.flush_callback.start()
        self.hugz_callback.start()
        # Run bstar reactor until process interrupted
        try:
            self.bstar.start()
        except KeyboardInterrupt:
            pass

    def handle_snapshot(self, socket, msg):
        """snapshot requests: stream the whole kvmap, then KTHXBAI."""
        if msg[1] != "ICANHAZ?" or len(msg) != 3:
            logging.error("E: bad request, aborting")
            dump(msg)
            self.bstar.loop.stop()
            return
        # len(msg) == 3 is guaranteed here; the original re-tested
        # 'len(msg) >= 3' redundantly after the abort above.
        identity, request = msg[:2]
        subtree = msg[2]
        # Send state snapshot to client
        route = Route(socket, identity, subtree)

        # For each entry in kvmap, send kvmsg to client
        for k, v in self.kvmap.items():
            send_single(k, v, route)

        # Now send END message with sequence number
        logging.info("I: Sending state shapshot=%d" % self.sequence)
        socket.send(identity, zmq.SNDMORE)
        kvmsg = KVMsg(self.sequence)
        kvmsg.key = "KTHXBAI"
        kvmsg.body = subtree
        kvmsg.send(socket)

    def handle_collect(self, msg):
        """Collect updates from clients

        If we're master, we apply these to the kvmap
        If we're slave, or unsure, we queue them on our pending list
        """
        kvmsg = KVMsg.from_msg(msg)
        if self.master:
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            # convert relative ttl to an absolute expiry timestamp
            ttl = float(kvmsg.get('ttl', 0))
            if ttl:
                kvmsg['ttl'] = time.time() + ttl
            kvmsg.store(self.kvmap)
            logging.info("I: publishing update=%d", self.sequence)
        else:
            # If we already got message from master, drop it, else
            # hold on pending list
            if not self.was_pending(kvmsg):
                self.pending.append(kvmsg)

    def was_pending(self, kvmsg):
        """If message was already on pending list, remove and return True.
        Else return False.
        """
        found = False
        for idx, held in enumerate(self.pending):
            if held.uuid == kvmsg.uuid:
                found = True
                break
        if found:
            self.pending.pop(idx)
        return found

    def flush_ttl(self):
        """Purge ephemeral values that have expired"""
        if self.kvmap:
            # Fix: iterate over a snapshot -- flush_single() may delete
            # from kvmap, which invalidates a live items() iterator (Py3
            # raises RuntimeError: dict changed size during iteration).
            for key, kvmsg in list(self.kvmap.items()):
                self.flush_single(kvmsg)

    def flush_single(self, kvmsg):
        """If key-value pair has expired, delete it and publish the fact
        to listening clients."""
        ttl = float(kvmsg.get('ttl', 0))
        if ttl and ttl <= time.time():
            # empty body signals deletion to subscribers
            kvmsg.body = ""
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.send(self.publisher)
            del self.kvmap[kvmsg.key]
            logging.info("I: publishing delete=%d", self.sequence)

    def send_hugz(self):
        """Send hugz to anyone listening on the publisher socket"""
        kvmsg = KVMsg(self.sequence)
        kvmsg.key = "HUGZ"
        kvmsg.body = ""
        kvmsg.send(self.publisher)

    # ---------------------------------------------------------------------
    # State change handlers

    def become_master(self):
        """We're becoming master

        The backup server applies its pending list to its own hash table,
        and then starts to process state snapshot requests.
        """
        self.master = True
        self.slave = False
        # stop receiving subscriber updates while we are master
        self.subscriber.stop_on_recv()

        # Apply pending list to own kvmap
        while self.pending:
            kvmsg = self.pending.pop(0)
            self.sequence += 1
            kvmsg.sequence = self.sequence
            kvmsg.store(self.kvmap)
            logging.info("I: publishing pending=%d", self.sequence)

    def become_slave(self):
        """We're becoming slave"""
        # clear kvmap; handle_subscriber will re-fetch a snapshot
        self.kvmap = None
        self.master = False
        self.slave = True
        self.subscriber.on_recv(self.handle_subscriber)

    def handle_subscriber(self, msg):
        """Collect updates from peer (master)
        We're always slave when we get these updates
        """
        if self.master:
            logging.warn("received subscriber message, but we are master %s",
                         msg)
            return

        # Get state snapshot if necessary
        if self.kvmap is None:
            self.kvmap = {}
            snapshot = self.ctx.socket(zmq.DEALER)
            snapshot.linger = 0
            snapshot.connect("tcp://localhost:%i" % self.peer)

            logging.info("I: asking for snapshot from: tcp://localhost:%d",
                         self.peer)
            snapshot.send_multipart(["ICANHAZ?", ''])
            while True:
                try:
                    kvmsg = KVMsg.recv(snapshot)
                except KeyboardInterrupt:
                    # Interrupted
                    self.bstar.loop.stop()
                    return
                if kvmsg.key == "KTHXBAI":
                    self.sequence = kvmsg.sequence
                    break  # Done
                kvmsg.store(self.kvmap)

            logging.info("I: received snapshot=%d", self.sequence)

        # Find and remove update off pending list
        kvmsg = KVMsg.from_msg(msg)
        # update float ttl -> timestamp
        ttl = float(kvmsg.get('ttl', 0))
        if ttl:
            kvmsg['ttl'] = time.time() + ttl

        if kvmsg.key != "HUGZ":
            if not self.was_pending(kvmsg):
                # If master update came before client update, flip it
                # around, store master update (with sequence) on pending
                # list and use to clear client update when it comes later
                self.pending.append(kvmsg)

            # If update is more recent than our kvmap, apply it
            if (kvmsg.sequence > self.sequence):
                self.sequence = kvmsg.sequence
                kvmsg.store(self.kvmap)
                logging.info("I: received update=%d", self.sequence)
Esempio n. 49
0
class MDPClient(object):
    """Class for the MDP client side.

    Thin asynchronous encapsulation of a zmq.REQ socket.
    Provides a :func:`request` method with optional timeout.

    Objects of this class are meant to be integrated into the
    asynchronous IOLoop of pyzmq.

    :param context:  the ZeroMQ context to create the socket in.
    :type context:   zmq.Context
    :param endpoint: the endpoint to connect to.
    :type endpoint:  str
    :param service:  the service the client should use
    :type service:   str
    """

    # NOTE(review): this class attribute appears unused -- the wire prefix
    # below is built from the module-level PROTO_VERSION instead; confirm
    # which one is authoritative.
    _proto_version = b'MDPC01'

    def __init__(self, context, endpoint, service):
        """Initialize the MDPClient.
        """
        socket = context.socket(zmq.REQ)
        ioloop = IOLoop.instance()
        self.service = service
        self.endpoint = endpoint
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        # REQ semantics: one request must complete before the next is sent
        self.can_send = True
        self._proto_prefix = [PROTO_VERSION, service]
        self._tmo = None
        self.timed_out = False
        socket.connect(endpoint)
        return

    def shutdown(self):
        """Method to deactivate the client connection completely.

        Will delete the stream and the underlying socket.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        if not self.stream:
            return
        # LINGER 0: drop any unsent messages instead of blocking on close
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        return

    def request(self, msg, timeout=None):
        """Send the given message.

        :param msg:     message parts to send.
        :type msg:      list of str
        :param timeout: time to wait in milliseconds.
        :type timeout:  int

        :raises InvalidStateError: if a previous request is still pending.
        :rtype None:
        """
        if not self.can_send:
            raise InvalidStateError()
        # prepare full message: [proto version, service] + payload parts
        to_send = self._proto_prefix[:]
        to_send.extend(msg)
        self.stream.send_multipart(to_send)
        self.can_send = False
        if timeout:
            self._start_timeout(timeout)
        return

    def _on_timeout(self):
        """Helper called after timeout.
        """
        self.timed_out = True
        self._tmo = None
        self.on_timeout()
        return

    def _start_timeout(self, timeout):
        """Helper for starting the timeout.

        :param timeout:  the time to wait in milliseconds.
        :type timeout:   int
        """
        self._tmo = DelayedCallback(self._on_timeout, timeout)
        self._tmo.start()
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg:   list of message parts.
        :type msg:    list of str
        """
        if self._tmo:
            # disable timeout
            self._tmo.stop()
            self._tmo = None
        # setting state before invoking on_message, so we can request from there
        self.can_send = True
        self.on_message(msg)
        return

    def on_message(self, msg):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """
        pass

    def on_timeout(self):
        """Public method called when a timeout occurred.

        .. note:: Does nothing. Should be overloaded!
        """
        pass
Esempio n. 50
0
class ZmqStreamlet(Zmqlet):
    """A :class:`ZmqStreamlet` object can send/receive data to/from ZeroMQ stream and invoke callback function. It
    has three sockets for input, output and control.

    .. warning::
        Starting from v0.3.6, :class:`ZmqStreamlet` replaces :class:`Zmqlet` as one of the key components in :class:`jina.peapods.runtime.BasePea`.
        It requires :mod:`tornado` and :mod:`uvloop` to be installed.
    """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        # NOTE(review): positional *args are accepted but silently
        # discarded -- only **kwargs reach the parent; confirm intended.
        super().__init__(**kwargs)

    def _register_pollin(self):
        """Register :attr:`in_sock`, :attr:`ctrl_sock` and :attr:`out_sock` in poller."""
        with ImportExtensions(required=True):
            import tornado.ioloop

            get_or_reuse_loop()
            self.io_loop = tornado.ioloop.IOLoop.current()
        # wrap every socket in a ZMQStream bound to this runtime's IOLoop
        self.in_sock = ZMQStream(self.in_sock, self.io_loop)
        self.out_sock = ZMQStream(self.out_sock, self.io_loop)
        self.ctrl_sock = ZMQStream(self.ctrl_sock, self.io_loop)
        if self.in_connect_sock is not None:
            self.in_connect_sock = ZMQStream(self.in_connect_sock, self.io_loop)
        # input stays paused until start() installs the recv callback
        self.in_sock.stop_on_recv()

    def _get_dynamic_out_socket(self, target_pod):
        # same as the parent, but the new out socket is stream-wrapped
        return super()._get_dynamic_out_socket(target_pod, as_streaming=True)

    def close(self, flush: bool = True, *args, **kwargs):
        """Close all sockets and shutdown the ZMQ context associated to this `Zmqlet`.

        .. note::
            This method is idempotent.

        :param flush: flag indicating if `sockets` need to be flushed before close is done
        :param args: Extra positional arguments
        :param kwargs: Extra key-value arguments
        """

        # if Address already in use `self.in_sock_type` is not set
        if (
            not self.is_closed
            and hasattr(self, 'in_sock_type')
            and self.in_sock_type == zmq.DEALER
        ):
            try:
                if self._active:
                    # tell the upstream router we are going away
                    self._send_cancel_to_router(raise_exception=True)
            except zmq.error.ZMQError:
                self.logger.debug(
                    f'The dealer {self.name} can not unsubscribe from the router. '
                    f'In case the router is down this is expected.'
                )
        self._active = (
            False  # Important to avoid sending idle back while flushing in socket
        )
        if not self.is_closed:
            # wait until the close signal is received
            time.sleep(0.01)
            if flush:
                # drain pending stream events before tearing sockets down
                for s in self.opened_socks:
                    events = s.flush()
                    self.logger.debug(f'Handled #{events} during flush of socket')
            super().close()
            if hasattr(self, 'io_loop'):
                try:
                    self.io_loop.stop()
                    # Replace handle events function, to skip
                    # None event after sockets are closed.
                    if hasattr(self.in_sock, '_handle_events'):
                        self.in_sock._handle_events = lambda *args, **kwargs: None
                    if hasattr(self.out_sock, '_handle_events'):
                        self.out_sock._handle_events = lambda *args, **kwargs: None
                    if hasattr(self.ctrl_sock, '_handle_events'):
                        self.ctrl_sock._handle_events = lambda *args, **kwargs: None
                    if hasattr(self.in_connect_sock, '_handle_events'):
                        self.in_connect_sock._handle_events = (
                            lambda *args, **kwargs: None
                        )
                except AttributeError as e:
                    self.logger.error(f'failed to stop. {e!r}')

    def pause_pollin(self):
        """Remove :attr:`in_sock` from the poller """
        self.in_sock.stop_on_recv()
        self.is_polling_paused = True

    def resume_pollin(self):
        """Put :attr:`in_sock` back to the poller """
        if self.is_polling_paused:
            self.in_sock.on_recv(self._in_sock_callback)
            self.is_polling_paused = False

    def start(self, callback: Callable[['Message'], None]):
        """
        Open all sockets and start the ZMQ context associated to this `Zmqlet`.

        :param callback: callback function to receive the protobuf message
        """

        def _callback(msg, sock_type):
            # parse the raw frames, account for traffic, then hand off
            msg = _parse_from_frames(sock_type, msg)
            self.bytes_recv += msg.size
            self.msg_recv += 1

            callback(msg)

        self._in_sock_callback = lambda x: _callback(x, self.in_sock_type)
        self.in_sock.on_recv(self._in_sock_callback)
        self.ctrl_sock.on_recv(lambda x: _callback(x, self.ctrl_sock_type))
        if self.out_sock_type == zmq.ROUTER and not self.args.dynamic_routing_out:
            self.out_sock.on_recv(lambda x: _callback(x, self.out_sock_type))
        if self.in_connect_sock is not None:
            self.in_connect_sock.on_recv(
                lambda x: _callback(x, SocketType.ROUTER_CONNECT)
            )
        # blocks here until close() stops the loop, then release its FDs
        self.io_loop.start()
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
Esempio n. 51
0
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """
    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------

        socket: zmq.PUB Socket
            the socket on which messages will be sent.
        pipe: bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # remember the creating pid so forked children can be detected later
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop(make_current=False)
        if pipe:
            self._setup_pipe_in()
        self._local = threading.local()
        self._events = deque()
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.make_current()
        self.io_loop.start()
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        # random inproc endpoint, unique per IOPubThread instance
        _uuid = b2a_hex(os.urandom(16)).decode('ascii')
        iface = self._event_interface = 'inproc://%s' % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe

        Content of the message is ignored.

        Whenever *an* event arrives on the event stream,
        *all* waiting events are processed in order.
        """
        # freeze event count so new writes don't extend the queue
        # while we are processing
        n_events = len(self._events)
        for i in range(n_events):
            event_f = self._events.popleft()
            event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                          "\nsubprocess output will be unavailable.")
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        if msg[0] != self._pipe_uuid:
            # BUG FIX: the original used logging-style args with print(),
            # which printed the literal "%s" and the list separately.
            print("Bad pipe message: %s" % msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        """Create a fresh context/PUSH socket pair for piping from a fork."""
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000  # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        """Return True when running in the process that created this thread."""
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        if hasattr(self._local, 'event_pipe'):
            self._local.event_pipe.close()

    def close(self):
        """Close the underlying socket; further sends are silently dropped."""
        if self.closed:
            return
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        # True once close() has run
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            self._events.append(f)
            # wake event thread (message content is ignored)
            self._event_pipe.send(b'')
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda: self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        if self.closed:
            # BUG FIX: close() may run before a queued send fires; the original
            # then called send_multipart on a None socket (AttributeError).
            return
        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
Esempio n. 52
0
class Messager(object):
    """Dispatches hierarchical pub/sub messages from an in-process ZMQ SUB socket.

    Topics are multi-level and colon-separated (e.g. ``WEATHER:CHINA:HANGZHOU``).
    ZMQ only matches flat prefixes, so only the top-level part is registered
    with ZMQ; the full hierarchy is matched locally in :meth:`recv`.
    """

    def __init__(self, callback):
        """
        :param callback: callable invoked with the JSON-encoded message for
            every local subscription the message's topic falls under.
        """
        self.callback = callback
        self.stamps = {}  # raw topic string -> stamp given at subscribe time
        self.topics = []  # subscribed topics, each stored as a list of parts
        self._sock = ctx.socket(zmq.SUB)
        self._sock.connect('inproc:///tmp/monster_{}'.format(pid))
        self._stream = ZMQStream(self._sock)
        self._stream.on_recv(self.recv)

    def destroy(self):
        """Drop the callback and subscriptions and close the stream."""
        self.callback = None
        self.topics = None
        self._stream.close()

    def subscribe(self, raw_topic, stamp):
        """Subscribe to a (possibly multi-level) topic.

        Multi-level topics are colon-separated; subscribing to a parent topic
        (e.g. ``WEATHER:CHINA``) receives all messages of its sub-topics
        (e.g. the weather of every Chinese city).

        ZMQ subscriptions do not support multi-level topics, so only the
        top-level part is sent to ZMQ and the hierarchy is kept here.
        """
        topic = raw_topic.split(':')
        if not topic:
            return
        if topic in self.topics:
            # already subscribed: keep the original stamp
            return
        self.stamps.update({raw_topic: stamp})
        self._sock.setsockopt(zmq.SUBSCRIBE, str(topic[0]))
        # TODO the topics list is scanned linearly for every message;
        # a trie would speed matching up if subscriptions grow large.
        self.topics.append(topic)

    def unsubscribe(self, raw_topic):
        """Cancel a subscription.

        Trickier than subscribing: the top-level ZMQ subscription may only be
        removed once *no* remaining local subscription shares its top-level part.
        """
        topic = raw_topic.split(':')
        if not topic:
            return
        try:
            self.topics.remove(raw_topic.split(':'))
            self.stamps.pop(raw_topic)
            for tt in self.topics:
                if tt[0] == topic[0]:
                    break
            else:
                self._sock.setsockopt(zmq.UNSUBSCRIBE, str(topic[0]))
        except Exception:
            # best effort: unsubscribing an unknown topic is a no-op
            # (narrowed from a bare except, which also trapped SystemExit)
            pass

    def recv(self, frame):
        """Handle one raw ZMQ frame: match its JSON ``topic`` field against
        every local subscription prefix and invoke the callback per match,
        attaching the subscription's stamp."""
        _, data = frame
        try:
            dd = json.loads(data)
            top = dd.get('topic')
            if not top:
                return
            top = top.split(':')
            for topic in self.topics:
                if topic == top[:len(topic)]:
                    raw_topic = ':'.join(topic)
                    stamp = self.stamps.get(raw_topic)
                    dd['stamp'] = stamp
                    self.callback(json.dumps(dd))
        except Exception:
            # malformed payloads are dropped silently (best effort)
            pass
Esempio n. 53
0
class Server(object):
    """ZeroMQ broker wiring spiders, strategy workers (sw) and DB workers.

    Each component pair gets a bound XPUB/XSUB socket pair; payload messages
    arriving on a component's *out* socket are forwarded to the consumers'
    *in* sockets, and XPUB subscription messages are propagated back to the
    matching XSUB side.
    """

    # socket/stream handles, populated in __init__
    ctx = None
    loop = None
    stats = None
    spiders_in = None
    spiders_out = None
    sw_in = None
    sw_out = None
    db_in = None
    db_out = None

    def __init__(self, hostname, base_port):
        """Bind the six XPUB/XSUB sockets derived from ``hostname``/``base_port``
        and register the forwarding callbacks on the IOLoop."""
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        self.stats = {
            'started': time(),
            'spiders_out_recvd': 0,
            'spiders_in_recvd': 0,
            'db_in_recvd': 0,
            'db_out_recvd': 0,
            'sw_in_recvd': 0,
            'sw_out_recvd': 0
        }

        socket_config = SocketConfig(hostname, base_port)

        spiders_in_s = self.ctx.socket(zmq.XPUB)
        spiders_out_s = self.ctx.socket(zmq.XSUB)
        sw_in_s = self.ctx.socket(zmq.XPUB)
        sw_out_s = self.ctx.socket(zmq.XSUB)
        db_in_s = self.ctx.socket(zmq.XPUB)
        db_out_s = self.ctx.socket(zmq.XSUB)

        spiders_in_s.bind(socket_config.spiders_in())
        spiders_out_s.bind(socket_config.spiders_out())
        sw_in_s.bind(socket_config.sw_in())
        sw_out_s.bind(socket_config.sw_out())
        db_in_s.bind(socket_config.db_in())
        db_out_s.bind(socket_config.db_out())

        self.spiders_in = ZMQStream(spiders_in_s)
        self.spiders_out = ZMQStream(spiders_out_s)
        self.sw_in = ZMQStream(sw_in_s)
        self.sw_out = ZMQStream(sw_out_s)
        self.db_in = ZMQStream(db_in_s)
        self.db_out = ZMQStream(db_out_s)

        self.spiders_out.on_recv(self.handle_spiders_out_recv)
        self.sw_out.on_recv(self.handle_sw_out_recv)
        self.db_out.on_recv(self.handle_db_out_recv)

        self.sw_in.on_recv(self.handle_sw_in_recv)
        self.db_in.on_recv(self.handle_db_in_recv)
        self.spiders_in.on_recv(self.handle_spiders_in_recv)
        logging.basicConfig(format="%(asctime)s %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S",
                            level=logging.INFO)
        self.logger = logging.getLogger(
            "distributed_frontera.messagebus.zeromq.broker.Server")

    def start(self):
        """Run the IOLoop until interrupted (Ctrl-C exits cleanly)."""
        self.logger.info("Distributed Frontera ZeroMQ broker is started.")
        self.log_stats()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass

    def log_stats(self):
        """Log the counters and re-schedule itself every 10 seconds."""
        self.logger.info(self.stats)
        self.loop.add_timeout(timedelta(seconds=10), self.log_stats)

    def handle_spiders_out_recv(self, msg):
        """Spider output fans out to both strategy and DB workers."""
        self.sw_in.send_multipart(msg)
        self.db_in.send_multipart(msg)
        self.stats['spiders_out_recvd'] += 1

    def handle_sw_out_recv(self, msg):
        """Strategy-worker output goes to DB workers."""
        self.db_in.send_multipart(msg)
        self.stats['sw_out_recvd'] += 1

    def handle_db_out_recv(self, msg):
        """DB-worker output goes to spiders."""
        self.spiders_in.send_multipart(msg)
        self.stats['db_out_recvd'] += 1

    def handle_db_in_recv(self, msg):
        """Route XPUB (un)subscriptions arriving on db_in to the producer side."""
        self.stats['db_in_recvd'] += 1
        # A first byte of \x01/\x00 marks an XPUB (un)subscription message.
        # BUG FIX: compare a one-byte slice and bytes literals — on Python 3
        # ``msg[0][0]`` is an int and ``identity`` is bytes, so the original
        # str comparisons never matched. (Still correct on Python 2.)
        if msg[0][:1] in (b'\x01', b'\x00'):
            action, identity, partition_id = self.decode_subscription(msg[0])
            if identity == b'sl':
                self.spiders_out.send_multipart(msg)
                return
            if identity == b'us':
                self.sw_out.send_multipart(msg)
                return
            raise AttributeError('Unknown identity in channel subscription.')

    def handle_sw_in_recv(self, msg):
        """Forward sw_in (un)subscriptions to the spiders' XSUB side."""
        if msg[0][:1] in (b'\x01', b'\x00'):
            self.spiders_out.send_multipart(msg)
        self.stats['sw_in_recvd'] += 1

    def handle_spiders_in_recv(self, msg):
        """Forward spiders_in (un)subscriptions to the DB workers' XSUB side."""
        if msg[0][:1] in (b'\x01', b'\x00'):
            self.db_out.send_multipart(msg)
        self.stats['spiders_in_recvd'] += 1

    def decode_subscription(self, msg):
        """Decode an XPUB subscription frame.

        :param msg: the raw subscription frame (3 or 4 bytes)
        :return: tuple of action, identity, partition_id
        where
        action is 1 - subscription, 0 - unsubscription,
        identity - 2 characters,
        partition_id - 8 bit unsigned integer (None if absent)
        """
        if len(msg) == 4:
            return unpack(">B2sB", msg)
        elif len(msg) == 3:
            action, identity = unpack(">B2s", msg)
            return action, identity, None
        raise ValueError("Can't decode subscription correctly.")
Esempio n. 54
0
class TestMNWorker(TestCase):
    """Exercise WorkerRunner request/reply and heartbeat handling against a
    fake in-process broker driven by the tornado IOLoop."""

    # Connection parameters shared by the fake broker and the worker under test.
    endpoint = b'tcp://127.0.0.1:5555'
    service = b'test'

    def setUp(self):
        """Create a fresh ZMQ context; the fake broker is started per test."""
        if _do_print:
            print('Setting up...')
        sys.stdout.flush()
        self.context = zmq.Context()
        self.broker = None
        self._msgs = []
        return

    def tearDown(self):
        """Stop the broker (if still running) and drop the context."""
        if _do_print:
            print('Tearing down...')
        sys.stdout.flush()
        if self.broker:
            self._stop_broker()
        self.broker = None
        self.context = None
        return

    def _on_msg(self, msg):
        """Fake-broker receive handler: record the worker's address and react
        to MDP worker commands (READY / HEARTBEAT / REPLY)."""
        if _do_print:
            print('Broker received:', msg)
        self.target = msg.pop(0)  # ROUTER envelope: the worker's identity
        marker_frame = msg.pop(0)  # empty delimiter frame
        if msg[1] == b'\x01':  # READY
            if _do_print:
                print('READY received')
            return
        if msg[1] == b'\x04':  # HEARTBEAT (original comment wrongly said "ready")
            if _do_print:
                print('HB received')
            return
        if msg[1] == b'\x03':  # REPLY: stop the loop so the test can finish
            IOLoop.instance().stop()
            return
        return

    def _start_broker(self, do_reply=False):
        """Helper activating a fake broker in the ioloop.
        """
        socket = self.context.socket(zmq.ROUTER)
        self.broker = ZMQStream(socket)
        self.broker.socket.setsockopt(zmq.LINGER, 0)
        self.broker.bind(self.endpoint)
        self.broker.on_recv(self._on_msg)
        # extra attributes are stashed on the stream for test bookkeeping
        self.broker.do_reply = do_reply
        self.broker.ticker = PeriodicCallback(self._tick,
                                              WorkerRunner.HB_INTERVAL)
        self.broker.ticker.start()
        self.target = None
        if _do_print:
            print("Broker started")
        return

    def _stop_broker(self):
        """Tear down the fake broker: stop the heartbeat ticker, close socket and stream."""
        if self.broker:
            self.broker.ticker.stop()
            self.broker.ticker = None
            self.broker.socket.close()
            self.broker.close()
            self.broker = None
        if _do_print:
            print("Broker stopped")
        return

    def _tick(self):
        """Periodic callback: send an MDP heartbeat (\\x04) to the known worker."""
        if self.broker and self.target:
            msg = [self.target, b'', b'MNPW01', b'\x04']
            self.broker.send_multipart(msg)
            if _do_print:
                print("Tick sent:", msg)
        return

    def send_req(self):
        """Send a fake MDP request (\\x02) with a two-frame payload to the worker."""
        data = [b'AA', b'bb']
        msg = [self.target, b'', b'MNPW01', b'\x02', self.target, b''] + data
        self.broker.send_multipart(msg)
        if _do_print:
            print('broker sent:', msg)
        return

    @staticmethod
    def stop_loop():
        """Failsafe: stop the shared IOLoop (scheduled as a delayed callback)."""
        IOLoop.instance().stop()
        return

    # Tests follow

    def test_simple_worker(self):
        """Test MNWorker simple req/reply.
        """
        self._start_broker()
        time.sleep(0.2)  # give the ROUTER socket time to bind before the worker connects
        worker = WorkerRunner(self.context, self.endpoint, self.service)
        # request after 500ms; hard-stop the loop after 2.5s if no reply arrived
        sender = DelayedCallback(self.send_req, 500)
        stopper = DelayedCallback(self.stop_loop, 2500)
        sender.start()
        stopper.start()
        IOLoop.instance().start()
        worker.shutdown()
        self._stop_broker()
        return
Esempio n. 55
0
class MDPBroker(object):
    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on the
    requested service.

    This base class defines the overall functionality and the API. Subclasses are
    meant to implement additional features (like logging).

    The broker uses ØMQ ROUTER sockets to deal with clients and workers. These sockets
    are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:    the context to use for socket creation.
    :type context:     zmq.Context
    :param main_ep:    the primary endpoint for workers and clients.
    :type main_ep:     str
    :param opt_ep:     is an optional 2nd endpoint.
    :type opt_ep:      str
    :param worker_q:   the class to be used for the worker-queue.
    :type worker_q:    class
    """

    CLIENT_PROTO = b'MDPC01'  #: Client protocol identifier
    WORKER_PROTO = b'MDPW01'  #: Worker protocol identifier

    def __init__(self, context, main_ep, opt_ep=None):
        """Init MDPBroker instance.
        """
        l = logger.Logger('mq_broker')
        self.log = l.get_logger()
        self.log.info("MDP broker startup...")

        socket = ZmqSocket(context, zmq.ROUTER)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)
        if opt_ep:
            socket = ZmqSocket(context, zmq.ROUTER)
            socket.bind(opt_ep)
            self.client_stream = ZMQStream(socket)
            self.client_stream.on_recv(self.on_message)
        else:
            # single-endpoint setup: clients and workers share one stream
            self.client_stream = self.main_stream
        self.log.debug("Socket created...")
        self._workers = {}
        # services contain the worker queue and the request queue
        self._services = {}
        # MDP worker command byte -> handler
        self._worker_cmds = {
            b'\x01': self.on_ready,
            b'\x03': self.on_reply,
            b'\x04': self.on_heartbeat,
            b'\x05': self.on_disconnect,
        }
        self.log.debug("Launch the timer...")
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        self.log.info("MDP broker started")
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        self.log.debug(
            "Try to register a worker : wid={0}, service={1}".format(
                wid, service))
        try:
            if wid in self._workers:
                self.log.debug("Worker %s already registered" % service)
                return
            self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service,
                                           self.main_stream)
            if service in self._services:
                wq, wr = self._services[service]
                wq.put(wid)
            else:
                q = ServiceQueue()
                q.put(wid)
                self._services[service] = (q, [])
            self.log.info("Registered worker : wid={0}, service={1}".format(
                wid, service))
        except Exception:
            # narrowed from a bare except (which also trapped SystemExit)
            self.log.error(
                "Error while registering a worker : wid={0}, service={1}, trace={2}"
                .format(wid, service, traceback.format_exc()))
        return

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        self.log.debug("Try to unregister a worker : wid={0}".format(wid))
        try:
            try:
                wrep = self._workers[wid]
            except KeyError:
                # not registered, ignore
                self.log.warning(
                    "The worker wid={0} is not registered, ignoring the unregister request"
                    .format(wid))
                return
            wrep.shutdown()
            service = wrep.service
            if service in self._services:
                wq, wr = self._services[service]
                wq.remove(wid)
            del self._workers[wid]
            self.log.info("Unregistered worker : wid={0}".format(wid))
        except Exception:
            self.log.error(
                "Error while unregistering a worker : wid={0}, trace={1}".
                format(wid, traceback.format_exc()))
        return

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        self.log.debug("Try to disconnect a worker : wid={0}".format(wid))
        try:
            try:
                wrep = self._workers[wid]
            except KeyError:
                # not registered, ignore.
                # BUG FIX: the original logged ``wrep.service`` here, but
                # ``wrep`` is unbound in this branch — the resulting NameError
                # was swallowed by the outer except and logged as an error.
                self.log.warning(
                    "The worker wid={0} is not registered, ignoring the disconnect request"
                    .format(wid))
                return
            to_send = [wid, self.WORKER_PROTO, b'\x05']
            self.main_stream.send_multipart(to_send)
            self.log.info(
                "Request to unregister a worker : wid={0} service={1}".format(
                    wid, wrep.service))
        except Exception:
            self.log.error(
                "Error while disconnecting a worker : wid={0}, trace={1}".
                format(wid, traceback.format_exc()))
        self.unregister_worker(wid)
        return

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """
        to_send = rp[:]
        to_send.extend([b'', self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has been called.

        :rtype: None
        """
        self.log.info("Shutdown starting...")
        try:
            self.log.debug("Closing the socket...")
            if self.client_stream == self.main_stream:
                # avoid double-closing the shared stream below
                self.client_stream = None
            self.main_stream.on_recv(None)
            self.main_stream.socket.setsockopt(zmq.LINGER, 0)
            self.main_stream.socket.close()
            self.main_stream.close()
            self.main_stream = None
            if self.client_stream:
                self.client_stream.on_recv(None)
                self.client_stream.socket.setsockopt(zmq.LINGER, 0)
                self.client_stream.socket.close()
                self.client_stream.close()
                self.client_stream = None
            self.log.debug("Clean workers and services...")
            self._workers = {}
            self._services = {}
        except Exception:
            self.log.error("Error during shutdown : trace={0}".format(
                traceback.format_exc()))
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        self.log.debug("Check for dead workers...")
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.log.info(
                    "A worker seems to be dead : wid={0} service={1}".format(
                        wrep.id, wrep.service))
                self.unregister_worker(wrep.id)
        return

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        self.register_worker(ret_id, msg[0])
        return

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        # make worker available again
        try:
            wrep = self._workers[ret_id]
            service = wrep.service
            wq, wr = self._services[service]
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)
            wq.put(wrep.id)
            if wr:
                # a request was queued while no worker was free; dispatch it now
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            # unknown service
            self.disconnect(ret_id)
        return

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        wid = rp[0]
        self.log.info("A worker disconnects itself : wid={0}".format(wid))
        self.unregister_worker(wid)
        return

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        For now only mmi.service is handled.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the protocol id sent
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """
        if service == b'mmi.service':
            s = msg[0]
            ret = b'404'
            for wr in list(self._workers.values()):
                if s == wr.service:
                    ret = b'200'
                    break
            self.client_response(rp, service, [ret])
        elif service == b'mmi.services':
            ret = []
            for wr in list(self._workers.values()):
                ret.append(wr.service)
            self.client_response(rp, service, [b', '.join(ret)])
        else:
            # unsupported MMI request
            self.client_response(rp, service, [b'501'])
        return

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        .. note::

           If currently no worker is available for a known service,
           the message is queued for later delivery.

        If a worker is available for the requested service, the
        message is repackaged and sent to the worker. The worker in
        question is removed from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """
        service = msg.pop(0)
        if service.startswith(b'mmi.'):
            self.on_mmi(rp, service, msg)
            return
        try:
            wq, wr = self._services[service]
            wid = wq.get()
            if not wid:
                # no worker ready
                # queue message
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return
            wrep = self._workers[wid]
            to_send = [wrep.id, b'', self.WORKER_PROTO, b'\x02']
            to_send.extend(rp)
            to_send.append(b'')
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)
        except KeyError:
            # unknown service
            # ignore request
            msg = "broker has no service {0}".format(service)
            print(msg)
            self.log.warning(msg)
        return

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            # ignore unknown command
            # DISCONNECT worker
            self.log.warning(
                "Unknown command from worker (it will be disconnect) : wid={0}, cmd={1}"
                .format(rp[0], cmd))
            self.disconnect(rp[0])
        return

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is
        ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        self.log.debug("Message received: {0}".format(msg))
        rp, msg = split_address(msg)
        # dispatch on first frame after path
        t = msg.pop(0)
        if t.startswith(b'MDPW'):
            self.on_worker(t, rp, msg)
        elif t.startswith(b'MDPC'):
            self.on_client(t, rp, msg)
        else:
            self.log.warning("Broker unknown Protocol: {0}".format(t))
        return
Esempio n. 56
0
class LRUQueue(object):
    """Least-recently-used queue broker driven by ZMQStream/IOLoop.

    Workers announce availability on the backend socket; each client
    request arriving on the frontend is routed to the most recently
    freed worker. The loop is stopped once NBR_CLIENTS replies have
    been delivered.
    """

    def __init__(self, backend_socket, frontend_socket):
        self.available_workers = 0
        self.workers = []
        self.client_nbr = NBR_CLIENTS

        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        # Frontend reads start only once the first worker is ready.
        self.backend.on_recv(self.handle_backend)

        self.loop = IOLoop.instance()

    def handle_backend(self, msg):
        """Requeue a worker and, if present, relay its reply to the client."""
        worker_addr, empty, client_addr = msg[:3]

        assert self.available_workers < NBR_WORKERS

        # The worker is free again: put it back on the LRU stack.
        self.available_workers += 1
        self.workers.append(worker_addr)

        # Second frame is the empty delimiter.
        assert empty == b""

        # Third frame is either READY or a client reply address; for a
        # reply, forward the payload back through the frontend.
        if client_addr != b"READY":
            empty, reply = msg[3:]
            assert empty == b""

            self.frontend.send_multipart([client_addr, b'', reply])

            self.client_nbr -= 1
            if self.client_nbr == 0:
                # All expected replies delivered: schedule shutdown.
                self.loop.add_timeout(time.time() + 1, self.loop.stop)

        if self.available_workers == 1:
            # First worker became available: begin accepting requests.
            self.frontend.on_recv(self.handle_frontend)

    def handle_frontend(self, msg):
        """Route the next client request to an available worker."""
        # Client request frames are [address][empty][request].
        client_addr, empty, request = msg
        assert empty == b""

        # Pop the most recently freed worker (LRU routing).
        self.available_workers -= 1
        worker_id = self.workers.pop()

        self.backend.send_multipart(
            [worker_id, b'', client_addr, b'', request])
        if self.available_workers == 0:
            # No workers left: pause frontend reads until one returns.
            self.frontend.stop_on_recv()
Esempio n. 57
0
class ZmqStreamlet(Zmqlet):
    """A :class:`ZmqStreamlet` object can send/receive data to/from ZeroMQ stream and invoke callback function. It
    has three sockets for input, output and control.

    .. warning::
        Starting from v0.3.6, :class:`ZmqStreamlet` replaces :class:`Zmqlet` as one of the key components in :class:`jina.peapods.runtime.BasePea`.
        It requires :mod:`tornado` and :mod:`uvloop` to be installed.
    """

    def register_pollin(self):
        """Register :attr:`in_sock`, :attr:`ctrl_sock` and :attr:`out_sock` in poller."""
        with ImportExtensions(required=True):
            import tornado.ioloop

            get_or_reuse_loop()
            self.io_loop = tornado.ioloop.IOLoop.current()
        # Wrap the raw zmq sockets in tornado streams bound to this loop.
        self.in_sock = ZMQStream(self.in_sock, self.io_loop)
        self.out_sock = ZMQStream(self.out_sock, self.io_loop)
        self.ctrl_sock = ZMQStream(self.ctrl_sock, self.io_loop)
        # Input stays paused until a recv callback is installed in start().
        self.in_sock.stop_on_recv()

    def close(self):
        """Close all sockets and shutdown the ZMQ context associated to this `Zmqlet`.

        .. note::
            This method is idempotent.
        """
        if not self.is_closed:
            # wait until the close signal is received
            time.sleep(0.01)
            # Flush pending outgoing messages before tearing down sockets.
            for s in self.opened_socks:
                s.flush()
            super().close()
            if hasattr(self, 'io_loop'):
                try:
                    self.io_loop.stop()
                    # Replace handle events function, to skip
                    # None event after sockets are closed.
                    if hasattr(self.in_sock, '_handle_events'):
                        self.in_sock._handle_events = lambda *args, **kwargs: None
                    if hasattr(self.out_sock, '_handle_events'):
                        self.out_sock._handle_events = lambda *args, **kwargs: None
                    if hasattr(self.ctrl_sock, '_handle_events'):
                        self.ctrl_sock._handle_events = lambda *args, **kwargs: None
                except AttributeError as e:
                    self.logger.error(f'failed to stop. {e!r}')

    def pause_pollin(self):
        """Remove :attr:`in_sock` from the poller """
        self.in_sock.stop_on_recv()

    def resume_pollin(self):
        """Put :attr:`in_sock` back to the poller """
        self.in_sock.on_recv(self._in_sock_callback)

    def start(self, callback: Callable[['Message'], 'Message']):
        """
        Open all sockets and start the ZMQ context associated to this `Zmqlet`.

        Blocks in :meth:`IOLoop.start` until the loop is stopped (see
        :meth:`close`), then tears the loop down.

        :param callback: callback function to receive the protobuf message
        """

        def _callback(msg, sock_type):
            # Reassemble the protobuf message from zmq frames, update
            # traffic counters, then hand it to the user callback.
            msg = _parse_from_frames(sock_type, msg)
            self.bytes_recv += msg.size
            self.msg_recv += 1

            msg = callback(msg)

            # A truthy return value is forwarded downstream.
            if msg:
                self.send_message(msg)

        # Keep a reference so resume_pollin() can re-install the callback.
        self._in_sock_callback = lambda x: _callback(x, self.in_sock_type)
        self.in_sock.on_recv(self._in_sock_callback)
        self.ctrl_sock.on_recv(lambda x: _callback(x, self.ctrl_sock_type))
        # A ROUTER out socket can also receive (replies from peers).
        if self.out_sock_type == zmq.ROUTER:
            self.out_sock.on_recv(lambda x: _callback(x, self.out_sock_type))
        self.io_loop.start()
        # Loop has been stopped elsewhere (close()); release its resources.
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
Esempio n. 58
0
class ControlledProcess(mp.Process):
    """
    Process subclass with control port.

    Parameters
    ----------
    port_ctrl : int
        Network port for receiving control messages.
    quit_sig : int
        OS signal to use when quitting the proess.
    id : str
        Unique object identifier. Used for communication and logging.

    See Also
    --------
    multiprocessing.Process
    """
    def __init__(self, port_ctrl, id, *args, **kwargs):

        # Unique object identifier:
        self.id = id

        # Logging:
        self.logger = twiggy.log.name(self.id)

        # Control port:
        self.port_ctrl = port_ctrl

        # Flag to use when stopping the process:
        self.running = False

        super(ControlledProcess, self).__init__(*args, **kwargs)

    def _ctrl_handler(self, msg):
        """
        Control port handler.

        Stops the control stream and IOLoop, and clears ``self.running``
        so the run loop exits, when a 'quit' message is received.
        """

        self.logger.info('recv: %s' % str(msg))
        # NOTE(review): zmq delivers frames as bytes on Python 3, so this
        # comparison presumably relies on a str-sending peer — TODO confirm.
        if msg[0] == 'quit':
            try:
                self.stream_ctrl.flush()
                self.stream_ctrl.stop_on_recv()
                self.ioloop_ctrl.stop()
            except IOError:
                self.logger.info('streams already closed')
            except Exception as e:
                # str(e) instead of e.message: BaseException has no
                # ``message`` attribute on Python 3, so the original code
                # raised AttributeError here instead of logging.
                self.logger.info('other error occurred: ' + str(e))
            self.running = False

    def _init_ctrl_handler(self):
        """
        Initialize control port handler.

        Creates a DEALER socket connected to the control port and wires
        its stream to :meth:`_ctrl_handler`.
        """

        # Set the linger period to prevent hanging on unsent messages
        # when shutting down:
        self.logger.info('initializing ctrl handler')
        self.sock_ctrl = self.zmq_ctx.socket(zmq.DEALER)
        # NOTE(review): zmq.IDENTITY expects bytes on Python 3; assumes
        # self.id is bytes or the binding encodes it — TODO confirm.
        self.sock_ctrl.setsockopt(zmq.IDENTITY, self.id)
        self.sock_ctrl.setsockopt(zmq.LINGER, LINGER_TIME)
        self.sock_ctrl.connect('tcp://localhost:%i' % self.port_ctrl)

        self.stream_ctrl = ZMQStream(self.sock_ctrl, self.ioloop_ctrl)
        self.stream_ctrl.on_recv(self._ctrl_handler)

    def _init_net(self, event_thread=True):
        """
        Initialize network connection.

        Parameters
        ----------
        event_thread : bool
            If True, start the control event loop in a new thread.
        """

        # Set up zmq context and event loop:
        self.zmq_ctx = zmq.Context()
        self.ioloop_ctrl = IOLoop.instance()

        # Set up event loop handlers:
        self._init_ctrl_handler()

        # Start event loop:
        if event_thread:
            th.Thread(target=self.ioloop_ctrl.start).start()
        else:
            self.ioloop_ctrl.start()

    def run(self):
        """
        Body of process.

        Busy-waits until :meth:`_ctrl_handler` clears ``self.running``.
        """

        self._init_net()
        self.running = True
        while True:
            self.logger.info('idling')
            if not self.running:
                self.logger.info('stopping run loop')
                break
        self.logger.info('done')
Esempio n. 59
0
class ZMQPubSub(PubSubBase):
    """Pub/sub backend on top of a zmq FORWARDER device.

    Subscribers connect a SUB socket to ``bport``; publishers PUSH to
    ``fport``; :meth:`start_service` runs the forwarding device that
    bridges the two.
    """

    def __init__(self,
                 device_ip='127.0.0.1',
                 fport=5559,
                 bport=5560,
                 *args,
                 **kwargs):
        self.channels = list()
        # Bug fix: the constructor previously hardcoded '127.0.0.1',
        # silently ignoring the caller-supplied device_ip argument.
        self.device_ip = device_ip
        self.fport = fport
        self.bport = bport
        super(ZMQPubSub, self).__init__(*args, **kwargs)

    ##
    ## pubsub api
    ##

    def connect(self):
        """Connect the SUB socket to the device backend and start receiving."""
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.connect('tcp://%s:%s' % (self.device_ip, self.bport))
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.on_streaming_data)
        self.connected()

    def disconnect(self):
        """Signal disconnection (sockets are left for the context to reap)."""
        self.disconnected()

    def subscribe(self, channel_id):
        """Subscribe to *channel_id* and record it in :attr:`channels`."""
        # NOTE(review): zmq.SUBSCRIBE expects bytes on Python 3; assumes a
        # py2 runtime or bytes-coercing binding — TODO confirm.
        self.socket.setsockopt(zmq.SUBSCRIBE, str(channel_id))
        self.channels.append(channel_id)
        self.subscribed(channel_id)

    def unsubscribe(self, channel_id=None):
        """Unsubscribe one channel, or all channels when *channel_id* is None."""
        channels = [channel_id] if channel_id else self.channels
        for channel_id in channels:
            self.socket.setsockopt(zmq.UNSUBSCRIBE, str(channel_id))
            self.unsubscribed(channel_id)
            self.channels.remove(channel_id)

    @staticmethod
    def publish(channel_id, message, device_ip='127.0.0.1', fport=5559):
        """Push one 'channel message' frame to the device frontend."""
        context = zmq.Context()
        socket = context.socket(zmq.PUSH)
        socket.connect('tcp://%s:%s' % (device_ip, fport))
        socket.send_unicode('%s %s' % (channel_id, message))

    ##
    ## other methods
    ##

    def on_streaming_data(self, data):
        """Split each received frame into (channel, payload) and dispatch."""
        for l in data:
            reply = l.split(' ', 1)
            self.on_message(reply[0], reply[1])

    @staticmethod
    def start_service(fport=5559, bport=5560):
        """Run a blocking PULL->PUB forwarder device between the two ports."""
        try:
            context = zmq.Context(1)

            frontend = context.socket(zmq.PULL)
            frontend.bind('tcp://*:%s' % fport)

            backend = context.socket(zmq.PUB)
            backend.bind('tcp://*:%s' % bport)

            logger.info('starting zmq device')
            # Blocks until interrupted; forwards every frontend message
            # to all backend subscribers.
            zmq.device(zmq.FORWARDER, frontend, backend)
        except KeyboardInterrupt:
            pass
        except Exception as e:
            logger.exception(e)
        finally:
            frontend.close()
            backend.close()
            context.term()
Esempio n. 60
0
 def setup_zmq(self, url):
     """Bind a REP socket at *url* and route its messages to handle_zmq.

     Returns the (socket, stream, url) triple for the caller to keep.
     """
     rep_socket = self.context.socket(zmq.REP)
     rep_socket.bind(url)
     rep_stream = ZMQStream(rep_socket)
     rep_stream.on_recv(self.handle_zmq)
     return rep_socket, rep_stream, url