def run(self):
    """Resolve the output target and set up a SUB stream on self.addr.

    FIX: Python 3 ``except ... as`` / ``print()`` syntax; the SUBSCRIBE
    option value must be bytes under pyzmq on Python 3.
    NOTE(review): the stream is closed again straight away, mirroring the
    original control flow -- confirm where the actual receive loop lives.
    """
    # Magic filenames map to the interpreter's own streams; anything else
    # is opened for binary append. NOTE(review): the handle is stored on
    # self.file and not closed here -- presumably closed elsewhere.
    if self.filename == 'sys.stdout':
        self.file = sys.stdout
    elif self.filename == 'sys.stderr':
        self.file = sys.stderr
    else:
        self.file = open(self.filename, 'a+b')
    ioloop.install()
    loop = ioloop.IOLoop.instance()
    stream = None
    try:
        context = zmq.Context()
        socket = context.socket(zmq.SUB)
        socket.connect(self.addr)
        socket.setsockopt(zmq.SUBSCRIBE, b'')  # receive every topic
        stream = ZMQStream(socket)
    except Exception as err:
        print(self.name, 'error getting outstream:', err)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        traceback.print_tb(exc_traceback, limit=30)
    if stream and not stream.closed():
        stream.close()
class DeviceServicePublisher(object):
    """Owns a PUB socket wrapped in a ZMQStream and publishes
    topic-prefixed two-frame multipart messages on a fixed endpoint."""

    def __init__(self, context, publisher_endpoint):
        self.context = context
        self.publisher_endpoint = publisher_endpoint
        pub_socket = self.context.socket(zmq.PUB)
        self.publisher = ZMQStream(pub_socket, IOLoop.instance())
        # Drop unsent messages on close instead of blocking.
        self.publisher.socket.setsockopt(zmq.LINGER, 0)
        self.publisher.bind(self.publisher_endpoint)

    def shutdown(self):
        """Close the underlying socket and its stream wrapper."""
        self.publisher.socket.close()
        self.publisher.close()
        self.publisher = None

    def send(self, msg, topic='all'):
        """Publish *msg* under *topic* as [topic, msg]."""
        self.publisher.send_multipart([topic, msg])
class WebSocketZMQBridgeHandler(websocket.WebSocketHandler):
    """Bridges a browser WebSocket to a local ZMQ DEALER socket.

    Incoming WS frames carry a one-byte "more follows" prefix; outgoing
    ZMQ frames are re-prefixed with \\x01 (more follows) / \\x00 (last).
    """

    def open(self, *args, **kwargs):
        self.currentMessage = []
        self.__endpoint = 'tcp://localhost:224'
        dealer = zmq.Context().socket(zmq.DEALER)
        self.__stream = ZMQStream(dealer, IOLoop.current())
        self.__stream.on_recv(self.__onReceive)
        self.__stream.socket.setsockopt(zmq.LINGER, 0)
        self.__stream.connect(self.__endpoint)

    def on_close(self, *args, **kwargs):
        self.__stream.close()

    def on_message(self, message):
        # Byte 0 flags continuation; the rest is the frame payload.
        more_flag, payload = message[0], message[1:]
        self.currentMessage.append(payload)
        if not more_flag:
            outgoing, self.currentMessage = self.currentMessage, []
            self.__stream.send_multipart(outgoing)

    def __onReceive(self, msg):
        for frame in msg[:-1]:
            self.write_message(b'\x01' + frame, binary=True)
        self.write_message(b'\x00' + msg[-1], binary=True)
class SocketConnection(sockjs.tornado.SockJSConnection):
    """SockJS connection relaying between clients via a ZMQ pub/sub pair."""

    # Shared registry of live connections.
    clients = set()

    def on_open(self, request):
        self.clients.add(self)
        sub = context.socket(zmq.SUB)
        sub.connect("tcp://localhost:%s" % str(ZMQ_PORT))
        sub.setsockopt(zmq.SUBSCRIBE, '')  # every topic
        self.subscribe_stream = ZMQStream(sub)
        self.subscribe_stream.on_recv(self.on_message_published)

    def on_message(self, message):
        logging.info('message received, publish it to %d clients' % len(self.clients))
        publish_stream.send_unicode(message)

    def on_message_published(self, message):
        logging.info('client received new published message')
        self.send(message)

    def on_close(self):
        self.clients.remove(self)
        # Properly close ZMQ socket
        self.subscribe_stream.close()
class ManagerControlled(object):
    """Listens on the manager's control PUB channel and stops this
    process when a stop notification arrives."""

    def __init__(self, *args, **kwargs):
        self.context = Context.instance()
        self.loop = IOLoop.instance()
        self.control_socket = self.context.socket(SUB)
        # discard unsent messages on close
        self.control_socket.setsockopt(LINGER, 0)
        self.control_socket.setsockopt(SUBSCRIBE, '')
        manager_addr = 'tcp://{}:{}'.format(MANAGER_PUB_ADDRESS, MANAGER_PUB_PORT)
        self.control_socket.connect(manager_addr)
        self.control_stream = ZMQStream(self.control_socket, self.loop)
        self.control_stream.on_recv_stream(self.control_handler)

    def control_handler(self, stream, message_list):
        """Dispatch each control frame; a frame that is not exactly
        "<notification> <data>" is treated as the notification itself."""
        for raw in message_list:
            parts = raw.split()
            notification = parts[0] if len(parts) == 2 else raw
            if notification == NOTIFICATION_PROCESS_STOP:
                self.stop()

    def stop(self):
        """Detach and close the control stream and socket."""
        self.control_stream.stop_on_recv()
        self.control_stream.close()
        self.control_socket.close()
class Publisher(object):
    """PUB-socket wrapper that binds an endpoint and sends multipart
    messages through a ZMQStream."""

    def __init__(self, context, pub_endpoint):
        self.context = context
        self.pub_endpoint = pub_endpoint
        pub_socket = self.context.socket(zmq.PUB)
        self.publisher = ZMQStream(pub_socket, IOLoop.instance())
        self.publisher.socket.setsockopt(zmq.LINGER, 0)  # don't block on close
        self.publisher.bind(self.pub_endpoint)

    def shutdown(self):
        """Unbind, then close both the socket and its stream wrapper."""
        self.publisher.socket.unbind(self.pub_endpoint)
        self.publisher.socket.close()
        self.publisher.close()
        self.publisher = None

    def send(self, msg):
        """Log and publish *msg* (a multipart frame list)."""
        logger.debug("Publisher sending: {0}".format(msg))
        self.publisher.send_multipart(msg)
class Client(object):
    """Blocking RPC-style client over a REQ socket + msgpack.

    Attribute access builds a remote call: ``client.foo(1, 2)`` sends
    ``('foo', 1, 2)`` and blocks on the IO loop until a reply arrives.
    """

    def __init__(self):
        self.stream = None
        self.result = None

    def connect(self, port):
        """Connect a REQ socket to *port* (a full zmq endpoint string)."""
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect(port)
        self.stream = ZMQStream(socket)

    def __getattr__(self, item):
        def wrapper(*args):
            return self._run((item,) + tuple(args))
        return wrapper

    def _run(self, request):
        """Send *request*, spin the loop until a reply, return the result.

        Raises Exception with the server's message on an 'ERR' reply.
        """
        def on_response(message):
            response = msgpack.unpackb(message[0], use_list=False)
            try:
                if response[0] == 'OK':
                    self.result = response[1]
                elif response[0] == 'ERR':
                    raise Exception(response[2])
            finally:
                # BUG FIX: previously an 'ERR' reply raised before stop(),
                # so the IO loop kept running forever. Always stop the loop
                # once a reply has been handled.
                ZMQIOLoop.instance().stop()

        self.stream.send(msgpack.packb(request))
        self.stream.on_recv(on_response)
        ZMQIOLoop.instance().start()
        return self.result

    def disconnect(self):
        self.stream.close()
class SocketConnection(sockjs.tornado.SockJSConnection):
    """SockJS endpoint bridging browser messages to a ZMQ pub/sub bus."""

    clients = set()

    def on_open(self, request):
        self.clients.add(self)
        subscriber = context.socket(zmq.SUB)
        endpoint = "tcp://localhost:%s" % str(ZMQ_PORT)
        subscriber.connect(endpoint)
        subscriber.setsockopt(zmq.SUBSCRIBE, '')
        self.subscribe_stream = ZMQStream(subscriber)
        self.subscribe_stream.on_recv(self.on_message_published)

    def on_message(self, message):
        # Fan the message out through the shared publisher stream.
        logging.info(
            'message received, publish it to %d clients' % len(self.clients)
        )
        publish_stream.send_unicode(message)

    def on_message_published(self, message):
        logging.info('client received new published message')
        self.send(message)

    def on_close(self):
        self.clients.remove(self)
        self.subscribe_stream.close()  # properly release the ZMQ socket
class AsyncCircusClient(object):
    """Asynchronous circus client: JSON commands over a DEALER ZMQStream."""

    def __init__(self, context=None, endpoint=DEFAULT_ENDPOINT_DEALER,
                 timeout=5.0, ssh_server=None, ssh_keyfile=None):
        self._init_context(context)
        self.endpoint = endpoint
        # Random identity so the router can address replies back to us.
        self._id = b(uuid.uuid4().hex)
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.setsockopt(zmq.IDENTITY, self._id)
        self.socket.setsockopt(zmq.LINGER, 0)  # don't block on close
        get_connection(self.socket, endpoint, ssh_server, ssh_keyfile)
        self._timeout = timeout
        self.timeout = timeout * 1000  # milliseconds
        self.stream = ZMQStream(self.socket, tornado.ioloop.IOLoop.instance())

    def _init_context(self, context):
        # Fall back to the process-wide shared context when none is given.
        self.context = context or zmq.Context.instance()

    def stop(self):
        """Stop receiving, disconnect, and close the stream."""
        self.stream.stop_on_recv()
        # only supported by libzmq >= 3
        if hasattr(self.socket, 'disconnect'):
            self.socket.disconnect(self.endpoint)
        self.stream.close()

    def send_message(self, command, **props):
        # Returns the Future produced by the call() coroutine.
        return self.call(make_message(command, **props))

    @tornado.gen.coroutine
    def call(self, cmd):
        """Send *cmd* (a mapping) and wait for the reply with a matching id.

        Raises CallError on serialization or transport errors.
        """
        if isinstance(cmd, string_types):
            raise DeprecationWarning('call() takes a mapping')
        # Tag the command so the reply can be matched among incoming
        # messages on the shared DEALER socket.
        call_id = uuid.uuid4().hex
        cmd['id'] = call_id
        try:
            cmd = json.dumps(cmd)
        except ValueError as e:
            raise CallError(str(e))
        try:
            yield tornado.gen.Task(self.stream.send, cmd)
        except zmq.ZMQError as e:
            raise CallError(str(e))
        while True:
            messages = yield tornado.gen.Task(self.stream.on_recv)
            for message in messages:
                try:
                    res = json.loads(message)
                    if res.get('id') != call_id:
                        # we got the wrong message
                        continue
                    raise tornado.gen.Return(res)
                except ValueError as e:
                    raise CallError(str(e))
class AsyncCircusClient(object):
    """Asynchronous circus client (coroutine variant): JSON commands over
    a DEALER ZMQStream."""

    def __init__(self, context=None, endpoint=DEFAULT_ENDPOINT_DEALER,
                 timeout=5.0, ssh_server=None, ssh_keyfile=None):
        self._init_context(context)
        self.endpoint = endpoint
        # Random identity so the router can address replies back to us.
        self._id = b(uuid.uuid4().hex)
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.setsockopt(zmq.IDENTITY, self._id)
        self.socket.setsockopt(zmq.LINGER, 0)  # don't block on close
        get_connection(self.socket, endpoint, ssh_server, ssh_keyfile)
        self._timeout = timeout
        self.timeout = timeout * 1000  # milliseconds
        self.stream = ZMQStream(self.socket, tornado.ioloop.IOLoop.instance())

    def _init_context(self, context):
        # Fall back to the process-wide shared context when none is given.
        self.context = context or zmq.Context.instance()

    def stop(self):
        """Stop receiving, disconnect, and close the stream."""
        self.stream.stop_on_recv()
        # only supported by libzmq >= 3
        if hasattr(self.socket, 'disconnect'):
            self.socket.disconnect(self.endpoint)
        self.stream.close()

    @tornado.gen.coroutine
    def send_message(self, command, **props):
        # Resolve the call() coroutine and surface its result.
        res = yield self.call(make_message(command, **props))
        raise tornado.gen.Return(res)

    @tornado.gen.coroutine
    def call(self, cmd):
        """Send *cmd* (a mapping) and wait for the reply with a matching id.

        Raises CallError on serialization or transport errors.
        """
        if isinstance(cmd, string_types):
            raise DeprecationWarning('call() takes a mapping')
        # Tag the command so the reply can be matched among incoming
        # messages on the shared DEALER socket.
        call_id = uuid.uuid4().hex
        cmd['id'] = call_id
        try:
            cmd = json.dumps(cmd)
        except ValueError as e:
            raise CallError(str(e))
        try:
            yield tornado.gen.Task(self.stream.send, cmd)
        except zmq.ZMQError as e:
            raise CallError(str(e))
        while True:
            messages = yield tornado.gen.Task(self.stream.on_recv)
            for message in messages:
                try:
                    res = json.loads(message)
                    if res.get('id') != call_id:
                        # we got the wrong message
                        continue
                    raise tornado.gen.Return(res)
                except ValueError as e:
                    raise CallError(str(e))
class AppClient(object):
    """DEALER client that heartbeats a server and tracks last receive time."""

    def __init__(self):
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        self.endpoint = "tcp://127.0.0.1:5556"
        self.client = self.ctx.socket(zmq.DEALER)
        # Without linger and timeouts you might have problems when closing context
        self.client.setsockopt(zmq.LINGER, 0)
        self.client.setsockopt(zmq.RCVTIMEO, 5000)  # 5s
        self.client.setsockopt(zmq.SNDTIMEO, 5000)
        print("Connecting to", self.endpoint)
        self.client.connect(self.endpoint)
        self.client = ZMQStream(self.client)
        self.client.on_recv(self.on_recv)
        self.periodic = PeriodicCallback(self.periodictask, 1000)
        self.last_recv = None

    def disconnect(self):
        """Stop the heartbeat and tear down stream + context."""
        if self.ctx is not None:
            try:
                self.periodic.stop()
                print("Closing socket and context")
                self.client.close()
                self.ctx.term()
            except Exception as e:
                print(e)

    def periodictask(self):
        """Ping the server when nothing has been received for 5 seconds."""
        if self.client is None:
            return
        if not self.last_recv or self.last_recv + timedelta(
                seconds=5) < datetime.utcnow():
            print("No data from remote (5s)... [ping]")
            print("Sending HELLO to server")
            msg = HelloMessage()
            msg.send(self.client)

    def start(self):
        """Run the IO loop until interrupted.

        BUG FIX: the initial HELLO used to be sent *after* loop.start(),
        which only returns once the loop stops -- so it was never sent
        while the connection was live. Send it before entering the loop.
        """
        try:
            self.periodic.start()
            msg = HelloMessage()
            msg.send(self.client)
            self.loop.start()
        except KeyboardInterrupt:
            print("\n\nCtrl+C detected\n")
        except Exception as E:
            print("Error detected")
            print(str(E))
        finally:
            self.disconnect()

    def on_recv(self, msg):
        # Any inbound message refreshes the liveness timestamp.
        self.last_recv = datetime.utcnow()
        print("Received a message of type %s from server!" % msg[0])
class MainHandler(websocket.WebSocketHandler):
    """WebSocket handler bridging clients to backend workers over ZMQ
    PUSH (outbound) and SUB (inbound) ipc sockets.

    FIX: converted Python 2 print statements to print() calls and made
    the SUBSCRIBE option value bytes, as pyzmq on Python 3 requires.
    """

    _first = True

    @property
    def ref(self):
        # Identity of this handler instance, used to tag messages.
        return id(self)

    def initialize(self):
        print("WebSocket initialize")
        self.push_socket = ctx.socket(zmq.PUSH)
        self.sub_socket = ctx.socket(zmq.SUB)
        self.push_socket.connect("ipc:///tmp/ws_push")
        self.sub_socket.bind("ipc:///tmp/ws_sub")
        self.sub_socket.setsockopt(zmq.SUBSCRIBE, b"")  # receive everything
        self.zmq_stream = ZMQStream(self.sub_socket)
        self.zmq_stream.on_recv(self.zmq_msg_recv)

    def open(self, *args, **kwargs):
        print("WebSocket opened", args, kwargs)

    def on_message(self, message):
        """Wrap the websocket payload in the internal protocol and push it;
        the first message of a connection is flagged as 'connect'."""
        print("WebSocket on_message")
        if self._first:
            msg = {'message': message, 'id': self.ref, 'action': 'connect'}
            self._first = False
        else:
            msg = {'message': message, 'id': self.ref, 'action': 'message'}
        self.push_socket.send_pyobj(msg)

    def on_close(self):
        """Notify the worker, then close every ZMQ resource."""
        print("WebSocket closed")
        msg = {'message': '', 'id': id(self), 'action': 'close'}
        self.push_socket.send_pyobj(msg)
        self.zmq_stream.close()
        self.sub_socket.close()
        self.push_socket.close()

    def zmq_msg_recv(self, data):
        # NOTE(review): unlike the filtered variant of this handler, this
        # debug version forwards every message to every client (the id
        # filter is commented out in the original).
        print("zmq_msg_recv: %s" % repr(data))
        for message in data:
            message = pickle.loads(message)
            print(' = ', repr(message))
            self.write_message(json.dumps(message, indent=4))
class Worker(object):
    """Pulls fetch requests over ZMQ, runs them through *robot*, and
    publishes results; control traffic goes through a ClientMessenger."""

    def __init__(self, robot,
                 data_in_sock='ipc:///tmp/robot-data-m2w.sock',
                 data_out_sock='ipc:///tmp/robot-data-w2m.sock',
                 msg_in_sock='ipc:///tmp/robot-msg-m2w.sock',
                 msg_out_sock='ipc:///tmp/robot-msg-w2m.sock',
                 io_loop=None):
        self.identity = 'worker:%s:%s' % (socket.gethostname(), os.getpid())
        context = zmq.Context()
        self._io_loop = io_loop or IOLoop.instance()
        self._in_socket = context.socket(zmq.PULL)
        self._in_socket.connect(data_in_sock)
        # BUG FIX: the streams were built with the raw io_loop argument
        # (which may be None) instead of the resolved self._io_loop, so
        # they could end up on a different loop than the rest of the worker.
        self._in_stream = ZMQStream(self._in_socket, self._io_loop)
        self._out_socket = context.socket(zmq.PUB)
        self._out_socket.connect(data_out_sock)
        self._out_stream = ZMQStream(self._out_socket, self._io_loop)
        self._running = False
        self.robot = robot
        self.robot.set_worker_identity(self.identity)
        self.messenger = ClientMessenger(msg_in_sock, msg_out_sock,
                                         context, io_loop)

    def start(self):
        """Announce ourselves and begin consuming fetch requests."""
        logging.info('[%s] starting', self.identity)
        self.messenger.start()
        self.messenger.publish(CTRL_MSG_WORKER, self.identity,
                               CTRL_MSG_WORKER_ONLINE)
        self._in_stream.on_recv(self._on_receive_request)
        self._running = True

    def stop(self):
        self._running = False
        self.messenger.stop()

    def close(self):
        """Close every stream and socket owned by this worker."""
        self._in_stream.close()
        self._in_socket.close()
        self._out_stream.close()
        self._out_socket.close()
        self.messenger.close()

    def _on_receive_request(self, zmq_msg):
        # Deserialize and hand the request straight to the robot.
        msg = RequestMessage.deserialize(zmq_msg)
        request = msg.request
        logging.debug('[%s] receive request(%s)', self.identity, request.url)
        self.robot.fetch(request)
class AppServer(object):
    """ROUTER server that heartbeats connected clients and drops stale ones."""

    def __init__(self):
        self.listen = "127.0.0.1"
        self.port = 5556
        self.ctx = zmq.Context()
        self.loop = IOLoop.instance()
        # identity -> last time we heard from that client
        self.client_identities = {}
        router = self.ctx.socket(zmq.ROUTER)
        # Without linger and timeouts you might have problems when closing context
        router.setsockopt(zmq.LINGER, 0)
        router.setsockopt(zmq.RCVTIMEO, 5000)  # 5s
        router.setsockopt(zmq.SNDTIMEO, 5000)
        bind_addr = "tcp://%s:%s" % (self.listen, self.port)
        router.bind(bind_addr)
        print("Server listening for new client connections at", bind_addr)
        self.server = ZMQStream(router)
        self.server.on_recv(self.on_recv)
        self.periodic = PeriodicCallback(self.periodictask, 1000)

    def start(self):
        """Run the heartbeat and the IO loop; tear down on Ctrl+C."""
        self.periodic.start()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            self.periodic.stop()
            print("\nClosing socket and context\n")
            self.server.close()
            self.ctx.term()

    def periodictask(self):
        """Send HELLO to live clients; expire those silent for >10s."""
        stale_clients = []
        for client_id, last_seen in self.client_identities.items():
            if last_seen + timedelta(seconds=10) < datetime.utcnow():
                stale_clients.append(client_id)
            else:
                HelloMessage().send(self.server, client_id)
        for client_id in stale_clients:
            print(
                "\nHaven't received a HELO from client %s recently. Dropping from list of connected clients."
                % client_id)
            del self.client_identities[client_id]
        sys.stdout.write(".")
        sys.stdout.flush()

    def on_recv(self, msg):
        # Frame 0 carries the ROUTER identity of the sender; remember when
        # we last heard from it so periodictask() can expire stale clients.
        identity, msg_type = msg[0], msg[1]
        self.client_identities[identity] = datetime.utcnow()
        print("Received message of type %s from client ID %s!" % (msg_type, identity))
def test_that_creating_mgmt_works(self):
    # End-to-end check: build the worker management interface over inproc
    # sockets, publish a QUIT control message, and verify the quit ACK.
    ctx = zmq.Context()
    io_loop = IOLoop.instance()

    def stop_looping(_msg):
        # Stop the loop once the management callback fires.
        io_loop.stop()

    # All endpoints are inproc so the test never touches the network.
    settings = Settings()
    settings.ZEROMQ_MASTER_PUSH = 'inproc://spyder-zmq-master-push'
    settings.ZEROMQ_WORKER_PROC_FETCHER_PULL = \
        settings.ZEROMQ_MASTER_PUSH
    settings.ZEROMQ_MASTER_SUB = 'inproc://spyder-zmq-master-sub'
    settings.ZEROMQ_WORKER_PROC_EXTRACTOR_PUB = \
        settings.ZEROMQ_MASTER_SUB
    settings.ZEROMQ_MGMT_MASTER = 'inproc://spyder-zmq-mgmt-master'
    settings.ZEROMQ_MGMT_WORKER = 'inproc://spyder-zmq-mgmt-worker'

    # Master side: PUB to send control messages, SUB to hear worker replies.
    pubsocket = ctx.socket(zmq.PUB)
    pubsocket.bind(settings.ZEROMQ_MGMT_MASTER)
    pub_stream = ZMQStream(pubsocket, io_loop)
    subsocket = ctx.socket(zmq.SUB)
    subsocket.setsockopt(zmq.SUBSCRIBE, "")
    subsocket.bind(settings.ZEROMQ_MGMT_WORKER)
    sub_stream = ZMQStream(subsocket, io_loop)

    mgmt = workerprocess.create_worker_management(settings, ctx, io_loop)
    mgmt.add_callback(ZMQ_SPYDER_MGMT_WORKER, stop_looping)
    mgmt.start()

    def assert_quit_message(msg):
        # The worker must acknowledge the QUIT before the loop stops.
        self.assertEqual(ZMQ_SPYDER_MGMT_WORKER_QUIT_ACK, msg.data)

    sub_stream.on_recv(assert_quit_message)

    death = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER,
                        data=ZMQ_SPYDER_MGMT_WORKER_QUIT)
    pub_stream.send_multipart(death.serialize())

    io_loop.start()

    # Teardown: close every stream and socket before terminating the context,
    # otherwise ctx.term() would block on live sockets.
    mgmt._out_stream.close()
    mgmt._in_stream.close()
    mgmt._publisher.close()
    mgmt._subscriber.close()
    pub_stream.close()
    pubsocket.close()
    sub_stream.close()
    subsocket.close()
    ctx.term()
class MultiplexPubSub(object):
    """Fans ZMQ SUB messages out to a set of registered callbacks."""

    def __init__(self):
        self.callbacks = set()

    def add_callback(self, callback):
        self.callbacks.add(callback)

    def remove_callback(self, callback):
        self.callbacks.remove(callback)

    @tornado.gen.coroutine
    def on_recv(self, data):
        # Frame 2 is a JSON payload; a bare `1` means frame 3 holds the
        # raw value instead.
        data[2] = json.loads(data[2])
        if data[2] == 1:
            data[2] = {"value": data[3]}
        if "timestamp" not in data[2]:
            data[2]['timestamp'] = time.time()
        # BUG FIX: iterate over a snapshot -- the original removed entries
        # from self.callbacks while iterating it, which raises RuntimeError.
        for callback in list(self.callbacks):
            try:
                callback(data)
            except WebSocketClosedError:
                print("Closed by WebSocketClosedError!")
                # BUG FIX: dropped the original `data.on_close()` call --
                # `data` is the message list and has no on_close(), so that
                # line could only raise AttributeError.
                self.callbacks.discard(callback)

    def connect(self):
        """Open the SUB (inbound) and PUB (outbound) sockets."""
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.connect('tcp://house-nas:10900')
        self.send_socket = self.context.socket(zmq.PUB)
        self.send_socket.connect('tcp://house-nas:10901')
        print("socket connect:", self.socket, self.on_recv)
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.on_recv)

    def send(self, data):
        self.send_socket.send_multipart([str(i) for i in json.loads(data)])

    def subscribe(self, channel_id):
        print("subscribe:", channel_id)
        self.socket.setsockopt(zmq.SUBSCRIBE, channel_id)

    def close(self):
        # BUG FIX: the original ended with a Python 2 statement
        # `print self.socket.close()` (a syntax error on Python 3 that
        # printed None); just close the socket.
        self.stream.close()
        self.socket.close()
def open(self):
    """Create a SUB stream to self.addr; log any setup failure.

    FIX: Python 3 ``except ... as`` / ``print()`` syntax; bytes value for
    the SUBSCRIBE option as pyzmq on Python 3 requires.
    NOTE(review): the stream is closed immediately after creation, matching
    the original flow -- presumably actual consumption happens elsewhere.
    """
    stream = None
    try:
        context = zmq.Context()
        socket = context.socket(zmq.SUB)
        socket.connect(self.addr)
        socket.setsockopt(zmq.SUBSCRIBE, b'')  # subscribe to all topics
        stream = ZMQStream(socket)
    except Exception as err:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print('ZMQStreamHandler ERROR getting ZMQ stream:', err)
        traceback.print_exception(exc_type, exc_value, exc_traceback)
    if stream and not stream.closed():
        stream.close()
class ZMQDownlinkConnector:
    """Wraps a SUB socket (subscribed to everything) in a ZMQStream."""

    def __init__(self, connect_addr):
        ctx = zmq.Context.instance()
        self._zmq_sub_socket = ctx.socket(zmq.SUB)
        self._zmq_sub_socket.setsockopt(zmq.RCVHWM, 0)  # unbounded receive queue
        self._zmq_sub_socket.connect(connect_addr)
        self._zmq_sub_stream = ZMQStream(self._zmq_sub_socket)
        # Empty prefix == subscribe to every message.
        self._zmq_sub_socket.setsockopt_string(zmq.SUBSCRIBE, '')

    def register_callback(self, callback):
        """Invoke *callback* for every multipart message received."""
        self._zmq_sub_stream.on_recv(callback)

    def close(self):
        self._zmq_sub_stream.close()
class Subscriber(object):
    """Local subscriber.

    Topic matching: subscribing to 'ehr:api:1' matches messages published
    under 'ehr', 'ehr:api' and 'ehr:api:1' -- supporting both direct push
    and broadcast delivery.
    """

    def __init__(self, callback):
        self.callback = callback
        self.topic = ''
        self.sock = context.socket(zmq.SUB)
        self.sock.connect('inproc:///tmp/hub_{}'.format(pid))
        self.stream = ZMQStream(self.sock)
        self.stream.on_recv(self.recv)

    def subscribe(self, topic):
        """Start listening for *topic* (a non-empty ':'-separated string)."""
        if not isinstance(topic, basestring) or not topic:
            return
        self.topic = topic
        # The ZMQ-level filter uses only the first segment; fine-grained
        # matching happens in recv().
        self.sock.setsockopt(zmq.SUBSCRIBE, str(topic.split(':')[0]))

    def unsubscribe(self):
        if self.topic:
            self.sock.setsockopt(zmq.UNSUBSCRIBE, str(self.topic.split(':')[0]))
        self.topic = ''

    def recv(self, msg):
        """Deliver the JSON body to the callback when its topic is a
        prefix (by ':' segments) of our subscribed topic."""
        _, body = msg
        try:
            payload = json.loads(body)
            incoming = payload.get('topic', '')
            if incoming and re.match(r'^{}(:.+)?$'.format(incoming), self.topic):
                self.callback(body)
        except:
            pass

    def close(self):
        self.topic = None
        self.callback = None
        self.stream.close()
class _Messenger(object): def __init__(self, in_sock, out_sock, context, io_loop=None): self._context = context self._io_loop = io_loop or IOLoop.instance() self._create_socket(in_sock, out_sock) self._in_stream = ZMQStream(self._in_socket, io_loop) self._out_stream = ZMQStream(self._out_socket, io_loop) self._callbacks = defaultdict(list) def _create_socket(self, in_sock, out_sock): raise NotImplementedError() def start(self): self._in_stream.on_recv(self._on_receive) def stop(self): self._in_stream.stop_on_recv() # self._publish(CTRL_MSG_WORKER, None, CTRL_MSG_WORKER_QUIT_ACK) # def close(self): self._in_stream.close() self._in_socket.close() self._out_stream.close() self._out_socket.close() def _on_receive(self, zmq_msg): msg = CtrlMessage.deserialize(zmq_msg) if msg.topic in self._callbacks: for callback in self._callbacks[msg.topic]: callback(msg) # if msg.data == CTRL_MSG_WORKER_QUIT: # self.stop() def add_callback(self, topic, callback): self._callbacks[topic].append(callback) def remove_callback(self, topic, callback): if topic in self._callbacks and callback in self._callbacks[topic]: self._callbacks[topic].remove(callback) def publish(self, topic, identity, data): msg = CtrlMessage(topic, identity, data) self._out_stream.send_multipart(msg.serialize())
def open(self):
    """Record open time and build a SUB stream to self.addr.

    FIX: Python 3 ``except ... as`` / ``print()`` syntax; bytes value for
    SUBSCRIBE as pyzmq on Python 3 requires.
    NOTE(review): the stream is torn down right away, as in the original.
    """
    self.time_opened = time.time()
    stream = None
    try:
        context = zmq.Context()
        socket = context.socket(zmq.SUB)
        socket.connect(self.addr)
        socket.setsockopt(zmq.SUBSCRIBE, b'')  # subscribe to all topics
        stream = ZMQStream(socket)
    except Exception as err:
        print('Error getting ZMQ stream:', err)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        traceback.print_tb(exc_traceback, limit=30)
    if stream and not stream.closed():
        stream.close()
class Msglet(object):
    """Tiny SUB-socket listener that forwards messages to a callback."""

    def __init__(self, callback):
        self.callback = callback
        self.bind()

    def bind(self):
        """(Re)create the SUB socket and hook the callback to its stream."""
        self.socket = ctx.socket(zmq.SUB)
        self.socket.connect(config['protocol'])
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.callback)

    def subscribe(self, channel):
        # Filter on the channel-name prefix.
        self.socket.setsockopt_string(zmq.SUBSCRIBE, channel)

    def close(self):
        self.stream.close()
def open(self):
    """Record open time and build a SUB stream to self.addr.

    FIX: Python 3 ``except ... as`` / ``print()`` syntax; bytes value for
    SUBSCRIBE as pyzmq on Python 3 requires.
    NOTE(review): the stream is torn down right away, as in the original.
    """
    self.time_opened = time.time()
    stream = None
    try:
        context = zmq.Context()
        socket = context.socket(zmq.SUB)
        socket.connect(self.addr)
        socket.setsockopt(zmq.SUBSCRIBE, b"")  # subscribe to all topics
        stream = ZMQStream(socket)
    except Exception as err:
        print("Error getting ZMQ stream:", err)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        traceback.print_tb(exc_traceback, limit=30)
    if stream and not stream.closed():
        stream.close()
class AsyncReciever(Reciever):
    """
    similar to Reciever, except you can bind a callback using the
    on_recieve method that will fire as soon as the socket recieves a
    message. See `on_recieve`
    """

    def __init__(self, socket, origin=''):
        # BUG FIX: super(type(self), ...) breaks subclasses -- if a
        # subclass inherits this __init__, type(self) is the subclass and
        # the super() call recurses forever. Name the class explicitly.
        super(AsyncReciever, self).__init__(socket, origin)
        self.stream = ZMQStream(self.socket)

    def on_recieve(self, callback):
        """
        Sets `callback` to be run whenever this receiver gets a message.
        the callback is a runnable that takes one parameter: a message

        You may set callback to None to pause message handling.
        """
        def zmq_cb(multipart):
            # Parse the raw multipart frames before handing them over.
            message = self.parse_zeromq_parts(multipart)
            return callback(message)

        if callback is None:
            zmq_cb = None
        self.stream.on_recv(zmq_cb)

    def close(self):
        """
        close the ZeroMQ socket stream
        """
        self.on_recieve(None)
        return self.stream.close()
class ZMQPubSub(object):
    """Subscribes to a local publisher and feeds every message to *callback*."""

    def __init__(self, callback):
        self.callback = callback

    def connect(self):
        """Open the SUB socket and attach the callback to its stream."""
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.connect('tcp://127.0.0.1:6666')
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.callback)

    def subscribe(self):
        # Empty prefix: receive every topic.
        self.socket.setsockopt_string(zmq.SUBSCRIBE, "")

    def disconnect(self):
        """Close the stream, then the socket it wraps."""
        self.stream.close()
        self.socket.close()
class LiveLogHandler(WebSocketHandler):
    """Streams live IRC log lines for (network, channel) to the websocket."""

    def open(self, network, channel):
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.connect("tcp://127.0.0.1:{0}".format(config.ZEROMQ_PORT))
        # Filter on the "network~channel" topic prefix.
        self.socket.setsockopt(zmq.SUBSCRIBE, "{0}~{1}".format(network,channel))
        self.zmq_stream = ZMQStream(self.socket)
        self.zmq_stream.on_recv(self.on_zmq_msg_receive)

    def on_close(self):
        self.zmq_stream.close()
        self.socket.close()

    def on_zmq_msg_receive(self, data):
        # First line of the frame is the topic header; the rest are the
        # actual log lines, formatted before delivery.
        payload = data[0]
        lines = payload.split("\n")[1:]
        self.write_message("\n".join([irc_format(line) for line in lines]))
class CommandConnection(SockJSConnection):
    """
    Take commands and return responses.
    """

    # NOTE(review): Python 2 print statement below -- this block predates
    # Python 3.
    def on_open(self, info):
        print "CommandConnection:", info
        socket = self.context.socket(zmq.DEALER)
        # BUG(review): connect() is called without an endpoint, which
        # raises TypeError at runtime. The target address is missing and
        # cannot be inferred from this file -- needs the real endpoint.
        socket.connect()
        self.stream = ZMQStream(socket)
        self.stream.on_recv(self.on_recv_zmq)

    def on_close(self):
        self.stream.close()

    def on_recv_zmq(self, message):
        # Forward the raw ZMQ reply straight to the SockJS client.
        self.send(message)
class ZMQPubSub(object):
    """SUB-socket wrapper that forwards received messages to *callback*."""

    def __init__(self, callback):
        self.callback = callback

    def connect(self):
        """Open the SUB socket and attach the callback to its stream."""
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.connect('tcp://house-nas:10900')
        print("socket connect:", self.socket)
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self.callback)

    def subscribe(self, channel_id):
        self.socket.setsockopt(zmq.SUBSCRIBE, channel_id)

    def close(self):
        # BUG FIX: the original ended with `print self.socket.close()` --
        # a Python 2 print statement (syntax error on Python 3) that only
        # printed the None returned by close(). Close both, no printing.
        self.stream.close()
        self.socket.close()
class ZMQCameraPubSub(object):
    """SUB client for camera streams with a random per-instance topic name."""

    def __init__(self, callback):
        self.callback = callback
        # Random 6-char identity used as an extra subscription topic.
        alphabet = string.ascii_lowercase + string.digits
        self.name = "".join(random.choice(alphabet) for x in range(6))

    def connect(self, stream):
        """Connect to *stream* and subscribe to everything plus our name."""
        self.context = zmq.Context()
        self.subscriber = self.context.socket(zmq.SUB)
        self.subscriber.setsockopt(zmq.LINGER, 0)
        self.subscriber.connect(stream)
        self.subscriber = ZMQStream(self.subscriber)
        # copy=False: deliver zero-copy frames to the callback.
        self.subscriber.on_recv(self.callback, copy=False)
        self.subscriber.setsockopt(zmq.SUBSCRIBE, b"")
        self.subscriber.setsockopt(zmq.SUBSCRIBE, self.name.encode('ascii'))

    def close(self):
        if self.subscriber:
            self.subscriber.stop_on_recv()
            self.subscriber.close()
            self.subscriber = None

    def subscribe(self, to, topic=''):
        """Subscribe to "to" (or "to.topic" when a topic is given)."""
        target = to if not topic else f"{to}.{topic}"
        self.subscriber.setsockopt(zmq.SUBSCRIBE, target.encode('ascii'))

    def unsubscribe(self, to, topic=''):
        """Undo a matching subscribe()."""
        target = to if not topic else f"{to}.{topic}"
        self.subscriber.setsockopt(zmq.UNSUBSCRIBE, target.encode('ascii'))
class WSHandler(WebSocketHandler):
    """Pushes graph updates from the ZMQ bus to websocket clients,
    caching the latest graph in a module-level global."""

    def open(self):
        global Graph  # globals :(
        sub_sock = context.socket(zmq.SUB)
        sub_sock.connect("ipc:///tmp/netnetwork.sock")
        sub_sock.setsockopt(zmq.SUBSCRIBE, 'gupdates')
        self.sub_stream = ZMQStream(sub_sock)
        self.sub_stream.on_recv(self.on_zmq_recv)
        print('Opened a new websocket')
        # Replay the last known graph to the newly-connected client.
        if Graph is not None:
            self.write_message(Graph)

    def on_zmq_recv(self, msg):
        global Graph
        print("Forwarding message to client")
        # Frame 1 is the payload; cache it after a successful send.
        self.write_message(msg[1])
        Graph = msg[1]

    def on_close(self):
        print("Closing ws stream")
        self.sub_stream.close()
def main(settings):
    """
    Initialize the logger sink.
    """
    # Optional file-based logging configuration.
    if os.path.isfile('logging.conf'):
        logging.config.fileConfig('logging.conf')

    ctx = zmq.Context()
    io_loop = IOLoop.instance()

    # SUB socket is bound (not connected): log producers connect to us.
    log_sub = ctx.socket(zmq.SUB)
    log_sub.setsockopt(zmq.SUBSCRIBE, "")
    log_sub.bind(settings.ZEROMQ_LOGGING)

    log_stream = ZMQStream(log_sub, io_loop)
    log_stream.on_recv(log_zmq_message)

    def handle_shutdown_signal(_sig, _frame):
        """
        Called from the os when a shutdown signal is fired.
        """
        # Stop consuming, flush pending messages, then stop the loop.
        log_stream.stop_on_recv()
        log_stream.flush()
        io_loop.stop()

    # handle kill signals
    signal.signal(signal.SIGINT, handle_shutdown_signal)
    signal.signal(signal.SIGTERM, handle_shutdown_signal)

    try:
        io_loop.start()
    except ZMQError:
        LOGGERS['master'].debug("Caught a ZMQError. Hopefully during shutdown")
        LOGGERS['master'].debug(traceback.format_exc())

    # Close the stream before terminating the context so term() can return.
    log_stream.close()
    ctx.term()
class ZeroMQHandler(tornado.websocket.WebSocketHandler):
    """WebSocket <-> ZMQ REQ bridge; one REQ socket per connection."""

    def __init__(self, *args, **kwargs):
        super(ZeroMQHandler, self).__init__(*args, **kwargs)
        self.socket = None
        self.stream = None

    def open(self):
        cfg = self.application.settings
        self.socket = cfg['zeromq']['context'].socket(REQ)
        self.socket.connect(cfg['zeromq']['url'])
        self.stream = ZMQStream(self.socket, cfg['ioloop'])
        self.stream.on_recv(self.on_dispatch)

    def on_message(self, message):
        # Validate the request before forwarding; reject malformed input.
        if load_message(message):
            self.stream.send(message.encode('utf8'))
        else:
            self.write_message(ERROR_INVALID_REQUEST)

    def on_dispatch(self, messages):
        # NOTE(review): assumes each message is str -- on Python 3, pyzmq
        # delivers bytes, which have no encode(); confirm the runtime.
        for message in messages:
            self.write_message(message.encode('utf8'))

    def on_close(self):
        self.stream.close()
        self.socket.close()

    def check_origin(self, origin):
        # Allow cross-origin websocket connections.
        return True

    def data_received(self, chunk):
        pass
class SocketConnection(sockjs.tornado.SockJSConnection):
    """SockJS connection wired to an IPC pub/sub pair.

    NOTE(review): every connection bind()s a publisher to the same
    IPC_SOCKET; a second concurrent connection would fail to bind --
    confirm whether more than one client is ever expected.
    """

    clients = set()

    # Instantiate context only once
    # TODO: it will be a good idea to create in somewhere in the application
    # init method, but connection object does'nt have an access to it.
    # Maybe we should put it into a SockRouter?
    context = zmq.Context()

    def on_open(self, request):
        self.clients.add(self)
        pub = self.context.socket(zmq.PUB)
        pub.bind(IPC_SOCKET)
        self.publish_stream = ZMQStream(pub)
        sub = self.context.socket(zmq.SUB)
        sub.connect(IPC_SOCKET)
        sub.setsockopt(zmq.SUBSCRIBE, '')
        self.subscribe_stream = ZMQStream(sub)
        self.subscribe_stream.on_recv(self.on_receive_message)

    def on_message(self, message):
        self.publish_stream.send_unicode(message)

    def on_receive_message(self, message):
        self.send(message)

    def on_close(self):
        self.clients.remove(self)
        # Properly close ZMQ sockets
        self.publish_stream.close()
        self.subscribe_stream.close()
class MainHandler(websocket.WebSocketHandler):
    """
    Main tornado handler receiving all websocket connections; relays
    traffic to a django worker over a ZMQ PUSH socket and back over SUB.
    """

    _first = True
    _namespace = 'default'

    @property
    def ref(self):
        # Stable per-connection identifier used to route replies back.
        return id(self)

    def initialize(self):
        cfg = self.application.settings
        self.push_socket = ctx.socket(zmq.PUSH)
        self.sub_socket = ctx.socket(zmq.SUB)
        self.push_socket.connect(cfg['push_socket'])
        self.sub_socket.connect(cfg['sub_socket'])
        self.sub_socket.setsockopt(zmq.SUBSCRIBE, "")
        self.zmq_stream = ZMQStream(self.sub_socket)
        self.zmq_stream.on_recv(self.zmq_msg_recv)

    def open(self, *args, **kwargs):
        """
        Pick the namespace from the url if present, otherwise keep the
        default namespace.
        """
        if kwargs.get('namespace'):
            self._namespace = kwargs['namespace']

    def on_message(self, message):
        """
        Wrap the websocket message in the internal protocol and push it to
        the django worker; the first message is flagged as 'connect'.
        """
        if self._first:
            msg = {'action': 'connect'}
            self._first = False
        else:
            msg = {'action': 'message'}
        msg['namespace'] = self._namespace
        msg['message'] = message
        msg['id'] = self.ref
        self.push_socket.send_pyobj(msg)

    def on_close(self):
        """
        Tell the worker we're gone and tear down every related ZMQ socket.
        """
        msg = {'message': '', 'id': self.ref, 'action': 'close'}
        msg['namespace'] = self._namespace
        self.push_socket.send_pyobj(msg)
        self.zmq_stream.close()
        self.sub_socket.close()
        self.push_socket.close()

    def zmq_msg_recv(self, data):
        """
        Deliver worker replies addressed to this connection straight to
        the websocket client.
        """
        for raw in data:
            reply = pickle.loads(raw)
            if reply['id'] != self.ref:
                continue
            self.write_message(reply['message'])
class TestMNClient(TestCase):
    """Tests for MNClient against a fake in-process ROUTER broker.

    The broker is a plain ZMQStream bound to ``endpoint``; ``_on_msg``
    either echoes a REPLY (do_reply=True) or stops the ioloop so the
    test can inspect what the client sent.
    """

    endpoint = b'tcp://127.0.0.1:5555'
    service = b'test'

    def setUp(self):
        if _do_print:
            print('Setting up...')
        self.context = zmq.Context()
        self.broker = None
        self._msgs = []
        return

    def tearDown(self):
        if _do_print:
            print('Tearing down...')
        if self.broker:
            self._stop_broker()
        self.broker = None
        self._msgs = []
        self.context.term()
        self.context = None
        return

    def _on_msg(self, msg):
        """Fake-broker receive hook: record the message, then either send
        a canned REPLY or stop the ioloop."""
        self._msgs.append(msg)
        if _do_print:
            print('broker received:', msg)
        if self.broker.do_reply:
            # Keep the routing header (first 4 frames) and append a body.
            new_msg = msg[:4]
            new_msg.append(b'REPLY')
            self.broker.send_multipart(new_msg)
        else:
            IOLoop.instance().stop()
        return

    def _start_broker(self, do_reply=False):
        """Helper activating a fake broker in the ioloop.
        """
        if _do_print:
            print('Starting broker at', self.endpoint)
        socket = self.context.socket(zmq.ROUTER)
        self.broker = ZMQStream(socket)
        self.broker.socket.setsockopt(zmq.LINGER, 0)
        self.broker.bind(self.endpoint)
        self.broker.on_recv(self._on_msg)
        # Attribute stashed on the stream object itself; read in _on_msg.
        self.broker.do_reply = do_reply
        return

    def _stop_broker(self):
        """Close the fake broker's socket and stream, if running."""
        if _do_print:
            print('Stopping broker')
        if self.broker:
            self.broker.socket.close()
            self.broker.close()
            self.broker = None
        return

    # Tests from here

    def test_01_create_01(self):
        """Test MNClient simple create.
        """
        client = MNClient(self.context, self.endpoint, self.service)
        self.assertEqual(self.endpoint, client.endpoint)
        self.assertEqual(self.service, client.service)
        client.shutdown()
        return

    def test_02_send_01(self):
        """Test MNClient simple request.
        """
        self._start_broker()
        client = MNClient(self.context, self.endpoint, self.service)
        client.request(b'XXX')
        IOLoop.instance().start()
        client.shutdown()
        self.assertEqual(len(self._msgs), 1)
        rmsg = self._msgs[0]
        # msg[0] is identity of sender
        self.assertEqual(rmsg[1], b'')  # routing delimiter
        self.assertEqual(rmsg[2], client._proto_version)
        self.assertEqual(rmsg[3], self.service)
        self.assertEqual(rmsg[4], b'XXX')
        self._stop_broker()
        return

    def test_02_send_02(self):
        """Test MNClient multipart request.
        """
        mydata = [b'AAA', b'bbb']
        self._start_broker()
        client = MNClient(self.context, self.endpoint, self.service)
        client.request(mydata)
        IOLoop.instance().start()
        client.shutdown()
        self.assertEqual(len(self._msgs), 1)
        rmsg = self._msgs[0]
        # msg[0] is identity of sender
        self.assertEqual(rmsg[1], b'')  # routing delimiter
        self.assertEqual(rmsg[2], client._proto_version)
        self.assertEqual(rmsg[3], self.service)
        self.assertEqual(rmsg[4:], mydata)
        self._stop_broker()
        return

    def test_02_send_03(self):
        """Test MNClient request in invalid state.
        """
        client = MNClient(self.context, self.endpoint, self.service)
        client.request(b'XXX')  # ok
        # Second request before a reply must be rejected.
        self.assertRaises(InvalidStateError, client.request, b'AAA')
        client.shutdown()
        return

    def test_03_timeout_01(self):
        """Test MNClient request w/ timeout.
        """
        # No broker is running, so the request must time out.
        client = MyClient(self.context, self.endpoint, self.service)
        client.request(b'XXX', 20)  # 20 millisecs timeout
        IOLoop.instance().start()
        client.shutdown()
        self.assertEqual(client.timed_out, True)
        return

    def test_04_receive_01(self):
        """Test MNClient message receive.
        """
        self._start_broker(do_reply=True)
        client = MyClient(self.context, self.endpoint, self.service)
        client.request(b'XXX')
        IOLoop.instance().start()
        client.shutdown()
        self._stop_broker()
        self.assertEqual(True, hasattr(client, 'last_msg'))
        self.assertEqual(3, len(client.last_msg))
        self.assertEqual(b'REPLY', client.last_msg[-1])
        self.assertEqual(self.service, client.last_msg[-2])
        return
class MqAsyncReq(object):
    """Class for the MDP client side.

    Thin asynchronous encapsulation of a zmq.REQ socket.
    Provides a :func:`request` method with optional timeout.

    Objects of this class are meant to be integrated into the
    asynchronous IOLoop of pyzmq.

    The endpoint is resolved from the domogik config loader when
    available, otherwise from the ``Parameter`` model.

    :param context: the ZeroMQ context to create the socket in.
    :type context: zmq.Context
    :param service: the service the client should use
    :type service: str
    """

    _proto_version = b'MDPC01'

    def __init__(self, context, service):
        """Initialize the MDPClient and connect to the resolved endpoint."""
        if "domogik.common.configloader" in sys.modules:
            cfg = Loader('mq').load()
            # BUG FIX: was `confi = dict(cfg[1])`, which left `config`
            # undefined and raised NameError on the next line.
            config = dict(cfg[1])
            self.endpoint = "tcp://{0}:{1}".format(config['ip'],
                                                   config['req_rep_port'])
        else:
            ip = Parameter.objects.get(key='mq-ip')
            port = Parameter.objects.get(key='mq-req_rep_port')
            self.endpoint = "tcp://{0}:{1}".format(ip.value, port.value)
        socket = ZmqSocket(context, zmq.REQ)
        ioloop = IOLoop.instance()
        self.service = service
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.can_send = True
        # FIX: use the class-level protocol version instead of the free
        # name PROTO_VERSION, keeping the class self-contained and
        # consistent with `_proto_version` above.
        self._proto_prefix = [self._proto_version, service]
        self._tmo = None
        self.timed_out = False
        socket.connect(self.endpoint)
        return

    def shutdown(self):
        """Method to deactivate the client connection completely.

        Will delete the stream and the underlying socket.

        .. warning:: The instance MUST not be used after
           :func:`shutdown` has been called.

        :rtype: None
        """
        if not self.stream:
            return
        # LINGER 0 so close() does not block on unsent messages.
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        return

    def request(self, msg, timeout=None):
        """Send the given message.

        :param msg: message parts to send.
        :type msg: list of str
        :param timeout: time to wait in milliseconds.
        :type timeout: int

        :raises InvalidStateError: if a request is already in flight.
        :rtype None:
        """
        if not self.can_send:
            raise InvalidStateError()
        # Accept a single frame for convenience; normalize to a list.
        if isinstance(msg, (bytes, str)):
            msg = [msg]
        # prepare full message: protocol prefix + user payload
        to_send = self._proto_prefix[:]
        to_send.extend(msg)
        self.stream.send_multipart(to_send)
        self.can_send = False
        if timeout:
            self._start_timeout(timeout)
        return

    def _on_timeout(self):
        """Helper called after timeout: mark state and notify subclass."""
        self.timed_out = True
        self._tmo = None
        self.on_timeout()
        return

    def _start_timeout(self, timeout):
        """Helper for starting the timeout.

        :param timeout: the time to wait in milliseconds.
        :type timeout: int
        """
        self._tmo = DelayedCallback(self._on_timeout, timeout)
        self._tmo.start()
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg: list of message parts.
        :type msg: list of str
        """
        if self._tmo:
            # disable timeout
            self._tmo.stop()
            self._tmo = None
        # setting state before invoking on_message, so we can request from there
        self.can_send = True
        self.on_message(msg)
        return

    def on_message(self, msg):
        """Public method called when a message arrived.

        .. note:: Does nothing. Should be overloaded!
        """
        pass

    def on_timeout(self):
        """Public method called when a timeout occured.

        .. note:: Does nothing. Should be overloaded!
        """
        pass
class MDPWorker(object):
    """Class for the MDP worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3     # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service):
        """Initialize the MDPWorker.

        context is the zmq context to create the socket from.
        service is a byte-string with the service name.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.

        Connects to the broker, sends READY and starts the heartbeat
        ticker.
        """
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        # [empty delimiter, protocol version, READY(0x01), service name]
        ready_msg = [b'', self._proto_version, b'\x01', self.service]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Sends a heartbeat; when HB_LIVENESS beats go unanswered, the
        connection is considered dead and is torn down and re-created
        after a 5 second delay.
        """
        self.curr_liveness -= 1
##        print '%.3f tick - %d' % (time.time(), self.curr_liveness)
        self.send_hb()
        if self.curr_liveness >= 0:
            return
##        print '%.3f lost connection' % time.time()
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        # [empty delimiter, protocol version, HEARTBEAT(0x04)]
        msg = [b'', self._proto_version, b'\x04']
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.

        NOTE(review): uses self.envelope, which is only set by
        _on_message on an incoming request — calling reply() before a
        request was received raises AttributeError.
        """
##         if self.need_handshake:
##             raise ConnectionNotReadyError()
        # prepare full message
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        msg is a list w/ the message parts
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if msg_type == b'\x05':  # disconnect
            self.curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == b'\x02':  # request
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', self._proto_version, b'\x03'] + envelope  # REPLY
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message
            # ignored
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
class MDPWorker(object):
    """Class for the MDP worker side.

    Thin encapsulation of a zmq.XREQ socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3     # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service):
        """Initialize the MDPWorker.

        context is the zmq context to create the socket from.
        service is a byte-string with the service name.
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.

        Connects to the broker, sends READY and starts the heartbeat
        ticker.
        """
        # XREQ is the legacy alias of DEALER; kept for compatibility.
        socket = self.context.socket(zmq.XREQ)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message.
        """
        # FIX: use byte literals instead of chr(1) so the protocol frames
        # stay bytes (consistent with the sibling MDPWorker implementation
        # and correct under Python 3, where chr() yields str).
        ready_msg = [b'', self._proto_version, b'\x01', self.service]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Sends a heartbeat; when HB_LIVENESS beats go unanswered, the
        connection is considered dead and is torn down and re-created
        after a 5 second delay.
        """
        self.curr_liveness -= 1
##        print '%.3f tick - %d' % (time.time(), self.curr_liveness)
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        # FIX: removed stray Py2-only debug print statement.
##        print '%.3f lost connection' % time.time()
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker.
        """
        # FIX: chr(4) -> b'\x04' (see _send_ready).
        msg = [b'', self._proto_version, b'\x04']
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.
        """
##         if self.need_handshake:
##             raise ConnectionNotReadyError()
        # prepare full message: self.envelope was stashed by _on_message
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        msg is a list w/ the message parts
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        # FIX: compare against byte literals — incoming frames are bytes,
        # so the old str literals ('\x05', '\x02') never matched on Py3.
        if msg_type == b'\x05':  # disconnect
            self.curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == b'\x02':  # request
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', self._proto_version, b'\x03'] + envelope  # REPLY
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message
            # ignored
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
class GameManagerServer(object):
    """ZMQ front-end for the GameManager.

    Owns two ROUTER sockets: one facing clients/matchmaker, one facing
    game workers. Incoming frames are stripped of the routing identity
    and handed to the GameManager; outgoing messages get the identity
    prepended again.
    """

    client_router_sock = None
    worker_router_sock = None
    address = None
    manager = None
    io_loop = None

    def __init__(self, addr="tcp://*:", port=config.GAME_MANAGER_PORT):
        super(GameManagerServer, self).__init__()
        self.context = zmq.Context()
        self.io_loop = ZMQIOLoop.instance()
        self.client_router_sock = self.context.socket(zmq.ROUTER)
        self.address = addr + str(port)
        self.client_router_sock.bind(self.address)
        self.worker_router_sock = self.context.socket(zmq.ROUTER)
        # NOTE(review): worker port is hardcoded while the client port is
        # configurable — consider lifting 6000 into config as well.
        self.worker_router_sock.bind("tcp://*:6000")
        # Wrap both raw sockets in ZMQStreams (rebinds the attributes).
        self.client_router_sock = ZMQStream(self.client_router_sock)
        self.client_router_sock.on_recv(self.recv_from_client)
        self.worker_router_sock = ZMQStream(self.worker_router_sock)
        self.worker_router_sock.on_recv(self.recv_from_game)
        self.manager = GameManager(self.send_to_client, self.send_to_game)

    def start(self):
        """Run the ioloop until interrupted, then close both streams."""
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            pass
        self.client_router_sock.close()
        self.worker_router_sock.close()

    def send_to_client(self, msg):
        """Route a manager message out to a client."""
        routable_msg = [msg[mpwp.MSG_TO]] + msg  # prepend routing IDENTITY
        self.client_router_sock.send_multipart(routable_msg)

    def send_to_game(self, msg):
        """Route a manager message out to a game worker."""
        routable_msg = [msg[mpwp.MSG_TO]] + msg  # prepend routing IDENTITY
        self.worker_router_sock.send_multipart(routable_msg)

    def recv_from_client(self, msg):
        """Dispatch a client-side frame to the manager.

        A frame whose routing identity matches its FROM field is a client
        message; otherwise it may be a matchmaker message addressed to
        the game manager.
        """
        if msg:
            router_id = msg[0]
            actual_msg = msg[1:]  # trim off router info
            if router_id == actual_msg[mpwp.MSG_FROM]:
                if actual_msg[mpwp.MSG_VERSION] == mpwp.VERSION:
                    self.manager.handle_client_incoming(actual_msg)
                else:
                    pass  # send VERSION_MISMATCH_ERROR
            else:
                if actual_msg[mpwp.MSG_TO] == mpwp.GAME_MANAGER_ID and actual_msg[mpwp.MSG_FROM] == mpwp.MATCHMAKER_ID:
                    to_id = router_id
                    from_id = actual_msg[mpwp.MSG_FROM]
                    msg_type = actual_msg[mpwp.MSG_TYPE]
                    msg_content = mpwp.msg_content(actual_msg)
                    self.manager.handle_matchmaker_incoming(to_id, from_id, msg_type, msg_content)
                else:
                    pass  # error, invalid message
        else:
            return  # fatal error

    def recv_from_game(self, msg):
        """Strip routing info and forward a worker frame to the manager."""
        router_id = msg[0]
        actual_msg = msg[1:]  # trim off router info
        self.manager.recv_from_game(actual_msg)
class AdminSocketHandler(SockJSConnection):
    """SockJS connection for admin users.

    After cookie-based auth, subscribes a SUB socket to every project's
    channel and forwards published events to the admin client.
    """

    def on_message_published(self, message):
        """Forward a published event to the admin, dropping the channel
        prefix (everything up to CHANNEL_DATA_SEPARATOR)."""
        actual_message = message[0]
        if six.PY3:
            actual_message = actual_message.decode()
        self.send(actual_message.split(CHANNEL_DATA_SEPARATOR, 1)[1])

    @coroutine
    def subscribe(self):
        """Register this connection for all projects and wire up the SUB
        stream."""
        projects, error = yield state.project_list()
        self.projects = [x['_id'] for x in projects]
        self.uid = uuid.uuid4().hex
        self.connections = self.application.admin_connections
        context = zmq.Context()
        subscribe_socket = context.socket(zmq.SUB)
        if self.application.zmq_pub_sub_proxy:
            # Single XPUB proxy endpoint.
            subscribe_socket.connect(self.application.zmq_xpub)
        else:
            # Direct connection to every publisher.
            for address in self.application.zmq_sub_address:
                subscribe_socket.connect(address)
        for project_id in self.projects:
            if project_id not in self.connections:
                self.connections[project_id] = {}
            self.connections[project_id][self.uid] = self
            channel_to_subscribe = create_project_channel_name(project_id)
            subscribe_socket.setsockopt_string(
                zmq.SUBSCRIBE, six.u(channel_to_subscribe)
            )
        self.subscribe_stream = ZMQStream(subscribe_socket)
        self.subscribe_stream.on_recv(self.on_message_published)
        logger.info('admin connected')

    def unsubscribe(self):
        """Remove this connection from the registry and close the stream.

        A connection that failed auth never set ``uid``; bail out early.
        """
        if not hasattr(self, 'uid'):
            return
        for project_id in self.projects:
            if not project_id in self.connections:
                continue
            try:
                del self.connections[project_id][self.uid]
            except KeyError:
                pass
            # Drop empty per-project dicts entirely.
            if not self.connections[project_id]:
                del self.connections[project_id]
        self.subscribe_stream.on_recv(None)
        self.subscribe_stream.close()
        logger.info('admin disconnected')

    def on_open(self, info):
        """Authenticate via the signed 'user' cookie; subscribe on
        success, close the connection otherwise."""
        try:
            value = info.cookies['user'].value
        except (KeyError, AttributeError):
            self.close()
        else:
            user = decode_signed_value(
                self.application.settings['cookie_secret'], 'user', value
            )
            if user:
                self.subscribe()
            else:
                self.close()

    def on_close(self):
        self.unsubscribe()
class Master(object):
    """Crawler master: pulls requests from a frontier and fans them out
    to workers over ZMQ, collecting processed responses.

    Data flow: workers publish results on ``data_in_sock`` (SUB here),
    the master pushes new requests on ``data_out_sock`` (PUSH here);
    control messages go through a separate ServerMessenger.
    """

    def __init__(self, frontier,
                 data_in_sock='ipc:///tmp/robot-data-w2m.sock',
                 data_out_sock='ipc:///tmp/robot-data-m2w.sock',
                 msg_in_sock='ipc:///tmp/robot-msg-w2m.sock',
                 msg_out_sock='ipc:///tmp/robot-msg-m2w.sock',
                 io_loop=None):
        # Unique identity for log lines and control messages.
        self.identity = 'master:%s:%s' % (socket.gethostname(), os.getpid())
        context = zmq.Context()
        self._io_loop = io_loop or IOLoop.instance()
        self._in_socket = context.socket(zmq.SUB)
        self._in_socket.setsockopt(zmq.SUBSCRIBE, '')
        self._in_socket.bind(data_in_sock)
        self._in_stream = ZMQStream(self._in_socket, io_loop)
        self._out_socket = context.socket(zmq.PUSH)
        self._out_socket.bind(data_out_sock)
        self._out_stream = ZMQStream(self._out_socket, io_loop)
        self._online_workers = set()
        self._running = False
        # Periodic pumps: dispatch requests every 100ms, reload hook every 1s.
        self._updater = PeriodicCallback(self._send_next, 100,
                                         io_loop=io_loop)
        self._reloader = PeriodicCallback(self.reload, 1000,
                                          io_loop=io_loop)
        self.frontier = frontier
        self.messenger = ServerMessenger(msg_in_sock, msg_out_sock,
                                         context, io_loop)

    def start(self):
        """Wire up callbacks and start the periodic dispatchers."""
        logging.info('[%s] starting', self.identity)
        self.messenger.add_callback(CTRL_MSG_WORKER, self._on_worker_msg)
        self.messenger.start()
        self._in_stream.on_recv(self._on_receive_processed)
        self._updater.start()
        self._reloader.start()
        self._running = True

    def stop(self):
        """Stop dispatching; sockets stay open until close()."""
        self._running = False
        self._reloader.stop()
        self._updater.stop()
        self.messenger.stop()
#        self.messenger.publish(CTRL_MSG_WORKER, self.identity,
#                               CTRL_MSG_WORKER_QUIT)

    def close(self):
        """Close all streams and sockets."""
        self._in_stream.close()
        self._in_socket.close()
        self._out_stream.close()
        self._out_socket.close()
        self.messenger.close()

    def reload(self):
        # Hook for subclasses; called every second by _reloader.
        pass

    def _on_worker_msg(self, msg):
        """Control-channel callback: track workers coming online."""
        if msg.data == CTRL_MSG_WORKER_ONLINE:
            self._online_workers.add(msg.identity)
            logging.info('[%s] append [%s]', self.identity, msg.identity)
            self._send_next()
#        if msg.data == CTRL_MSG_WORKER_QUIT_ACK:
#            if msg.identity in self._online_workers:
#                self._online_workers.remove(msg.identity)

    def _send_next(self):
        """Keep the outgoing queue topped up with frontier requests.

        NOTE(review): peeks at ZMQStream's private _send_queue to bound
        the backlog at 4 requests per online worker — fragile across
        pyzmq versions; verify on upgrade.
        """
        if not self._running:
            return
        worker_num = len(self._online_workers)
        if self._running and worker_num > 0:
            while self._out_stream._send_queue.qsize() < worker_num * 4:
                request = self.frontier.get_next_request()
                if not request:
                    break
                msg = RequestMessage(self.identity, request)
                self._out_stream.send_multipart(msg.serialize())
                logging.debug('[%s] send request(%s)',
                              self.identity, request.url)
                self.frontier.reload_request(request)

    def _on_receive_processed(self, zmq_msg):
        """Data-channel callback: log the processed response and refill."""
        msg = ResponseMessage.deserialize(zmq_msg)
        request = msg.response.request
        logging.debug('[%s] receive response(%s)',
                      self.identity, request.url)
        self._send_next()
class Broker(object):
    """This is implementation of broker

    You don't need to override any methods in this class. It works
    immediately. Just call start_listening() method

    :type context: Context
    :param context: instance of zmq.Context
    :param endpoint: listening address
    :type endpoint: str
    """

    def __init__(self, context, endpoint):
        socket = context.socket(zmq.ROUTER)
        socket.bind(endpoint)
        self.stream = ZMQStream(socket)
        self.stream.on_recv(self.on_message)
        # services, workers and multicast groups
        self._workers = {}
        self._services = {}
        self._multicasts = {}
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        return

    def start_listening(self):
        """Start listening to new messages
        """
        IOLoop.instance().start()

    def stop_listening(self):
        """Stop listening
        """
        IOLoop.instance().stop()

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and
        calls the appropriate method. If unknown, the message is ignored.

        :param msg: message parts
        :type msg: list of str

        :rtype: None
        """
        return_addresses, msg = self.split_address(msg)
        # dispatch on first frame after path
        method_to_call = None
        try:
            t = msg.pop(0)
            if t.startswith(b'MDPW'):
                method_to_call = self.on_worker
            elif t.startswith(b'MDPC'):
                method_to_call = self.on_client
            else:
                # Unknown protocol
                pass
        except (AttributeError, IndexError):
            # Wrong incoming msg format
            pass
        if method_to_call is not None:
            method_to_call(return_addresses, msg)
        return

    def on_client(self, return_addresses, message):
        """Method called on client message.

        Frame 0 of msg is the command id
        Frame 1 of msg is the requested service.
        The remaining frames are the request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is
           ignored.

        If a worker is available for the requested service, the message
        is repackaged and sent to the worker.

        If the service name starts with `mmi.`, the message is passed to
        the internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        If the service name starts with `multicast.`, the message is
        sent to all worker in that group.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        cmd = message.pop(0)  # always 0x01
        service = message.pop(0)
        # mmi requests
        if service.startswith(b'mmi.'):
            self.on_client_mmi(return_addresses, service, message)
            return
        # multicast requests
        if service.startswith(b'multicast.'):
            self.on_client_multicast(return_addresses, service, message)
            return
        # worker requests
        try:
            available_workers = self._services[service]
            random_worker = choice(available_workers)  # TODO: loadbalancing
            to_send = [random_worker.id, b'', MDP_WORKER_VERSION, b'\x02']
            to_send.extend(return_addresses)
            to_send.append(b'')
            to_send.extend(message)
            self.stream.send_multipart(to_send)
        except KeyError:
            # unknwon service
            self.client_response(return_addresses, b'broker',
                                 b'No worker available', error=True)
        return

    def on_client_multicast(self, return_addresses, service, message):
        """Handling multicast messages from client

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param service: name of mmi service
        :type service: str
        :param message: message parts
        :type message: list of str
        """
        target = service[10:]  # remove 'multicast.'
        try:
            # first, prepare list of workers in target multicast
            grouped_by_names = {}
            for worker in self._multicasts[target]:
                if worker.service in grouped_by_names:
                    grouped_by_names[worker.service].append(worker)
                else:
                    grouped_by_names[worker.service] = [worker]
            # send message to one worker per service
            sent_messages = []
            for name, workers in grouped_by_names.items():
                random_worker = choice(workers)  # TODO: loadbalancing
                to_send = [random_worker.id, b'', MDP_WORKER_VERSION, b'\x02']
                to_send.extend(return_addresses)
                to_send.append(b'')
                to_send.extend(message)
                self.stream.send_multipart(to_send)
                sent_messages.append(random_worker.service)
            # notify client with list of services in multicast group
            client_msg = return_addresses[:]
            client_msg.extend([b'', MDP_WORKER_VERSION, b'\x05'])
            client_msg.extend(sent_messages)
            self.stream.send_multipart(client_msg)
        except KeyError:
            # unknwon service
            self.client_response(return_addresses, b'broker',
                                 b'No services available in this multicast',
                                 error=True)
        return

    def on_client_mmi(self, return_addresses, service, message):
        """Handling MMI messages from client

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param service: name of mmi service
        :type service: str
        :param message: message parts
        :type message: list of str
        """
        if service == b'mmi.service':
            return self.on_client_mmi_service(return_addresses, service, message)
        elif service == b'mmi.services':
            return self.on_client_mmi_services(return_addresses, service, message)
        elif service == b'mmi.workers':
            return self.on_client_mmi_workers(return_addresses, service, message)
        elif service == b'mmi.multicasts':
            return self.on_client_mmi_multicasts(return_addresses, service, message)
        else:
            # unknown mmi service - notify client
            self.client_response(return_addresses, b'broker',
                                 b'Service not found', error=True)

    def on_client_mmi_service(self, return_addresses, service, message):
        """Check if services exists
        """
        # membership test directly on the dict (was `.keys()`)
        return self.client_response_pack(return_addresses, b'broker',
                                         message[0] in self._services)

    def on_client_mmi_services(self, return_addresses, service, message):
        """List of all services
        """
        return self.client_response_pack(return_addresses, b'broker',
                                         [k for k in self._services])

    def on_client_mmi_workers(self, return_addresses, service, message):
        """Number of workers per service
        """
        s = {}
        for se in self._services:
            s[se] = len(self._services[se])
        return self.client_response_pack(return_addresses, b'broker', s)

    def on_client_mmi_multicasts(self, return_addresses, service, message):
        """List of available multicast groups
        """
        m = {}
        for se in self._multicasts:
            m[se] = [s.service for s in self._multicasts[se]]
        return self.client_response_pack(return_addresses, b'broker', m)

    def on_worker(self, return_addresses, message):
        """Method called on worker message.

        Frame 0 of msg is the command id.
        The remaining frames depend on the command.

        This method determines the command sent by the worker and
        calls the appropriate method. If the command is unknown the
        message is ignored and a DISCONNECT is sent.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        cmd = message.pop(0)
        worker_cmds = {
            b'\x01': self.on_worker_ready,
            b'\x03': self.on_worker_partial_reply,
            b'\x04': self.on_worker_final_reply,
            b'\x05': self.on_worker_heartbeat,
            b'\x06': self.on_worker_disconnect,
            b'\x07': self.on_worker_multicast_add,  # this is not part of the Majordomo Protocol 0.2 !
            b'\x08': self.on_worker_exception,      # this is not part of the Majordomo Protocol 0.2 !
            b'\x09': self.on_worker_error,          # this is not part of the Majordomo Protocol 0.2 !
        }
        if cmd in worker_cmds:
            fnc = worker_cmds[cmd]
            fnc(return_addresses, message)
        else:
            # ignore unknown command
            pass
        return

    def on_worker_ready(self, return_addresses, message):
        """Called when new worker is ready to receive messages.

        Register worker to list of available services.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        # Frame 0 of msg is a service.
        service = message.pop(0)
        wid = return_addresses[0]
        return self.register_worker(wid, service)

    def on_worker_partial_reply(self, return_addresses, message):
        """Process worker PARTIAL REPLY command.

        Route the `message` to the client given by the address(es) in
        front of `message`.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message
        return_addresses, msg = self.split_address(message)
        self.client_response(return_addresses, wrep.service, msg, partial=True)
        return

    def on_worker_final_reply(self, return_addresses, message):
        """Process worker FINAL REPLY command.

        Route the `message` to the client given by the address(es) in
        front of `message`.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message
        return_addresses, msg = self.split_address(message)
        self.client_response(return_addresses, wrep.service, msg)
        return

    def on_worker_heartbeat(self, return_addresses, message):
        """Process worker HEARTBEAT command.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_worker_disconnect(self, return_addresses, message):
        """Process worker DISCONNECT command.

        Remove worker from list of services.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        wid = return_addresses[0]
        return self.unregister_worker(wid)

    def on_worker_multicast_add(self, return_addresses, message):
        """Process worker MULTICAST ADD command.

        Add worker to list of multicasts

        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        multicast_name = message.pop(0)
        wid = return_addresses[0]
        return self.register_multicast(wid, multicast_name)

    def on_worker_exception(self, return_addresses, message):
        """Process worker EXCEPTION command.

        Route the `message` to the client given by the address(es) in
        front of `message`.

        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message
        return_addresses, msg = self.split_address(message)
        self.client_response(return_addresses, wrep.service, msg,
                             exception=True)
        return

    def on_worker_error(self, return_addresses, message):
        """Process worker ERROR command.

        Route the `message` to the client given by the address(es) in
        front of `message`.

        This is not part of the Majordomo Protocol 0.2 !

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param message: message parts
        :type message: list of str

        :rtype: None
        """
        ret_id = return_addresses[0]
        try:
            wrep = self._workers[ret_id]
        except KeyError:
            return  # worker is gone, ignore this message
        return_addresses, msg = self.split_address(message)
        self.client_response(return_addresses, wrep.service, msg, error=True)
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        # BUG FIX: iterate over a snapshot — unregister_worker() deletes
        # from self._workers, which is illegal while iterating the live
        # view (RuntimeError on Python 3).
        for wrep in list(self._workers.values()):
            if not wrep.is_alive():
                self.on_log_event('worker.connection_timeout',
                                  "Worker connection timeout for service '%s'."
                                  % wrep.service)
                self.unregister_worker(wrep.id)
        return

    def client_response(self, return_addresses, service, msg,
                        partial=False, exception=False, error=False):
        """Package and send reply to client.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param service: name of service
        :type service: str
        :param msg: message parts
        :type msg: list of str | str

        :rtype: None
        """
        to_send = return_addresses[:]
        if error:
            t = b'\x04'
        elif exception:
            t = b'\x06'
        elif partial:
            t = b'\x02'
        else:
            t = b'\x03'
        to_send.extend([b'', MDP_WORKER_VERSION, t, service])
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def client_response_pack(self, return_addresses, service, msg,
                             partial=False):
        """Send message to client and pack it (msg) in msgpack format

        Exception and error messages are not allowed here.

        :param return_addresses: return address stack
        :type return_addresses: list of str
        :param service: name of service
        :type service: str
        :param msg: message to pack and send
        :type msg: mixed
        :param partial: if message is partial of final, default False
        :type partial: bool

        :rtype: None
        """
        packed = msgpack.Packer().pack(msg)
        self.client_response(return_addresses, service, packed,
                             partial=partial)

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid: the worker id.
        :type wid: str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        wrep.shutdown()
        service = wrep.service
        # remove worker from service list
        # BUG FIX: rebuild instead of removing while iterating the same
        # list, which can skip elements.
        if service in self._services:
            remaining = [w for w in self._services[service] if w.id != wid]
            if remaining:
                self._services[service] = remaining
            else:
                del self._services[service]
        # remove worker from multicasts
        for m_name in self._multicasts:
            mw = self._multicasts[m_name]
            for w in [w for w in mw if w.id == wid]:
                mw.remove(w)
        # delete empty rows
        empty_keys = [k for k, v in self._multicasts.items() if len(v) == 0]
        for k in empty_keys:
            del self._multicasts[k]
        del self._workers[wid]
        self.on_log_event('worker.unregister',
                          "Worker for service '%s' disconnected." % service)
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid: the worker id.
        :type wid: str
        :param service: the service name.
        :type service: str

        :rtype: None
        """
        if wid in self._workers:
            return
        worker = WorkerRep(wid, service, self.stream)
        self._workers[wid] = worker
        if service in self._services:
            s = self._services[service]
            s.append(worker)
        else:
            self._services[service] = [worker]
        self.on_log_event('worker.register',
                          "Worker for service '%s' is connected." % service)
        return

    def register_multicast(self, wid, multicast_name):
        """Add worker to multicast group

        :type wid: str
        :param wid: the worker id.
        :type multicast_name: str
        :param multicast_name: group name
        """
        if wid not in self._workers:
            return
        worker = self._workers[wid]
        if multicast_name in self._multicasts:
            m = self._multicasts[multicast_name]
            m.append(worker)
        else:
            self._multicasts[multicast_name] = [worker]
        self.on_log_event('worker.register_multicast',
                          "Service '%s' added to multicast group '%s'."
                          % (worker.service, multicast_name))
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown`
           has been called.

        :rtype: None
        """
        self.stream.on_recv(None)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self._workers = {}
        self._services = {}
        self._multicasts = {}
        return

    def on_log_event(self, event, message):
        """Override this method if you want to log events from broker

        :type event: str
        :param event: event type - used for filtering
        :type message: str
        :param message: log message

        :rtype: None
        """
        pass

    # helpers:
    def split_address(self, msg):
        """Function to split return Id and message received by ROUTER socket.

        Returns 2-tuple with return Id and remaining message parts.
        Empty frames after the Id are stripped.
        """
        ret_ids = []
        for part in msg:
            if part:
                ret_ids.append(part)
            else:
                break
        # BUG FIX: the original indexed the loop variable after the loop,
        # raising NameError for an empty `msg`; slicing by the number of
        # collected id frames is equivalent and safe.
        return ret_ids, msg[len(ret_ids) + 1:]
class MQRep(object):
    """Class for the MDP worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.
    """

    _proto_version = b'MDPW01'

    # TODO: integrate that into API
    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3     # HBs to miss before connection counts as dead

    def __init__(self, context, service):
        """Initialize the MDPWorker.

        :param context: the zmq context to create the socket from.
        :param service: byte-string with the service name.
        """
        if DEBUG:
            print("MQRep > __init__")
        # endpoint is read from the 'mq' configuration section
        cfg = Loader('mq').load()
        config = dict(cfg[1])
        if config['ip'].strip() == "*":
            config['ip'] = get_ip()
        self.endpoint = "tcp://{0}:{1}".format(config['ip'], config['req_rep_port'])
        self.context = context
        self.service = service
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        ### patch fritz
        self._reconnect_in_progress = False
        ### end patch fritz
        return

    def _create_stream(self):
        """Helper to create the socket and the stream.

        Also (re)starts the heartbeat ticker and sends the READY handshake.
        """
        if DEBUG:
            print("MQRep > _create_stream")
        socket = ZmqSocket(self.context, zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_mpd_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        # FIX: identity comparison with None (was "!= None")
        if self.ticker is not None:
            if DEBUG:
                print("MQRep > _create_stream - stop ticker")
            self.ticker.stop()
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message."""
        if DEBUG:
            print("MQREP > _send_ready")
        ready_msg = [b'', self._proto_version, b'\x01', self.service]
        self.stream.send_multipart(ready_msg)
        # a fresh handshake resets the liveness budget
        self.curr_liveness = self.HB_LIVENESS
        if DEBUG:
            print("MQREP > _send_ready > curr_liveness <= {0}".format(self.HB_LIVENESS))
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Decrements liveness, sends a heartbeat, and reconnects once the
        liveness budget is exhausted.
        """
        if DEBUG:
            print("MQREP > _tick")
        self.curr_liveness -= 1
        if DEBUG:
            print('MQREP > _tick - {0} tick = {1}'.format(time.time(), self.curr_liveness))
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        if DEBUG:
            print('MQREP > _tick - {0} lost connection'.format(time.time()))
        # ouch, connection seems to be dead
        self.shutdown()
        # try to recreate it after HB_INTERVAL ms
        self._delayed_cb = DelayedCallback(self._create_stream, self.HB_INTERVAL)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker."""
        msg = [b'', self._proto_version, b'\x04']
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        msg can either be a byte-string or a list of byte-strings.
        """
        ## if self.need_handshake:
        ##     raise ConnectionNotReadyError()
        # prepend the envelope captured from the matching request
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_mpd_message(self, msg):
        """Helper method called on message receive.

        :param msg: list with the message parts
        """
        if DEBUG:
            print("MQRep > _on_mpd_message : {0} - {1}".format(time.strftime("%H:%M:%S"), msg))
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        # TODO: version check
        proto = msg.pop(0)
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if DEBUG:
            print("MQREP > _on_mpd_message > curr_liveness <= {0}".format(self.HB_LIVENESS))
        if msg_type == b'\x05':  # disconnect
            if DEBUG:
                print("MQREP > _on_mpd_message > type x05 : disconnect")
            self.curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == b'\x02':  # request
            if DEBUG:
                print("MQREP > _on_mpd_message > type x02 : request")
            # remaining parts are the user message
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', self._proto_version, b'\x03'] + envelope  # REPLY
            self.envelope = envelope
            mes = MQMessage()
            mes.set(msg)
            # FIX: bare "except:" also swallowed SystemExit/KeyboardInterrupt;
            # catch Exception so only handler errors are reported and absorbed
            try:
                self.on_mdp_request(mes)
            except Exception:
                print("ERROR {0}".format(traceback.format_exc()))
        else:
            if DEBUG:
                print("MQREP > _on_mpd_message > type ??? : invalid or hbeat")
            # invalid message - ignored
            # if \x04, this is a hbeat message
            pass
        return

    def on_mdp_request(self, msg):
        """Public method called when a request arrived.

        Must be overloaded!
        """
        pass
class ZMQ(Client):
    """ZMQ transport client (REQ by default, DEALER in nowait/fireball mode).

    Usable as a context manager; __exit__ closes stream, loop, socket and
    terminates the context so FDs are not leaked.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # do our best to clean up potentially leaky FDs
        if hasattr(self, 'stream') and not self.stream.closed():
            self.stream.close()
        if hasattr(self, 'loop'):
            try:
                self.loop.close()
            except KeyError:
                # NOTE(review): tornado can raise KeyError when the loop was
                # already removed from its registry - confirm this is the
                # intended exception to swallow here.
                pass
        self.socket.close()
        self.context.term()

    def __init__(self, remote, token, **kwargs):
        """Create the socket and apply the configured timeouts.

        :param remote: endpoint to connect to
        :param token: auth token forwarded with every message
        :param kwargs: nowait (bool), autoclose (bool)
        """
        super(ZMQ, self).__init__(remote, token)
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.socket.RCVTIMEO = RCVTIMEO
        self.socket.SNDTIMEO = SNDTIMEO
        self.socket.setsockopt(zmq.LINGER, LINGER)
        self.nowait = kwargs.get('nowait', False)
        self.autoclose = kwargs.get('autoclose', True)
        if self.nowait:
            # FIX: close the REQ socket before replacing it (was leaked)
            self.socket.close()
            # NOTE(review): timeouts/LINGER are not re-applied to the DEALER
            # socket - confirm that is intended.
            self.socket = self.context.socket(zmq.DEALER)
            # FIX: removed duplicated "self.autoclose = ..." assignment

    def _handle_message_fireball(self, m):
        """Collect one fireball response; stop the loop when all arrived."""
        logger.debug('message received')
        m = json.loads(m[2].decode('utf-8'))
        self.response.append(m)
        self.num_responses -= 1
        logger.debug('num responses remaining: %i' % self.num_responses)
        if self.num_responses == 0:
            logger.debug('finishing up...')
            self.loop.stop()

    def _fireball_timeout(self):
        """Stop waiting for fireball responses after SNDTIMEO."""
        logger.info('fireball timeout')
        self.loop.stop()

    def _send_fireball(self, mtype, data, f_size):
        """Send `data` in batches of `f_size` over a DEALER socket.

        :param mtype: message type to send
        :param data: JSON string (or list) of indicators
        :param f_size: batch size
        :returns: list of decoded responses (one per batch)
        """
        if len(data) < 3:
            logger.error('no data to send')
            return []
        # FIX: IOLoop().instance() constructed a throwaway loop just to fetch
        # the singleton; call the classmethod directly
        self.loop = IOLoop.instance()
        self.socket.close()
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.connect(self.remote)
        self.stream = ZMQStream(self.socket)
        self.stream.on_recv(self._handle_message_fireball)
        self.stream.io_loop.call_later(SNDTIMEO, self._fireball_timeout)
        self.response = []
        if PYVERSION == 3:
            if isinstance(data, bytes):
                data = data.decode('utf-8')
        data = json.loads(data)
        if not isinstance(data, list):
            data = [data]
        # one response is expected per full or partial batch
        if (len(data) % f_size) == 0:
            self.num_responses = int((len(data) / f_size))
        else:
            self.num_responses = int((len(data) / f_size)) + 1
        logger.debug('responses expected: %i' % self.num_responses)
        batch = []
        for d in data:
            batch.append(d)
            if len(batch) == f_size:
                # FIX: honor the mtype parameter (was hardcoded to
                # Msg.INDICATORS_CREATE; the only caller passes that value,
                # so behavior is unchanged)
                Msg(mtype=mtype, token=self.token, data=batch).send(self.socket)
                batch = []
        if len(batch):
            Msg(mtype=mtype, token=self.token, data=batch).send(self.socket)
        logger.debug("starting loop to receive")
        self.loop.start()
        # clean up FDs
        self.loop.close()
        self.stream.close()
        self.socket.close()
        return self.response

    def _recv(self, decode=True, close=True):
        """Receive and decode one reply; raise on error statuses."""
        mtype, data = Msg().recv(self.socket)
        if close:
            self.socket.close()
        if not decode:
            return data
        data = json.loads(data)
        if data.get('message') == 'unauthorized':
            raise AuthError()
        if data.get('message') == 'busy':
            raise CIFBusy()
        if data.get('message') == 'invalid search':
            raise InvalidSearch()
        if data.get('status') != 'success':
            raise RuntimeError(data.get('message'))
        if data.get('data') is None:
            raise RuntimeError('invalid response')
        if isinstance(data.get('data'), bool):
            return data['data']
        # is this a straight up elasticsearch string?
        if data['data'] == '{}':
            return []
        if isinstance(data['data'], basestring) and data['data'].startswith('{"hits":{"hits":[{"_source":'):
            data['data'] = json.loads(data['data'])
            data['data'] = [r['_source'] for r in data['data']['hits']['hits']]
        # payload may be zlib-compressed; fall through if not
        try:
            data['data'] = zlib.decompress(data['data'])
        except (zlib.error, TypeError):
            pass
        return data.get('data')

    def _send(self, mtype, data='[]', nowait=False, decode=True):
        """Send one message, optionally waiting for / decoding the reply."""
        self.socket.connect(self.remote)
        if isinstance(data, str):
            data = data.encode('utf-8')
        Msg(mtype=mtype, token=self.token, data=data).send(self.socket)
        if self.nowait or nowait:
            if self.autoclose:
                self.socket.close()
            return
        rv = self._recv(decode=decode)
        return rv

    def ping(self):
        try:
            return self._send(Msg.PING)
        except zmq.error.Again:
            raise TimeoutError

    def ping_write(self):
        try:
            return self._send(Msg.PING_WRITE)
        except zmq.error.Again:
            raise TimeoutError

    def indicators_search(self, filters, decode=True):
        return self._send(Msg.INDICATORS_SEARCH, json.dumps(filters), decode=decode)

    def graph_search(self, filters, decode=True):
        return self._send(Msg.GRAPH_SEARCH, json.dumps(filters), decode=decode)

    def stats_search(self, filters, decode=True):
        return self._send(Msg.STATS_SEARCH, json.dumps(filters), decode=decode)

    def indicators_create(self, data, nowait=False, fireball=False, f_size=FIREBALL_SIZE):
        if isinstance(data, dict):
            data = self._kv_to_indicator(data)
        if isinstance(data, Indicator):
            data = str(data)
        if fireball:
            return self._send_fireball(Msg.INDICATORS_CREATE, data, f_size)
        return self._send(Msg.INDICATORS_CREATE, data, nowait=nowait)

    def indicators_delete(self, data):
        if isinstance(data, dict):
            data = self._kv_to_indicator(data)
        if isinstance(data, Indicator):
            data = str(data)
        return self._send(Msg.INDICATORS_DELETE, data)

    def tokens_search(self, filters=None):
        # FIX: mutable default argument ({}) replaced with None sentinel
        if filters is None:
            filters = {}
        return self._send(Msg.TOKENS_SEARCH, json.dumps(filters))

    def tokens_create(self, data):
        return self._send(Msg.TOKENS_CREATE, data)

    def tokens_delete(self, data):
        return self._send(Msg.TOKENS_DELETE, data)

    def tokens_edit(self, data):
        return self._send(Msg.TOKENS_EDIT, data)
class Worker(object):
    """Class for the MDP worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.

    Requests are handled in forked child processes (up to ``max_forks``);
    children send their replies back over a per-process IPC ROUTER socket.
    """

    max_forks = 10

    # per-process IPC endpoint used by forked children to return replies
    ipc = 'ipc:///tmp/zmq-rpc-' + str(uuid4())

    HB_INTERVAL = 1000  # in milliseconds
    HB_LIVENESS = 3     # HBs to miss before connection counts as dead

    def __init__(self, context, endpoint, service, multicasts=()):
        """Initialize the MDPWorker.

        :param context: is the zmq context to create the socket from
        :type context: zmq.Context
        :param endpoint: the broker endpoint to connect to
        :type endpoint: str
        :param service: service name - you can put hostname here
        :type service: str
        :param multicasts: list of groups to subscribe
        :type multicasts: list
        """
        self.context = context
        self.endpoint = endpoint
        self.service = service.encode('utf-8')  # convert to byte-string - required in python 3
        self.multicasts = [m.encode('utf-8') for m in multicasts]  # convert to byte-string
        self.stream = None
        self._tmo = None
        self.need_handshake = True
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        self.forks = []
        self.curr_liveness = self.HB_LIVENESS
        # ROUTER socket receiving replies from forked children
        socket = self.context.socket(zmq.ROUTER)
        socket.bind(self.ipc)
        self.stream_w = ZMQStream(socket)
        self.stream_w.on_recv(self._on_fork_response)
        self.reply_socket = None
        return

    def _create_stream(self):
        """Helper to create the socket and the stream."""
        self.on_log_event('broker.connect', 'Trying to connect do broker')
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        for m in self.multicasts:
            self._register_worker_to_multicast(m)
        self.ticker.start()
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Decrements liveness, heartbeats the broker, and reconnects when the
        liveness budget is exhausted.
        """
        self.curr_liveness -= 1
        self.send_hb()
        if self.curr_liveness >= 0:
            return
        # ouch, connection seems to be dead
        self.on_log_event('broker.timeout', 'Connection to broker timeouted, disconnecting')
        self.shutdown(False)
        # try to recreate it after 5 seconds
        self._delayed_cb = DelayedCallback(self._create_stream, 5000)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker."""
        msg = [b'', MDP_WORKER_VERSION, b'\x05']
        self.stream.send_multipart(msg)
        return

    def shutdown(self, final=True):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.

        :param final: if shutdown is final and we want to close all sockets
        :type final: bool
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.on_recv(None)
        self.disconnect()
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.need_handshake = True
        if final:
            self.stream_w.socket.close()
            self.stream_w.close()
            # FIX: was "self.stream = None" (already cleared above) -
            # the worker-side stream reference is the one to drop here
            self.stream_w = None
        return

    def disconnect(self):
        """Helper method to send the workers DISCONNECT message."""
        self.stream.socket.send_multipart([b'', MDP_WORKER_VERSION, b'\x06'])
        self.curr_liveness = self.HB_LIVENESS
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message."""
        self.on_log_event('broker.ready', 'Sending ready to broker.')
        ready_msg = [b'', MDP_WORKER_VERSION, b'\x01', self.service]
        self.stream.send_multipart(ready_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _register_worker_to_multicast(self, name):
        """Helper method to register worker to multicast group.

        :param name: group name
        :type name: str
        """
        self.on_log_event('broker.register-group', 'Subscribing to group \'%s\'.' % name)
        reg_msg = [b'', MDP_WORKER_VERSION, b'\x07', name]
        self.stream.send_multipart(reg_msg)
        self.curr_liveness = self.HB_LIVENESS
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg: message parts
        :type msg: list
        """
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        protocol_version = msg.pop(0)
        if protocol_version != MDP_WORKER_VERSION:  # version check, ignore old versions
            return
        # 3rd part is message type
        msg_type = msg.pop(0)
        # any message resets the liveness counter
        self.need_handshake = False
        self.curr_liveness = self.HB_LIVENESS
        if msg_type == b'\x06':  # disconnect
            self.curr_liveness = 0  # reconnect will be triggered by hb timer
        elif msg_type == b'\x02':  # request
            # remaining parts are the user message
            addresses, msg = self.split_address(msg)
            self._on_request(addresses, msg)
        elif msg_type == b'\x05':
            # received hardbeat - timer handled above
            pass
        else:
            # invalid message ignored
            pass
        return

    def _on_fork_response(self, to_send):
        """Helper method to send message from forked worker.

        This message will be received by main worker process and resend
        to broker.

        :param to_send: address and data to send
        :type to_send: list
        """
        self.stream.send_multipart(to_send)
        return

    def send_reply(self, addresses, msg, partial=False, exception=False):
        """Send reply from forked worker process.

        This method can be called only from do_work() method!
        This method will send messages to main worker listening on local
        socket in /tmp/zmq-rpc-...

        :param addresses: return address stack
        :type addresses: list of str
        :param msg: return value from called method
        :type msg: mixed
        :param partial: if the message is partial or final
        :type partial: bool
        :param exception: if the message is exception, msg format is:
            {'class':'c', 'message':'m', 'traceback':'t'}
        :type exception: bool
        """
        if not self.reply_socket:
            # lazily connect the child back to the parent's IPC endpoint
            context = zmq.Context()
            self.reply_socket = context.socket(zmq.DEALER)
            self.reply_socket.connect(self.ipc)
        msg = msgpack.Packer().pack(msg)
        if exception:
            to_send = [b'', MDP_WORKER_VERSION, b'\x08']
        elif partial:
            to_send = [b'', MDP_WORKER_VERSION, b'\x03']
        else:
            to_send = [b'', MDP_WORKER_VERSION, b'\x04']
        to_send.extend(addresses)
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        # track the send so the child does not exit before delivery
        m = self.reply_socket.send_multipart(to_send, track=True, copy=False)
        m.wait()
        if not partial:
            self.reply_socket.close()
            self.reply_socket = None
        return

    def send_message(self, addresses, msg, partial=False, error=False):
        """Send response message from main worker process.

        Please do not call this method from do_work()

        :param addresses: return address stack
        :type addresses: list of str
        :param msg: return value from called method
        :type msg: mixed
        :param partial: if the message is partial or final
        :type partial: bool
        :param error: if the message is error
        :type error: bool
        """
        to_send = [b'', MDP_WORKER_VERSION]
        if partial:
            to_send.append(b'\x03')
        elif error:
            to_send.append(b'\x09')
        else:
            to_send.append(b'\x04')
        to_send.extend(addresses)
        to_send.append(b'')
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        self.stream.send_multipart(to_send)
        return

    def _on_request(self, addresses, message):
        """Helper method called on RPC message receive.

        Forks a child process running do_work(); rejects the request when
        the fork limit is reached.
        """
        # remove finished forks
        self._remove_finished_processes()
        # test max forks
        if len(self.forks) >= self.max_forks:
            self.send_message(addresses, b'max workers limit exceeded', error=True)
            self.on_max_forks(addresses, message)
            return
        name = message[0]
        args = msgpack.unpackb(message[1])
        kwargs = msgpack.unpackb(message[2])
        p = Process(target=self.do_work, args=(addresses, name, args, kwargs))
        p.start()
        p._args = None  # free memory
        self.forks.append(p)
        return

    def _remove_finished_processes(self):
        """Helper method dedicated to cleaning list of forked workers."""
        for f in [f for f in self.forks if not f.is_alive()]:
            self.forks.remove(f)
        return

    def split_address(self, msg):
        """Function to split return Id and message received by ROUTER socket.

        Returns 2-tuple with return Id and remaining message parts.
        Empty frames after the Id are stripped.
        """
        ret_ids = []
        # FIX: keep the index defined even when msg is empty
        # (the original raised NameError on an empty frame list)
        idx = -1
        for idx, part in enumerate(msg):
            if part:
                ret_ids.append(part)
            else:
                # first empty frame terminates the address stack
                break
        return ret_ids, msg[idx + 1:]

    def on_log_event(self, event, message):
        """Override this method if you want to log events from broker.

        :type event: str
        :param event: event type - used for filtering
        :type message: str
        :param message: log message

        :rtype: None
        """
        pass

    def on_max_forks(self, addresses, message):
        """This method is called when max_forks limit is reached.

        You can override this method.
        """
        pass

    def do_work(self, addresses, name, args, kwargs):
        """Main method responsible for handling rpc calls, and sending
        response messages.

        Please override this method!

        :param addresses: return address stack
        :type addresses: list of str
        :param name: name of task
        :type name: str
        :param args: positional task arguments
        :type args: list
        :param kwargs: key-value task arguments
        :type kwargs: dict
        """
        # this is example of simple response message
        self.send_reply(addresses, 'method not implemented')
        # and send message to main worker
        # you can also send partial message and exception - read 'send_reply' docs
        return
class MNWorker(MN_object):
    """Class for the MN worker side.

    Thin encapsulation of a zmq.DEALER socket.
    Provides a send method with optional timeout parameter.

    Will use a timeout to indicate a broker failure.

    :param context: the context to use for socket creation.
    :type context: zmq.Context
    :param endpoint: endpoint to connect to.
    :type endpoint: str
    :param service: the name of the service we support.
    :type service: byte-string
    """

    _proto_version = b'MNPW01'  # worker protocol version

    def __init__(self, context, endpoint, service, worker_type, address, protocols):
        """Initialize the MNWorker."""
        self.context = context
        self.endpoint = endpoint
        self.service = service
        self.type = worker_type
        self.address = address
        self.protocols = protocols
        self.envelope = None
        self.HB_RETRIES = HB_RETRIES
        self.HB_INTERVAL = HB_INTERVAL
        self._data = {}
        self.stream = None
        self._tmo = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        self.ticker = None
        self._delayed_cb = None
        self._create_stream()
        _LOG.info("Worker initialized and can be found at '%s'" % endpoint)
        return

    def _create_stream(self):
        """Helper to create the socket and the stream."""
        socket = self.context.socket(zmq.DEALER)
        ioloop = IOLoop.instance()
        self.stream = ZMQStream(socket, ioloop)
        self.stream.on_recv(self._on_message)
        self.stream.socket.setsockopt(zmq.LINGER, 0)
        self.stream.connect(self.endpoint)
        self.ticker = PeriodicCallback(self._tick, self.HB_INTERVAL)
        self._send_ready()
        self.ticker.start()
        return

    def _send_ready(self):
        """Helper method to prepare and send the workers READY message."""
        _LOG.debug("Informing broker I am ready")
        ready_msg = [b'', WORKER_PROTO, MSG_READY, self.service, self.type, self.address, self.protocols]
        if self.stream.closed():
            # NOTE(review): shutdown() sets self.stream to None, so the
            # send below would fail - confirm intended flow.
            self.shutdown()
        self.stream.send_multipart(ready_msg)
        self.curr_retries = self.HB_RETRIES
        return

    def _tick(self):
        """Method called every HB_INTERVAL milliseconds.

        Decrements the retry budget, heartbeats the broker, and triggers a
        reconnect once the budget is exhausted.
        """
        self.curr_retries -= 1
        self.send_hb()
        if self.curr_retries >= 0:
            return
        # connection seems to be dead
        self.shutdown()
        # try to recreate it after HB_INTERVAL ms
        self._delayed_cb = DelayedCallback(self._create_stream, self.HB_INTERVAL)
        self._delayed_cb.start()
        return

    def send_hb(self):
        """Construct and send HB message to broker."""
        _LOG.debug("Sending heartbeat")
        msg = [b'', WORKER_PROTO, MSG_HEARTBEAT]
        if self.stream.closed():
            self.shutdown()
        self.stream.send_multipart(msg)
        return

    def shutdown(self):
        """Method to deactivate the worker connection completely.

        Will delete the stream and the underlying socket.
        """
        if self.ticker:
            self.ticker.stop()
            self.ticker = None
        if not self.stream:
            return
        self.stream.socket.close()
        self.stream.close()
        self.stream = None
        self.timed_out = False
        self.need_handshake = True
        self.connected = False
        return

    def reply(self, msg):
        """Send the given message.

        :param msg: full message to send.
        :type msg: can either be a byte-string or a list of byte-strings
        """
        if self.need_handshake:
            raise ConnectionNotReadyError()
        # prepend the envelope captured from the matching query
        to_send = self.envelope
        self.envelope = None
        if isinstance(msg, list):
            to_send.extend(msg)
        else:
            to_send.append(msg)
        if self.stream.closed():
            self.shutdown()
        self.stream.send_multipart(to_send)
        return

    def _on_message(self, msg):
        """Helper method called on message receive.

        :param msg: a list w/ the message parts
        :type msg: a list of byte-strings
        """
        _LOG.debug("Received: %s." % msg)
        # 1st part is empty
        msg.pop(0)
        # 2nd part is protocol version
        proto = msg.pop(0)
        if proto != WORKER_PROTO:
            # FIX: actually ignore messages from an unsupported protocol
            # (was "pass", which fell through and processed them anyway)
            return
        # 3rd part is message type
        msg_type = msg.pop(0)
        # XXX: hardcoded message types!
        # any message resets the retries counter
        self.need_handshake = False
        self.curr_retries = self.HB_RETRIES
        if msg_type == MSG_DISCONNECT:  # disconnect
            _LOG.info("Broker wants us to disconnect.")
            self.curr_retries = 0  # reconnect will be triggered by hb timer
        elif msg_type == MSG_QUERY:  # request
            # remaining parts are the user message
            _LOG.debug("Received new request: %s." % msg)
            envelope, msg = split_address(msg)
            envelope.append(b'')
            envelope = [b'', WORKER_PROTO, MSG_REPLY] + envelope  # reply
            self.envelope = envelope
            self.on_request(msg)
        else:
            # invalid message - ignored
            _LOG.debug('ignoring message with invalid id')
            pass
        return

    def on_request(self, msg):
        """Public method called when a request arrived.

        :param msg: a list w/ the message parts
        :type msg: a list of byte-strings

        Must be overloaded to provide support for various services!
        """
        pass
class AdminSocketHandler(SockJSConnection):
    """SockJS connection for authenticated admin clients.

    Subscribes to the project channels over ZMQ SUB and relays published
    messages to the connected admin browser.
    """

    def on_message_published(self, message):
        """Relay one ZMQ-published frame to the admin client.

        Frame format is "<channel><SEP><payload>"; only the payload is sent.
        """
        actual_message = message[0]
        if six.PY3:
            actual_message = actual_message.decode()
        self.send(actual_message.split(CHANNEL_DATA_SEPARATOR, 1)[1])

    @coroutine
    def subscribe(self):
        """Register this connection and subscribe to all project channels."""
        projects, error = yield state.project_list()
        self.projects = [x['_id'] for x in projects]
        self.uid = uuid.uuid4().hex
        self.connections = self.application.admin_connections
        # NOTE(review): a fresh zmq.Context per admin connection is unusual -
        # confirm a shared application-level context was not intended.
        context = zmq.Context()
        subscribe_socket = context.socket(zmq.SUB)
        if self.application.zmq_pub_sub_proxy:
            subscribe_socket.connect(self.application.zmq_xpub)
        else:
            for address in self.application.zmq_sub_address:
                subscribe_socket.connect(address)
        for project_id in self.projects:
            if project_id not in self.connections:
                self.connections[project_id] = {}
            self.connections[project_id][self.uid] = self
            channel_to_subscribe = create_project_channel_name(project_id)
            subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, six.u(channel_to_subscribe))
        self.subscribe_stream = ZMQStream(subscribe_socket)
        self.subscribe_stream.on_recv(self.on_message_published)
        logger.info('admin connected')

    def unsubscribe(self):
        """Deregister this connection and tear down the ZMQ stream."""
        if not hasattr(self, 'uid'):
            # subscribe() never ran (e.g. auth failed before it was called)
            return
        for project_id in self.projects:
            # FIX: idiom - "project_id not in" instead of "not project_id in"
            if project_id not in self.connections:
                continue
            try:
                del self.connections[project_id][self.uid]
            except KeyError:
                pass
            if not self.connections[project_id]:
                del self.connections[project_id]
        self.subscribe_stream.on_recv(None)
        self.subscribe_stream.close()
        logger.info('admin disconnected')

    def on_open(self, info):
        """Authenticate via the signed 'user' cookie; close on failure."""
        try:
            value = info.cookies['user'].value
        except (KeyError, AttributeError):
            self.close()
        else:
            user = decode_signed_value(
                self.application.settings['cookie_secret'], 'user', value)
            if user:
                self.subscribe()
            else:
                self.close()

    def on_close(self):
        self.unsubscribe()
class Status(SockJSConnection):
    """SockJS connection streaming recorder status and peak levels."""

    # class-level state shared by all connections
    remaining = 0  # estimated remaining recording time (disk-derived)
    rTime = 0      # last time the disk-space estimate was refreshed
    pTime = 0      # last time a progress message was processed

    def on_open(self, info):
        # subscribe to the progress publisher
        # NOTE(review): `context` is a module-level zmq context - confirm.
        self.prog_socket = context.socket(zmq.SUB)
        self.prog_socket.connect("ipc:///tmp/progressOut.ipc")
        self.prog_socket.setsockopt(zmq.SUBSCRIBE, "")
        self.prog_stream = ZMQStream(self.prog_socket, tornado.ioloop.IOLoop.instance())
        self.prog_stream.on_recv(self.process_prog)
        # subscribe to the peak-level publisher
        self.peak_socket = context.socket(zmq.SUB)
        self.peak_socket.connect("ipc:///tmp/peaks.ipc")
        self.peak_socket.setsockopt(zmq.SUBSCRIBE, "")
        self.peak_stream = ZMQStream(self.peak_socket, tornado.ioloop.IOLoop.instance())
        self.peak_stream.on_recv(self.process_peaks)

    def on_close(self):
        # close streams before their underlying sockets
        self.prog_stream.close()
        self.prog_socket.close()
        self.peak_stream.close()
        self.peak_socket.close()

    def process_prog(self, data):
        """Handle progress frames: refresh disk stats (throttled to every
        30 s) and forward an enriched status dict to the client."""
        now = time()
        if now - Status.rTime > 30:
            if sys.DEV_MODE:
                stat = os.statvfs(".")
            else:
                stat = os.statvfs("/var/audio")
            # free bytes divided by bytes consumed per second of audio
            Status.remaining = (stat.f_bsize * stat.f_bavail) / (
                (core.depth / 8) * core.channels * core.rate * core.comp_ratio
            )
            Status.rTime = now
            updateBTimer()
            updateTemp()
            save_config()
            # core.sync_dir()
        for msg in data:
            d = json.loads(msg)
            d["_t"] = "status"
            # d['c'] = os.getloadavg()
            d["r"] = Status.remaining
            d["s"] = core.port.gotSignal()
            d["ss"] = core.depth
            d["sr"] = core.rate
            # d['bt'] = hrBTimer()
            d["ct"] = int(getTemp())
            # if o:
            self.send(d)
        Status.pTime = now

    def process_peaks(self, data):
        # forward the first peak frame as-is
        d = {"_t": "peaks", "p": json.loads(data[0])}
        self.send(d)

    def on_message(self, message):
        # for handling ping: echo back with the server-side timestamp
        now = datetime.datetime.now()
        message = json.loads(message)
        message["server"] = [now.hour, now.minute, now.second, now.microsecond / 1000]
        message["_t"] = "pong"
        self.send(message)
class NodeService(App):
    """Top-level node application.

    Owns the node's identity (backed by the ``Node`` DB row), a zmq ROUTER
    socket for request routing, a PUB socket for event publishing and a PULL
    socket that collects events from child services and re-publishes them
    under the node's identity.  Child services (file, layerkeep, printers,
    cameras, ...) are instantiated from ``Service`` DB rows and mounted as
    sub-apps.  Also installs a process memory limit with a SIGUSR1 warning
    handler.
    """

    __actions__ = []

    def __init__(self, api_port):
        """Build the node: load/create the Node row, bind sockets, mount services.

        :param api_port: HTTP API port, exposed via the ``api_port`` property.
        """
        super().__init__()
        self.__api_port = api_port
        # first boot: create the Node row with default discovery settings
        nodemodel = Node.select().first()
        if not nodemodel:
            settings = {"discovery": True, "discoverable": True}
            nodemodel = Node(uuid=uuid.uuid4().hex, settings=settings)
            nodemodel.node_name = "Ancilla"
            nodemodel.save(force_insert=True)
        self.model = nodemodel
        # zmq identities must be bytes
        self.identity = self.model.uuid.encode('utf-8')
        self.name = self.model.name
        self.config.update({
            "catchall": True
        })
        # settings is a change-tracked overlay on top of config
        self.settings = self.config._make_overlay()
        self.settings.load_dict(self.model.settings)
        self.ctx = zmq.Context.instance()
        self.setup_router()
        self.discovery = Discovery(self)
        self.settings._add_change_listener(
            functools.partial(self.settings_changed, 'settings'))
        self.setup_publisher()
        # PULL socket: child services push events here; see handle_collect
        collector = self.ctx.socket(zmq.PULL)
        collector.bind("ipc://collector.ipc")
        collector.setsockopt( zmq.LINGER, 1 )
        self.collector = ZMQStream(collector)
        self.collector.on_recv(self.handle_collect)
        self.file_service = None
        self.layerkeep_service = None
        self._services = []
        self.init_services()
        self.api = NodeApi(self)
        # keep in-memory service/node state in sync with DB writes
        # NOTE(review): the delete hook is registered under name 'camera_model'
        # while listening on Service — looks like a copy/paste slip; confirm.
        post_save.connect(self.post_save_handler, name=f'service_model', sender=Service)
        post_delete.connect(self.post_delete_service_handler, name=f'camera_model', sender=Service)
        post_save.connect(self.post_save_node_handler, name=f'node_model', sender=Node)
        self.limit_memory()
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        print(f'Node MEM limit NOW = {soft}, {hard}')

    # NOTE(review): "hangle" is a typo for "handle"; kept because the name is
    # referenced in limit_memory below.
    def _hangle_sig_memory(self, signum, stack):
        """SIGUSR1 handler: log the memory warning and force a GC pass."""
        print("Memory Warning: Node Service")
        gc.collect()

    def limit_memory(self):
        """Cap the address-space rlimit at currently-available memory.

        Uses psutil's per-process rlimit when present (Linux), otherwise the
        resource module, and installs the SIGUSR1 memory-warning handler.
        """
        maxhard = psutil.virtual_memory().available
        maxsoft = maxhard
        p = psutil.Process(pid=os.getpid())
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        # hard can be -1 (RLIM_INFINITY): treat as "no existing cap"
        if hard > 0:
            h = min([maxhard, hard])
        else:
            h = maxhard
        if h > 0:
            s = min([maxsoft, h])
        else:
            s = maxsoft
        if hasattr(p, 'rlimit'):
            print(f'Node MEM limit = {soft}, {hard}: {h}')
            p.rlimit(resource.RLIMIT_AS, (s, hard))
        else:
            print(f'Node MEM limit = {soft}, {hard}: {h}')
            resource.setrlimit(resource.RLIMIT_AS, (s, hard))
        self._old_usr1_hdlr = signal.signal(signal.SIGUSR1, self._hangle_sig_memory)

    def cleanup(self):
        """Tear down mounted services, discovery and all zmq sockets."""
        print('Clean Up Node and Services')
        for s in self._mounts:
            s.cleanup()
        self._mounts = []
        self.discovery.stop()
        self.file_service = None
        self.layerkeep_service = None
        self.zmq_router.close()
        self.collector.close(linger=1)
        self.publisher.close(linger=1)
        self.ctx.destroy()

    @property
    def api_port(self):
        return self.__api_port

    @api_port.setter
    def api_port(self, value):
        # keep the discovery advertisement in sync with the API port
        self.discovery.update_port(value)
        self.__api_port = value

    def setup_router(self):
        """Bind the ROUTER socket, probing up to 30 ports starting at 5555."""
        trybind = 30
        bound = False
        self.router_port = 5555
        self.bind_address = f"tcp://*:{self.router_port}"
        self.router_address = f"tcp://127.0.0.1:{self.router_port}"
        self.zrouter = self.ctx.socket(zmq.ROUTER)
        self.zrouter.identity = self.identity
        while not bound and trybind > 0:
            try:
                self.bind_address = f"tcp://*:{self.router_port}"
                self.zrouter.bind(self.bind_address)
                self.router_address = f"tcp://127.0.0.1:{self.router_port}"
                print(f"Node Bound to {self.bind_address}")
                bound = True
            except zmq.error.ZMQError:
                # port taken: try the next one
                trybind -= 1
                self.router_port += 1
        self.zmq_router = ZMQStream(self.zrouter, IOLoop.current())
        self.zmq_router.on_recv(self.router_message)
        self.zmq_router.on_send(self.router_message_sent)

    def setup_publisher(self):
        """Bind the PUB socket, probing up to 30 ports starting at 5556."""
        trybind = 30
        bound = False
        self.publisher_port = 5556
        self.bind_address = f"tcp://*:{self.publisher_port}"
        self.publisher_address = f"tcp://127.0.0.1:{self.publisher_port}"
        publisher = self.ctx.socket(zmq.PUB)
        publisher.setsockopt( zmq.LINGER, 0 )
        while not bound and trybind > 0:
            try:
                self.pub_bind_address = f"tcp://*:{self.publisher_port}"
                publisher.bind(self.pub_bind_address)
                self.publisher_address = f"tcp://127.0.0.1:{self.publisher_port}"
                print(f"Node Pub Bound to {self.pub_bind_address}")
                bound = True
            except zmq.error.ZMQError:
                trybind -= 1
                self.publisher_port += 1
        self.publisher = ZMQStream(publisher)

    def list_actions(self, *args):
        """Return the actions this app exposes to run_action()."""
        return self.__actions__

    def list_plugins(self):
        """Import every sibling .py module in this package directory."""
        import os
        for module in os.listdir(os.path.dirname(__file__)):
            if module == '__init__.py' or module[-3:] != '.py':
                continue
            __import__(module[:-3], locals(), globals())

    def mount_service(self, model):
        """Instantiate and mount the service for a Service row.

        Returns ["exist", service] if something is already mounted at the
        model's api prefix, otherwise ["created", service].
        """
        kwargs = {"publisher_address": self.publisher_address}
        prefix = model.api_prefix
        res = next((item for item in self._mounts if item.config['_mount.prefix'] == prefix), None)
        if res:
            return ["exist", res]
        LayerkeepCls = getattr(importlib.import_module("ancilla.foundation.node.plugins"), "LayerkeepPlugin")
        ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), model.class_name)
        service = ServiceCls(model, **kwargs)
        service.install(LayerkeepCls())
        self._services.append(model)
        self.mount(prefix, service)
        return ["created", service]

    def handle_service_name_change(self, oldname, newname):
        """Rewrite event-listener keys prefixed with the old service name.

        Listener keys look like "<service name>.<event path>"; every service
        whose listeners reference `oldname` gets its keys re-prefixed.
        """
        sc = Service.event_listeners.children().alias('children')
        services = Service.select().from_(Service, sc).where(sc.c.Key.startswith(oldname))[:]
        for s in services:
            evhandlers = {}
            for (k, v) in s.event_listeners.items():
                if k.startswith(oldname + "."):
                    newkey = newname + k[len(oldname):]
                    evhandlers[newkey] = v
                else:
                    evhandlers[k] = v
            s.event_listeners = evhandlers
            s.save()

    def settings_changed(self, event, oldval, key, newval):
        """Change-listener for the node settings overlay."""
        if key == "discovery":
            self.discovery.run(newval)
        elif key == "discoverable":
            self.discovery.make_discoverable(newval)
        pass

    def post_save_node_handler(self, sender, instance, *args, **kwargs):
        """DB post-save hook for Node: sync name and settings into memory."""
        print(f"Post save Node handler {sender}", flush=True)
        if self.model.name != self.name:
            self.name = instance.name
            self.discovery.update_name(self.name)
        old_settings = self.settings.to_json()
        new_settings = ConfigDict().load_dict(self.model.settings).to_json()
        if old_settings != new_settings:
            self.settings.update(new_settings)
            # drop keys that were removed from the stored settings,
            # leaving virtual (inherited) keys untouched
            oldkeys = old_settings.keys()
            newkeys = new_settings.keys()
            for key in oldkeys - newkeys:
                if key not in self.settings._virtual_keys:
                    del self.settings[key]

    def post_save_handler(self, sender, instance, *args, **kwargs):
        """DB post-save hook for Service rows.

        Replaces the cached model, propagates renames, then diffs config,
        settings and event listeners against the running service and applies
        updates/deletions in place.
        """
        print(f"Post save Service handler {sender} {instance}", flush=True)
        model = None
        for idx, item in enumerate(self._services):
            if item.id == instance.id:
                model = item
                self._services[idx] = instance
        if model:
            oldmodel = model
            srv = next((item for item in self._mounts if item.model.id == instance.id), None)
            oldname = model.name
            model = instance
            if oldname != instance.name:
                self.handle_service_name_change(oldname, instance.name)
            if srv:
                srv.update_model(model)
                # snapshot old state as JSON for cheap equality checks
                old_config = ConfigDict().load_dict(oldmodel.configuration).to_json()
                old_settings = srv.settings.to_json()
                old_event_listeners = srv.event_handlers.to_json()
                new_config = ConfigDict().load_dict(srv.model.configuration).to_json()
                new_settings = ConfigDict().load_dict(srv.model.settings).to_json()
                new_event_listeners = ConfigDict().load_dict(srv.model.event_listeners).to_json()
                if old_config != new_config:
                    srv.config.update(new_config)
                    oldkeys = old_config.keys()
                    newkeys = new_config.keys()
                    for key in oldkeys - newkeys:
                        if key not in srv.config._virtual_keys:
                            del srv.config[key]
                if old_settings != new_settings:
                    srv.settings.update(new_settings)
                    oldkeys = old_settings.keys()
                    newkeys = new_settings.keys()
                    for key in oldkeys - newkeys:
                        if key not in srv.settings._virtual_keys:
                            del srv.settings[key]
                if old_event_listeners != new_event_listeners:
                    srv.event_handlers.update(new_event_listeners)
                    oldkeys = old_event_listeners.keys()
                    newkeys = new_event_listeners.keys()
                    for key in oldkeys - newkeys:
                        # NOTE(review): checks settings._virtual_keys while
                        # deleting from event_handlers — likely meant
                        # event_handlers._virtual_keys; confirm before fixing.
                        if key not in srv.settings._virtual_keys:
                            del srv.event_handlers[key]

    def post_delete_service_handler(self, sender, instance, *args, **kwargs):
        """DB post-delete hook: remove the service's data directory."""
        if os.path.exists(instance.directory):
            shutil.rmtree(instance.directory)

    def init_services(self):
        """Create the built-in layerkeep/file services if missing, then mount
        every Service row as a sub-app under /api/services/<kind>/<id>/."""
        LayerkeepCls = getattr(importlib.import_module("ancilla.foundation.node.plugins"), "LayerkeepPlugin")
        self.install(LayerkeepCls())
        lkmodel = Service.select().where(Service.kind == "layerkeep").first()
        if not lkmodel:
            self.__create_layerkeep_service()
        filemodel = Service.select().where(Service.kind == "file").first()
        if not filemodel:
            self.__create_file_service()
        kwargs = {"publisher_address": self.publisher_address}
        for s in Service.select():
            self._services.append(s)
            if s.kind == "file":
                ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), s.class_name)
                service = ServiceCls(s, **kwargs)
                service.install(LayerkeepCls())
                self.file_service = service
                self.mount(f"/api/services/{s.kind}/{s.id}/", service)
            elif s.kind == "layerkeep":
                # the layerkeep service itself does not get the plugin
                ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), s.class_name)
                service = ServiceCls(s, **kwargs)
                self.layerkeep_service = service
                self.mount(f"/api/services/{s.kind}/{s.id}/", service)
            else:
                ServiceCls = getattr(importlib.import_module("ancilla.foundation.node.services"), s.class_name)
                service = ServiceCls(s, **kwargs)
                service.install(LayerkeepCls())
                self.mount(f"/api/services/{s.kind}/{s.id}/", service)

    def delete_service(self, service):
        """Forget the service model and unmount its running app, if any."""
        self._services = [item for item in self._services if item.id != service.id]
        srv = next((item for item in self._mounts if item.model.id == service.id), None)
        if srv:
            self.unmount(srv)

    def delete_recording(self, msg):
        """Delete a CameraRecording (instance or {"data": {"id": ...}} message)
        and its on-disk image/video artifacts.  Returns True on success.
        """
        if isinstance(msg, CameraRecording):
            recording = msg
        else:
            # NOTE(review): if msg has no "data" or no "id", `recording` is
            # never bound and the check below raises NameError — confirm
            # callers always supply one of the two shapes.
            data = msg.get("data") or None
            if data:
                if data.get("id"):
                    recording = CameraRecording.get_by_id(data.get("id"))
        if recording:
            try:
                if os.path.exists(recording.image_path):
                    shutil.rmtree(recording.image_path)
                if os.path.exists(recording.video_path):
                    os.remove(recording.video_path)
                res = recording.delete_instance(recursive=True)
                return True
            except Exception as e:
                print(f"delete exception {str(e)}")
                raise e
        return False

    def stop_service(self, service):
        """Unmount the running app for this service model, if mounted."""
        srv = next((item for item in self._mounts if item.model.id == service.id), None)
        if srv:
            self.unmount(srv)

    def unmount(self, app):
        """Remove one sub-app and rebuild the routing table from scratch."""
        curmounts = self._mounts
        curmounts.remove(app)
        self.reset_app()
        self.api.setup()
        print("reseting app ", flush=True)
        app.cleanup()
        self.remount_apps(curmounts)

    def __create_layerkeep_service(self):
        service = Service(name="layerkeep", kind="layerkeep", class_name="Layerkeep")
        service.save(force_insert=True)
        return service

    def __create_file_service(self):
        service = Service(name="local", kind="file", class_name="FileService")
        service.save(force_insert=True)
        return service

    # NOTE(review): mutable default argument; harmless only if _handle never
    # mutates environ — verify.
    def send(self, environ = {}, **kwargs):
        """Dispatch a request environ through the app's handler."""
        res = self._handle(environ)
        return res

    def run_action(self, action, payload, target = None, **kwargs):
        """Run a named action on this node or a mounted service by name.

        Returns the action result, or an error dict if the action is unknown
        or raises.  Coroutine results are scheduled on the running loop.
        """
        if not target:
            target = self
        else:
            target = next((item for item in self._mounts if item.name == target), self)
        try:
            if action in target.list_actions():
                method = getattr(target, action)
                res = method(payload)
                if yields(res):
                    # NOTE(review): the future is scheduled but its result is
                    # never returned/awaited, so coroutine actions yield None
                    # to the caller — confirm this fire-and-forget is intended.
                    future = asyncio.run_coroutine_threadsafe(res, asyncio.get_running_loop())
                else:
                    return res
            else:
                return {"status": "error", "message": "Action Doesnt Exist"}
        except Exception as e:
            return {"status": "error", "message": f'Error Running Action: {str(e)}' }

    # NOTE(review): this method is broken as written — it unpacks a `msg`
    # variable that is never defined (the parameter is `action`), so calling
    # it raises NameError.  Looks like dead/legacy code; confirm before use.
    def sendto(self, action):
        node_identity, request_id, device_identity, action, *msgparts = msg
        message = ""
        if len(msgparts) > 0:
            message = msgparts[0]
        response = {"request_id": request_id.decode('utf-8'), "action": action.decode('utf-8')}
        if device_identity:
            curdevice = self.active_devices.get(device_identity)
            if curdevice:
                res = curdevice.send([request_id, action, message])

    def router_message_sent(self, msg, status):
        """ZMQStream on_send callback (debug only)."""
        print("NODE INSIDE ROUTE MESSageSEND", flush=True)

    def router_message(self, msg):
        """Handle one ROUTER frame set: [replyto, seq, json-request, ...].

        The request names a class via '__class__' ("module.Class") which is
        imported, instantiated from 'data' and passed to handle_route.
        """
        replyto, seq_s, brequest, *args = msg
        request = brequest.decode('utf-8')
        try:
            req = json.loads(request)
            classname = req.get('__class__')
            module_name, class_name = classname.rsplit(".", 1)
            MyClass = getattr(importlib.import_module(module_name), class_name)
            instance = MyClass(**req.get('data', {}))
            self.handle_route(replyto, seq_s, instance)
        except Exception as e:
            self.logger.error(f'Node Router Exception: {str(e)}')

    def handle_collect(self, msg):
        """Re-publish a collected service event under this node's identity.

        Incoming frames: [topic, service, payload...].  The outgoing topic is
        "<node identity>.<topic>" (inserting the service name when the topic
        does not already start with it).
        """
        if len(msg) >= 3:
            topic, service, *other = msg
            if topic.startswith(service):
                newtopic = self.identity + b'.' + topic
            else:
                newtopic = self.identity + b'.' + service + b'.' + topic
            self.publisher.send_multipart([newtopic, service] + other)
        pass

    # NOTE(review): appears to be exploratory/scratch code — it builds and
    # evaluates queries against a hard-coded "servicename.events.print" key
    # and discards the results; confirm it is safe to remove.
    def service_change(self, *args):
        tree = Service.settings["event_handlers"].tree().alias('tree')
        Service.select().from_(Service, tree).where(tree.c.Key.startswith("servicename.events.print"))
        sc = Service.settings["event_handlers"].children().alias('children')
        q = Service.select().from_(Service, sc).where(sc.c.Key.startswith("servicename.events.print"))
        q = (Service.select(sc.c.key, sc.c.value, sc.c.fullkey)
             .from_(Service, sc)
             .order_by(sc.c.key)
             .tuples())
        q[:]
class MDPBroker(object):

    """The MDP broker class.

    The broker routes messages from clients to appropriate workers based on
    the requested service.

    This base class defines the overall functionality and the API. Subclasses
    are meant to implement additional features (like logging).

    The broker uses ZeroMQ XREP sockets to deal with clients and workers.
    These sockets are wrapped in pyzmq streams to fit well into IOLoop.

    .. note::

      The workers will *always* be served by the `main_ep` endpoint.

      In a two-endpoint setup clients will be handled via the `opt_ep`
      endpoint.

    :param context:  the context to use for socket creation.
    :type context:   zmq.Context
    :param main_ep:  the primary endpoint for workers and clients.
    :type main_ep:   str
    :param opt_ep:   is an optional 2nd endpoint.
    :type opt_ep:    str
    :param worker_q: the class to be used for the worker-queue.
    :type worker_q:  class
    """

    CLIENT_PROTO = b'MDPC01'  #: Client protocol identifier
    WORKER_PROTO = b'MDPW01'  #: Worker protocol identifier

    def __init__(self, context, main_ep, opt_ep=None, worker_q=None):
        """Init MDPBroker instance.
        """
        # zmq.XREP is the legacy alias of zmq.ROUTER
        socket = context.socket(zmq.XREP)
        socket.bind(main_ep)
        self.main_stream = ZMQStream(socket)
        self.main_stream.on_recv(self.on_message)
        if opt_ep:
            socket = context.socket(zmq.XREP)
            socket.bind(opt_ep)
            self.client_stream = ZMQStream(socket)
            self.client_stream.on_recv(self.on_message)
        else:
            self.client_stream = self.main_stream
        self._workers = {}
        # services contain the worker queue and the request queue
        self._services = {}
        # Command ids arrive as single-byte frames; keep the keys as bytes so
        # the dispatch in on_worker() matches on both Python 2 (str == bytes)
        # and Python 3 (where '\x01' != b'\x01').
        self._worker_cmds = {
            b'\x01': self.on_ready,
            b'\x03': self.on_reply,
            b'\x04': self.on_heartbeat,
            b'\x05': self.on_disconnect,
            }
        self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
        self.hb_check_timer.start()
        return

    def register_worker(self, wid, service):
        """Register the worker id and add it to the given service.

        Does nothing if worker is already known.

        :param wid:    the worker id.
        :type wid:     str
        :param service:    the service name.
        :type service:     str

        :rtype: None
        """
        if wid in self._workers:
            return
        self._workers[wid] = WorkerRep(self.WORKER_PROTO, wid, service, self.main_stream)
        if service in self._services:
            wq, wr = self._services[service]
            wq.put(wid)
        else:
            q = ServiceQueue()
            q.put(wid)
            self._services[service] = (q, [])
        return

    def unregister_worker(self, wid):
        """Unregister the worker with the given id.

        If the worker id is not registered, nothing happens.

        Will stop all timers for the worker.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        wrep.shutdown()
        service = wrep.service
        if service in self._services:
            wq, wr = self._services[service]
            wq.remove(wid)
        del self._workers[wid]
        return

    def disconnect(self, wid):
        """Send disconnect command and unregister worker.

        If the worker id is not registered, nothing happens.

        :param wid:    the worker id.
        :type wid:     str

        :rtype: None
        """
        try:
            wrep = self._workers[wid]
        except KeyError:
            # not registered, ignore
            return
        to_send = [wid, self.WORKER_PROTO, b'\x05']
        self.main_stream.send_multipart(to_send)
        self.unregister_worker(wid)
        return

    def client_response(self, rp, service, msg):
        """Package and send reply to client.

        :param rp:       return address stack
        :type rp:        list of str
        :param service:  name of service
        :type service:   str
        :param msg:      message parts
        :type msg:       list of str

        :rtype: None
        """
        to_send = rp[:]
        to_send.extend([b'', self.CLIENT_PROTO, service])
        to_send.extend(msg)
        self.client_stream.send_multipart(to_send)
        return

    def shutdown(self):
        """Shutdown broker.

        Will unregister all workers, stop all timers and ignore all further
        messages.

        .. warning:: The instance MUST not be used after :func:`shutdown` has
           been called.

        :rtype: None
        """
        if self.client_stream == self.main_stream:
            self.client_stream = None
        self.main_stream.on_recv(None)
        self.main_stream.socket.setsockopt(zmq.LINGER, 0)
        self.main_stream.socket.close()
        self.main_stream.close()
        self.main_stream = None
        if self.client_stream:
            self.client_stream.on_recv(None)
            self.client_stream.socket.setsockopt(zmq.LINGER, 0)
            self.client_stream.socket.close()
            self.client_stream.close()
            self.client_stream = None
        self._workers = {}
        self._services = {}
        return

    def on_timer(self):
        """Method called on timer expiry.

        Checks which workers are dead and unregisters them.

        :rtype: None
        """
        for wrep in self._workers.values():
            if not wrep.is_alive():
                self.unregister_worker(wrep.id)
        return

    def on_ready(self, rp, msg):
        """Process worker READY command.

        Registers the worker for a service.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        self.register_worker(ret_id, msg[0])
        return

    def on_reply(self, rp, msg):
        """Process worker REPLY command.

        Route the `msg` to the client given by the address(es) in front of
        `msg`.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        wrep = self._workers[ret_id]
        service = wrep.service
        # make worker available again
        try:
            wq, wr = self._services[service]
            cp, msg = split_address(msg)
            self.client_response(cp, service, msg)
            wq.put(wrep.id)
            if wr:
                proto, rp, msg = wr.pop(0)
                self.on_client(proto, rp, msg)
        except KeyError:
            # unknown service
            self.disconnect(ret_id)
        return

    def on_heartbeat(self, rp, msg):
        """Process worker HEARTBEAT command.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        ret_id = rp[0]
        try:
            worker = self._workers[ret_id]
            if worker.is_alive():
                worker.on_heartbeat()
        except KeyError:
            # ignore HB for unknown worker
            pass
        return

    def on_disconnect(self, rp, msg):
        """Process worker DISCONNECT command.

        Unregisters the worker who sent this message.

        :param rp:  return address stack
        :type rp:   list of str
        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        wid = rp[0]
        self.unregister_worker(wid)
        return

    def on_mmi(self, rp, service, msg):
        """Process MMI request.

        For now only mmi.service is handled.

        :param rp:      return address stack
        :type rp:       list of str
        :param service: the protocol id sent
        :type service:  str
        :param msg:     message parts
        :type msg:      list of str

        :rtype: None
        """
        if service == b'mmi.service':
            s = msg[0]
            ret = b'404'
            for wr in self._workers.values():
                if s == wr.service:
                    ret = b'200'
                    break
            self.client_response(rp, service, [ret])
        else:
            self.client_response(rp, service, [b'501'])
        return

    def on_client(self, proto, rp, msg):
        """Method called on client message.

        Frame 0 of msg is the requested service. The remaining frames are the
        request to forward to the worker.

        .. note::

           If the service is unknown to the broker the message is ignored.

        .. note::

           If currently no worker is available for a known service, the
           message is queued for later delivery.

        If a worker is available for the requested service, the message is
        repackaged and sent to the worker. The worker in question is removed
        from the pool of available workers.

        If the service name starts with `mmi.`, the message is passed to the
        internal MMI_ handler.

        .. _MMI: http://rfc.zeromq.org/spec:8

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """
        service = msg.pop(0)
        if service.startswith(b'mmi.'):
            self.on_mmi(rp, service, msg)
            return
        try:
            wq, wr = self._services[service]
            wid = wq.get()
            if not wid:
                # no worker ready: queue message for later delivery
                msg.insert(0, service)
                wr.append((proto, rp, msg))
                return
            wrep = self._workers[wid]
            to_send = [wrep.id, b'', self.WORKER_PROTO, b'\x02']
            to_send.extend(rp)
            to_send.append(b'')
            to_send.extend(msg)
            self.main_stream.send_multipart(to_send)
        except KeyError:
            # unknown service: ignore request
            print('broker has no service "%s"' % service)
        return

    def on_worker(self, proto, rp, msg):
        """Method called on worker message.

        Frame 0 of msg is the command id. The remaining frames depend on the
        command.

        This method determines the command sent by the worker and calls the
        appropriate method. If the command is unknown the message is ignored
        and a DISCONNECT is sent.

        :param proto: the protocol id sent
        :type proto:  str
        :param rp:    return address stack
        :type rp:     list of str
        :param msg:   message parts
        :type msg:    list of str

        :rtype: None
        """
        cmd = msg.pop(0)
        if cmd in self._worker_cmds:
            fnc = self._worker_cmds[cmd]
            fnc(rp, msg)
        else:
            # ignore unknown command; DISCONNECT worker
            self.disconnect(rp[0])
        return

    def on_message(self, msg):
        """Processes given message.

        Decides what kind of message it is -- client or worker -- and calls
        the appropriate method. If unknown, the message is ignored.

        :param msg: message parts
        :type msg:  list of str

        :rtype: None
        """
        rp, msg = split_address(msg)
        # dispatch on first frame after path
        t = msg.pop(0)
        if t.startswith(b'MDPW'):
            self.on_worker(t, rp, msg)
        elif t.startswith(b'MDPC'):
            self.on_client(t, rp, msg)
        else:
            print('Broker unknown Protocol: "%s"' % t)
        return