def init_kernel(self):
    """Create the Kernel object itself"""
    shell_stream = ZMQStream(self.shell_socket)
    control_stream = ZMQStream(self.control_socket)

    kernel_factory = self.kernel_class.instance

    kernel = kernel_factory(
        parent=self,
        session=self.session,
        control_stream=control_stream,
        shell_streams=[shell_stream, control_stream],
        iopub_thread=self.iopub_thread,
        iopub_socket=self.iopub_socket,
        stdin_socket=self.stdin_socket,
        log=self.log,
        profile_dir=self.profile_dir,
        user_ns=self.user_ns,
    )
    kernel.record_ports(
        {name + '_port': port for name, port in self.ports.items()})
    self.kernel = kernel
    # Allow the displayhook to get the execution count
    self.displayhook.get_execution_count = lambda: kernel.execution_count
def __init__(self, context, main_ep, opt_ep=None):
    """Init MDPBroker instance."""
    self.log = logger.Logger('mq_broker').get_logger()
    self.log.info("MDP broker startup...")

    socket = ZmqSocket(context, zmq.ROUTER)
    socket.bind(main_ep)
    self.main_stream = ZMQStream(socket)
    self.main_stream.on_recv(self.on_message)

    if opt_ep:
        socket = ZmqSocket(context, zmq.ROUTER)
        socket.bind(opt_ep)
        self.client_stream = ZMQStream(socket)
        self.client_stream.on_recv(self.on_message)
    else:
        self.client_stream = self.main_stream
    self.log.debug("Socket created...")

    self._workers = {}
    # services contain the worker queue and the request queue
    self._services = {}
    self._worker_cmds = {
        b'\x01': self.on_ready,
        b'\x03': self.on_reply,
        b'\x04': self.on_heartbeat,
        b'\x05': self.on_disconnect,
    }

    self.log.debug("Launch the timer...")
    self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
    self.hb_check_timer.start()
    self.log.info("MDP broker started")
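# Usage sketch (added for illustration; not part of the original source).
# The docstring above names the class MDPBroker; assuming that, the broker
# can be driven by a tornado IOLoop. The endpoint URL is a placeholder.
def _example_run_mdp_broker():
    import zmq
    from tornado.ioloop import IOLoop

    broker = MDPBroker(zmq.Context(), 'tcp://127.0.0.1:5555')
    try:
        IOLoop.instance().start()  # on_message / on_timer fire from the loop
    except KeyboardInterrupt:
        broker.hb_check_timer.stop()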
def __new__(cls, backend_url=None, frontend_url=None, basedir=None,
            daemon_flag=None, worker_bin=None, deployment=None):
    _LOG.debug('lruqueue instance called %s', LRUQueue.__instance)
    if LRUQueue.__instance is None:
        LRUQueue.__instance = super().__new__(cls)

        # Prepare our context and sockets
        context = zmq.Context()
        frontend_socket = context.socket(zmq.ROUTER)
        frontend_socket.bind(frontend_url)
        backend_socket = context.socket(zmq.ROUTER)
        backend_socket.bind(backend_url)

        LRUQueue.__instance.backend_url = backend_url
        LRUQueue.__instance.frontend = ZMQStream(frontend_socket)
        LRUQueue.__instance.backend = ZMQStream(backend_socket)
        LRUQueue.__instance.backend.on_recv(
            LRUQueue.__instance.handle_backend)
        LRUQueue.__instance.frontend.on_recv(
            LRUQueue.__instance.handle_frontend)
        LRUQueue.__instance.workers = dict()
        LRUQueue.__instance.basedir = basedir
        LRUQueue.__instance.daemon = daemon_flag
        LRUQueue.__instance.worker_bin = worker_bin
        LRUQueue.__instance.deployment = deployment
        _LOG.debug('lruqueue instance new done')
    return LRUQueue.__instance
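# Usage sketch (added for illustration; endpoints are placeholders). Because
# __new__ caches the instance, the sockets are bound only on the first call;
# later constructions return the same object.
def _example_lru_queue_is_singleton():
    first = LRUQueue(backend_url='tcp://127.0.0.1:5001',
                     frontend_url='tcp://127.0.0.1:5000')
    second = LRUQueue()  # skips socket setup, returns the cached instance
    assert first is second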
def _setup_mgmt_sockets(self):
    self._mgmt_sockets = dict()

    # address for the communication from master to worker(s)
    mgmt_master_worker = 'inproc://master/worker/coordination/'

    # connect the master with the worker
    # the master is a ZMQStream because we are sending msgs from the test
    sock = self._ctx.socket(zmq.PUB)
    sock.bind(mgmt_master_worker)
    self._mgmt_sockets['tmp1'] = sock
    self._mgmt_sockets['master_pub'] = ZMQStream(sock, self._io_loop)
    # the worker stream is created inside the ZmqMgmt class
    self._mgmt_sockets['worker_sub'] = self._ctx.socket(zmq.SUB)
    self._mgmt_sockets['worker_sub'].setsockopt(zmq.SUBSCRIBE, b"")
    self._mgmt_sockets['worker_sub'].connect(mgmt_master_worker)

    # address for the communication from worker(s) to master
    mgmt_worker_master = 'inproc://worker/master/coordination/'

    # connect the worker with the master
    self._mgmt_sockets['worker_pub'] = self._ctx.socket(zmq.PUB)
    self._mgmt_sockets['worker_pub'].bind(mgmt_worker_master)
    sock = self._ctx.socket(zmq.SUB)
    sock.setsockopt(zmq.SUBSCRIBE, b"")
    sock.connect(mgmt_worker_master)
    self._mgmt_sockets['tmp2'] = sock
    self._mgmt_sockets['master_sub'] = ZMQStream(sock, self._io_loop)
def __init__(self, frontier,
             data_in_sock='ipc:///tmp/robot-data-w2m.sock',
             data_out_sock='ipc:///tmp/robot-data-m2w.sock',
             msg_in_sock='ipc:///tmp/robot-msg-w2m.sock',
             msg_out_sock='ipc:///tmp/robot-msg-m2w.sock',
             io_loop=None):
    self.identity = 'master:%s:%s' % (socket.gethostname(), os.getpid())

    context = zmq.Context()
    self._io_loop = io_loop or IOLoop.instance()

    self._in_socket = context.socket(zmq.SUB)
    self._in_socket.setsockopt(zmq.SUBSCRIBE, b'')
    self._in_socket.bind(data_in_sock)
    self._in_stream = ZMQStream(self._in_socket, io_loop)

    self._out_socket = context.socket(zmq.PUSH)
    self._out_socket.bind(data_out_sock)
    self._out_stream = ZMQStream(self._out_socket, io_loop)

    self._online_workers = set()
    self._running = False

    self._updater = PeriodicCallback(self._send_next, 100,
                                     io_loop=io_loop)
    self._reloader = PeriodicCallback(self.reload, 1000,
                                      io_loop=io_loop)

    self.frontier = frontier
    self.messenger = ServerMessenger(msg_in_sock, msg_out_sock,
                                     context, io_loop)
def connect(self):
    """
    Creates and starts all necessary threads and sockets for heartbeating,
    job polling, and message passing.

    Returns:
        [0] on success
        [-1] error (not connected to hub)
    """
    logger.debug("[WORKER] hub.connect()")
    endpoint = ("tcp://" + self.worker.config.hub_ip + ':'
                + self.worker.config.hub_port)
    context = zmq.Context.instance()

    self.send_socket = context.socket(zmq.DEALER)
    self.send_socket.identity = self.worker.heart_ip + '/send'
    self.send_socket.connect(endpoint)

    self.job_socket = context.socket(zmq.DEALER)
    self.job_socket.identity = self.worker.heart_ip + '/jobs'
    self.job_socket.connect(endpoint)
    self.job_stream = ZMQStream(self.job_socket)
    # self.job_stream.on_recv(self.poll_hub)

    self.heart_socket = context.socket(zmq.DEALER)
    self.heart_socket.identity = self.worker.heart_ip + '/heart'
    self.heart_socket.connect(endpoint)
    self.heart_stream = ZMQStream(self.heart_socket)

    self.heartbeat_timer = tornado.ioloop.PeriodicCallback(
        self.heartbeat, self.heartbeat_interval, self.loop)
    return 0
def open(self, key):
    logger.debug("websocket opened for key %s", key)
    self.key = key

    # opening the corresponding sockets of the model

    # open push socket to forward incoming zmq messages
    push = self.ctx.socket(zmq.PUSH)
    pull_port = self.database[key]["ports"]['PULL']
    node = "localhost"
    # node = self.database[key]["node"]
    push.connect("tcp://%s:%d" % (node, pull_port))
    self.pushstream = ZMQStream(push)

    sub = self.ctx.socket(zmq.SUB)
    pub_port = self.database[key]["ports"]['PUB']
    sub.connect("tcp://%s:%d" % (node, pub_port))
    # Accept all messages
    sub.setsockopt(zmq.SUBSCRIBE, b'')
    self.substream = ZMQStream(sub)

    def send(messages):
        """Forward messages to this websocket."""
        logger.info("received %s messages", len(messages))
        for message in messages:
            # treat anything that does not parse as JSON as binary
            try:
                json.loads(message)
                binary = False
            except ValueError:
                binary = True
            self.write_message(message, binary)

    self.substream.on_recv(send)
def __init__(self, context, main_ep, opt_ep=None, service_q=None):
    """Init MDPBroker instance."""
    if service_q is None:
        self.service_q = ServiceQueue
    else:
        self.service_q = service_q

    socket = context.socket(zmq.ROUTER)
    socket.bind(main_ep)
    self.main_stream = ZMQStream(socket)
    self.main_stream.on_recv(self.on_message)

    if opt_ep:
        socket = context.socket(zmq.ROUTER)
        socket.bind(opt_ep)
        self.client_stream = ZMQStream(socket)
        self.client_stream.on_recv(self.on_message)
    else:
        self.client_stream = self.main_stream

    self._workers = {}
    # services contain the service queue and the request queue
    self._services = {}
    self._worker_cmds = {
        b'\x01': self.on_ready,
        b'\x03': self.on_reply,
        b'\x04': self.on_heartbeat,
        b'\x05': self.on_disconnect,
    }

    self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
    self.hb_check_timer.start()
def __init__(self, port=5556):
    self.port = port
    self.ctx = zmq.Context()
    self.kvmap = {}
    self.loop = IOLoop.instance()

    # Set up our clone server sockets
    self.snapshot = self.ctx.socket(zmq.ROUTER)
    self.publisher = self.ctx.socket(zmq.PUB)
    self.collector = self.ctx.socket(zmq.PULL)
    self.snapshot.bind("tcp://*:%d" % self.port)
    self.publisher.bind("tcp://*:%d" % (self.port + 1))
    self.collector.bind("tcp://*:%d" % (self.port + 2))

    # Wrap sockets in ZMQStreams for IOLoop handlers
    self.snapshot = ZMQStream(self.snapshot)
    self.publisher = ZMQStream(self.publisher)
    self.collector = ZMQStream(self.collector)

    # Register our handlers with reactor
    self.snapshot.on_recv(self.handle_snapshot)
    self.collector.on_recv(self.handle_collect)
    self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)

    # basic log formatting:
    logging.basicConfig(format="%(asctime)s %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        level=logging.INFO)
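# Usage sketch (added for illustration). The enclosing class name is not
# visible in the snippet above, so CloneServer is an assumed name; the
# pattern resembles the zguide clone-server example.
def _example_run_clone_server():
    server = CloneServer(port=5556)  # hypothetical class name
    server.flush_callback.start()
    server.loop.start()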
def __init__(self, settings, identity, insocket, outsocket, mgmt, frontier,
             log_handler, log_level, io_loop):
    """
    Initialize the master.
    """
    LoggingMixin.__init__(self, log_handler, log_level)

    self._identity = identity
    self._io_loop = io_loop or IOLoop.instance()

    self._in_stream = ZMQStream(insocket, io_loop)
    self._out_stream = ZMQStream(outsocket, io_loop)

    self._mgmt = mgmt
    self._frontier = frontier

    self._running = False
    self._available_workers = []

    # periodically check if there are pending URIs to crawl
    self._periodic_update = PeriodicCallback(
        self._send_next_uri,
        settings.MASTER_PERIODIC_UPDATE_INTERVAL,
        io_loop=io_loop)
    # start this periodic callback when you are waiting for the workers to
    # finish
    self._periodic_shutdown = PeriodicCallback(self._shutdown_wait, 500,
                                               io_loop=io_loop)
    self._shutdown_counter = 0

    self._logger.debug("zmqmaster::initialized")
def __init__(self, robot,
             data_in_sock='ipc:///tmp/robot-data-m2w.sock',
             data_out_sock='ipc:///tmp/robot-data-w2m.sock',
             msg_in_sock='ipc:///tmp/robot-msg-m2w.sock',
             msg_out_sock='ipc:///tmp/robot-msg-w2m.sock',
             io_loop=None):
    self.identity = 'worker:%s:%s' % (socket.gethostname(), os.getpid())

    context = zmq.Context()
    self._io_loop = io_loop or IOLoop.instance()

    self._in_socket = context.socket(zmq.PULL)
    self._in_socket.connect(data_in_sock)
    self._in_stream = ZMQStream(self._in_socket, io_loop)

    self._out_socket = context.socket(zmq.PUB)
    self._out_socket.connect(data_out_sock)
    self._out_stream = ZMQStream(self._out_socket, io_loop)

    self._running = False

    self.robot = robot
    self.robot.set_worker_identity(self.identity)
    self.messenger = ClientMessenger(msg_in_sock, msg_out_sock,
                                     context, io_loop)
def __set_callback(self):
    self.frontend = ZMQStream(self.frontend_sock)
    self.backend = ZMQStream(self.backend_sock)
    self.peerend = ZMQStream(self.peerend_sock)
    self.frontend.on_recv(self.handle_frontend)
    self.backend.on_recv(self.handle_backend)
    self.peerend.on_recv(self.handle_peerend)
def register_pollin(self):
    with ImportExtensions(required=True):
        import tornado.ioloop

        get_or_reuse_loop()
        self.io_loop = tornado.ioloop.IOLoop.current()

    self.in_sock = ZMQStream(self.in_sock, self.io_loop)
    self.out_sock = ZMQStream(self.out_sock, self.io_loop)
    self.ctrl_sock = ZMQStream(self.ctrl_sock, self.io_loop)
    self.in_sock.stop_on_recv()
def __init__(self, backend_socket, frontend_socket, clients, workers):
    self.avaliable_workers = 0
    self.workers = []
    self.worker_num = workers
    self.client_num = clients

    self.backend = ZMQStream(backend_socket)
    self.frontend = ZMQStream(frontend_socket)
    self.backend.on_recv(self.handle_backend)

    self.loop = IOLoop.instance()
async def _setup_heartmonitor(
    ctx,
    ping_url,
    pong_url,
    monitor_url,
    log_level=logging.INFO,
    curve_publickey=None,
    curve_secretkey=None,
    **heart_monitor_kwargs,
):
    """Set up heart monitor

    For use in a background process, via Process(target=start_heartmonitor)
    """
    ping_socket = ctx.socket(zmq.PUB)
    bind(
        ping_socket,
        ping_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
    )
    ping_stream = ZMQStream(ping_socket)

    pong_socket = ctx.socket(zmq.ROUTER)
    set_hwm(pong_socket, 0)
    bind(
        pong_socket,
        pong_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
    )
    pong_stream = ZMQStream(pong_socket)

    monitor_socket = ctx.socket(zmq.XPUB)
    connect(
        monitor_socket,
        monitor_url,
        curve_publickey=curve_publickey,
        curve_secretkey=curve_secretkey,
        curve_serverkey=curve_publickey,
    )
    monitor_stream = ZMQStream(monitor_socket)

    # reinitialize logging after fork
    from .app import IPController

    app = IPController(log_level=log_level)
    heart_monitor_kwargs['log'] = app.log

    heart_monitor = HeartMonitor(
        ping_stream=ping_stream,
        pong_stream=pong_stream,
        monitor_stream=monitor_stream,
        **heart_monitor_kwargs,
    )
    heart_monitor.start()
def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, config=None,
                     logname='ZMQ', log_addr=None, loglevel=logging.DEBUG,
                     scheme='lru', identity=b'task'):
    from zmq.eventloop import ioloop
    from zmq.eventloop.zmqstream import ZMQStream

    if config:
        # unwrap dict back into Config
        config = Config(config)

    ctx = zmq.Context()
    loop = ioloop.IOLoop()
    ins = ZMQStream(ctx.socket(zmq.XREP), loop)
    ins.setsockopt(zmq.IDENTITY, identity)
    ins.bind(in_addr)

    outs = ZMQStream(ctx.socket(zmq.XREP), loop)
    outs.setsockopt(zmq.IDENTITY, identity)
    outs.bind(out_addr)
    mons = ZMQStream(ctx.socket(zmq.PUB), loop)
    mons.connect(mon_addr)
    nots = ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    scheme = globals().get(scheme, None)

    # setup logging
    if log_addr:
        connect_logger(logname, ctx, log_addr, root="scheduler",
                       loglevel=loglevel)
    else:
        local_logger(logname, loglevel)

    scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
                              mon_stream=mons, notifier_stream=nots,
                              scheme=scheme, loop=loop, logname=logname,
                              config=config)
    scheduler.start()
    try:
        loop.start()
    except KeyboardInterrupt:
        print("interrupted, exiting...", file=sys.__stderr__)
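# Usage sketch (added for illustration; all endpoints are placeholders).
# launch_scheduler blocks in loop.start(), so it is typically run in its
# own process.
def _example_launch_scheduler():
    launch_scheduler(in_addr='tcp://127.0.0.1:6000',
                     out_addr='tcp://127.0.0.1:6001',
                     mon_addr='tcp://127.0.0.1:6002',
                     not_addr='tcp://127.0.0.1:6003',
                     scheme='lru')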
def __init__(self, backend_socket, frontend_socket):
    self.available_workers = 0
    self.workers = []
    self.client_nbr = NBR_CLIENTS

    self.backend = ZMQStream(backend_socket)
    self.frontend = ZMQStream(frontend_socket)
    self.backend.on_recv(self.handle_backend)

    self.loop = IOLoop.instance()
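# Usage sketch (added for illustration). The enclosing class name is not
# shown, so LRUProxy is an assumed name; the constructor expects two
# already-bound ROUTER sockets.
def _example_run_lru_proxy():
    import zmq

    ctx = zmq.Context.instance()
    frontend = ctx.socket(zmq.ROUTER)
    frontend.bind('ipc://frontend.ipc')
    backend = ctx.socket(zmq.ROUTER)
    backend.bind('ipc://backend.ipc')

    proxy = LRUProxy(backend, frontend)  # hypothetical class name
    proxy.loop.start()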
def _register_pollin(self):
    """Register :attr:`in_sock`, :attr:`ctrl_sock` and :attr:`out_sock` in
    poller."""
    with ImportExtensions(required=True):
        import tornado.ioloop

        get_or_reuse_loop()
        self.io_loop = tornado.ioloop.IOLoop.current()

    self.in_sock = ZMQStream(self.in_sock, self.io_loop)
    self.out_sock = ZMQStream(self.out_sock, self.io_loop)
    self.ctrl_sock = ZMQStream(self.ctrl_sock, self.io_loop)
    self.in_sock.stop_on_recv()
def register_pollin(self):
    use_uvloop()
    import asyncio

    asyncio.set_event_loop(asyncio.new_event_loop())
    with ImportExtensions(required=True):
        import tornado.ioloop

        self.io_loop = tornado.ioloop.IOLoop.current()

    self.in_sock = ZMQStream(self.in_sock, self.io_loop)
    self.out_sock = ZMQStream(self.out_sock, self.io_loop)
    self.ctrl_sock = ZMQStream(self.ctrl_sock, self.io_loop)
    self.in_sock.stop_on_recv()
def __init__(self, context, main_ep, opt_ep=None, service_q=None,
             data_q=None):
    """Init MNBroker instance."""
    if service_q is None:
        self.service_q = ServiceQueue
    else:
        self.service_q = service_q
    if data_q is None:
        self.data_q = ServiceQueue
    else:
        self.data_q = data_q

    socket = context.socket(zmq.ROUTER)
    socket.bind(main_ep)
    socket.setsockopt(zmq.IDENTITY, b'BROKER')
    self.main_stream = ZMQStream(socket)
    self.main_stream.on_recv(self.on_message)

    if opt_ep:
        socket = context.socket(zmq.ROUTER)
        socket.bind(opt_ep)
        self.client_stream = ZMQStream(socket)
        self.client_stream.on_recv(self.on_message)
    else:
        self.client_stream = self.main_stream

    # TODO: merge worker_tracker and info
    self._workers = {}
    self._workers_info = {}
    self._services = {}

    # TODO: each worker must have its own request queue
    self._worker_cmds = {
        MSG_READY: self.on_ready,
        MSG_REPLY: self.on_reply,
        MSG_HEARTBEAT: self.on_heartbeat,
        MSG_DISCONNECT: self.on_disconnect,
    }
    self._local_cmds = {
        MSG_WINFO: self.get_workers_info,
    }

    self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
    self.hb_check_timer.start()
    self.hb_get_winfo = PeriodicCallback(self.collect_workers_info,
                                         HB_INTERVAL)
    self.hb_get_winfo.start()

    # register this instance
    self.register_worker_info(self.main_stream.getsockopt(zmq.IDENTITY))
    _LOG.info("Broker initialized and can be found at '%s'" % main_ep)
def start(self):
    self.log.info(
        f"Starting kernel nanny for engine {self.engine_id}, "
        f"pid={self.pid}, nanny pid={os.getpid()}"
    )
    self._watcher_thread = Thread(
        target=self.wait_for_parent_thread, name="WatchParent", daemon=True
    )
    self._watcher_thread.start()
    # ignore SIGINT sent to parent
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    self.loop = IOLoop.current()
    self.context = zmq.Context()

    # set up control socket (connection to Scheduler)
    self.control_socket = self.context.socket(zmq.ROUTER)
    self.control_socket.identity = self.identity
    util.connect(
        self.control_socket,
        self.control_url,
        curve_serverkey=self.curve_serverkey,
    )
    self.control_stream = ZMQStream(self.control_socket)
    self.control_stream.on_recv_stream(self.dispatch_control)

    # set up relay socket (connection to parent's control socket)
    self.parent_socket = self.context.socket(zmq.DEALER)
    if self.curve_secretkey:
        self.parent_socket.setsockopt(zmq.CURVE_SERVER, 1)
        self.parent_socket.setsockopt(zmq.CURVE_SECRETKEY,
                                      self.curve_secretkey)
    port = self.parent_socket.bind_to_random_port("tcp://127.0.0.1")

    # now that we've bound, pass port to parent via AsyncResult
    self.pipe.write(f"tcp://127.0.0.1:{port}\n")

    if not sys.platform.startswith("win"):
        # watch for the stdout pipe to close
        # as a signal that our parent is shutting down
        self.loop.add_handler(self.pipe, self.pipe_handler,
                              IOLoop.READ | IOLoop.ERROR)
    self.parent_stream = ZMQStream(self.parent_socket)
    self.parent_stream.on_recv_stream(self.dispatch_parent)

    try:
        self.loop.start()
    finally:
        self.loop.close(all_fds=True)
        self.context.term()
        try:
            self.pipe.close()
        except BrokenPipeError:
            pass
    self.log.debug("exiting")
def test_that_creating_mgmt_works(self):
    ctx = zmq.Context()
    io_loop = IOLoop.instance()

    def stop_looping(_msg):
        io_loop.stop()

    settings = Settings()
    settings.ZEROMQ_MASTER_PUSH = 'inproc://spyder-zmq-master-push'
    settings.ZEROMQ_WORKER_PROC_FETCHER_PULL = \
        settings.ZEROMQ_MASTER_PUSH
    settings.ZEROMQ_MASTER_SUB = 'inproc://spyder-zmq-master-sub'
    settings.ZEROMQ_WORKER_PROC_EXTRACTOR_PUB = \
        settings.ZEROMQ_MASTER_SUB
    settings.ZEROMQ_MGMT_MASTER = 'inproc://spyder-zmq-mgmt-master'
    settings.ZEROMQ_MGMT_WORKER = 'inproc://spyder-zmq-mgmt-worker'

    pubsocket = ctx.socket(zmq.PUB)
    pubsocket.bind(settings.ZEROMQ_MGMT_MASTER)
    pub_stream = ZMQStream(pubsocket, io_loop)

    subsocket = ctx.socket(zmq.SUB)
    subsocket.setsockopt(zmq.SUBSCRIBE, b"")
    subsocket.bind(settings.ZEROMQ_MGMT_WORKER)
    sub_stream = ZMQStream(subsocket, io_loop)

    mgmt = workerprocess.create_worker_management(settings, ctx, io_loop)
    mgmt.add_callback(ZMQ_SPYDER_MGMT_WORKER, stop_looping)
    mgmt.start()

    def assert_quit_message(msg):
        self.assertEqual(ZMQ_SPYDER_MGMT_WORKER_QUIT_ACK, msg.data)

    sub_stream.on_recv(assert_quit_message)

    death = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER,
                        data=ZMQ_SPYDER_MGMT_WORKER_QUIT)
    pub_stream.send_multipart(death.serialize())

    io_loop.start()

    mgmt._out_stream.close()
    mgmt._in_stream.close()
    mgmt._publisher.close()
    mgmt._subscriber.close()
    pub_stream.close()
    pubsocket.close()
    sub_stream.close()
    subsocket.close()
    ctx.term()
def on_open(self, info):
    self.prog_socket = context.socket(zmq.SUB)
    self.prog_socket.connect('ipc:///tmp/progressOut.ipc')
    self.prog_socket.setsockopt(zmq.SUBSCRIBE, b'')
    self.prog_stream = ZMQStream(self.prog_socket,
                                 tornado.ioloop.IOLoop.instance())
    self.prog_stream.on_recv(self.process_prog)

    self.peak_socket = context.socket(zmq.SUB)
    self.peak_socket.connect('ipc:///tmp/peaks.ipc')
    self.peak_socket.setsockopt(zmq.SUBSCRIBE, b'')
    self.peak_stream = ZMQStream(self.peak_socket,
                                 tornado.ioloop.IOLoop.instance())
    self.peak_stream.on_recv(self.process_peaks)
def __init__(self, primary=True, ports=(5556, 5566)):
    self.primary = primary
    if primary:
        self.port, self.peer = ports
        frontend = "tcp://*:5003"
        backend = "tcp://localhost:5004"
        self.kvmap = {}
    else:
        self.peer, self.port = ports
        frontend = "tcp://*:5004"
        backend = "tcp://localhost:5003"

    self.ctx = zmq.Context.instance()
    self.pending = []
    self.bstar = BinaryStar(primary, frontend, backend)
    self.bstar.register_voter("tcp://*:%i" % self.port, zmq.ROUTER,
                              self.handle_snapshot)

    # Set up our clone server sockets
    self.publisher = self.ctx.socket(zmq.PUB)
    self.collector = self.ctx.socket(zmq.SUB)
    self.collector.setsockopt(zmq.SUBSCRIBE, b'')
    self.publisher.bind("tcp://*:%d" % (self.port + 1))
    self.collector.bind("tcp://*:%d" % (self.port + 2))

    # Set up our own clone client interface to peer
    self.subscriber = self.ctx.socket(zmq.SUB)
    self.subscriber.setsockopt(zmq.SUBSCRIBE, b'')
    self.subscriber.connect("tcp://localhost:%d" % (self.peer + 1))

    # Register state change handlers
    self.bstar.master_callback = self.become_master
    self.bstar.slave_callback = self.become_slave

    # Wrap sockets in ZMQStreams for IOLoop handlers
    self.publisher = ZMQStream(self.publisher)
    self.subscriber = ZMQStream(self.subscriber)
    self.collector = ZMQStream(self.collector)

    # Register our handlers with reactor
    self.collector.on_recv(self.handle_collect)
    self.flush_callback = PeriodicCallback(self.flush_ttl, 1000)
    self.hugz_callback = PeriodicCallback(self.send_hugz, 1000)

    # basic log formatting:
    logging.basicConfig(format="%(asctime)s %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        level=logging.INFO)
def __init__(self, context, main_ep, client_ep, hb_ep, service_q=None):
    """Init MDPBroker instance."""
    if service_q is None:
        self.service_q = ServiceQueue
    else:
        self.service_q = service_q

    #
    # Setup the zmq sockets.
    #
    socket = context.socket(zmq.ROUTER)
    socket.bind(main_ep)
    self.main_stream = ZMQStream(socket)
    self.main_stream.on_recv(self.on_message)

    socket = context.socket(zmq.ROUTER)
    socket.bind(client_ep)
    self.client_stream = ZMQStream(socket)
    self.client_stream.on_recv(self.on_message)

    socket = context.socket(zmq.ROUTER)
    socket.bind(hb_ep)
    self.hb_stream = ZMQStream(socket)
    self.hb_stream.on_recv(self.on_message)

    self._workers = {}

    #
    # services contain the service queue and the request queue
    #
    self._services = {}

    #
    # Mapping of worker commands and callbacks.
    #
    self._worker_cmds = {
        W_READY: self.on_ready,
        W_REPLY: self.on_reply,
        W_HEARTBEAT: self.on_heartbeat,
        W_DISCONNECT: self.on_disconnect,
    }

    #
    # 'Cleanup' timer for workers without heartbeat.
    #
    self.hb_check_timer = PeriodicCallback(self.on_timer, HB_INTERVAL)
    self.hb_check_timer.start()
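# Usage sketch (added for illustration; endpoints are placeholders). This
# MDPBroker variant splits worker, client, and heartbeat traffic onto three
# ROUTER endpoints.
def _example_run_split_endpoint_broker():
    import zmq
    from tornado.ioloop import IOLoop

    MDPBroker(zmq.Context(),
              main_ep='tcp://127.0.0.1:5555',
              client_ep='tcp://127.0.0.1:5556',
              hb_ep='tcp://127.0.0.1:5557')
    IOLoop.instance().start()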
def _setup_streams(self):
    """
    Setup ZMQ streams.

    These need to be constructed within the right active event loop,
    i.e. this must run in the background thread.
    """
    assert threading.current_thread() is self.thread
    assert self.loop

    from zmq.eventloop.zmqstream import ZMQStream

    with self._condition:
        self._shell_stream = ZMQStream(self._shell_socket,
                                       io_loop=self.loop)
        self._control_stream = ZMQStream(self._control_socket,
                                         io_loop=self.loop)
        self._condition.notify_all()
def _register_pollin(self):
    """Register :attr:`in_sock`, :attr:`ctrl_sock` and :attr:`out_sock` in
    poller."""
    with ImportExtensions(required=True):
        import tornado.ioloop

        get_or_reuse_loop()
        self.io_loop = tornado.ioloop.IOLoop.current()
        self.io_loop.add_callback(callback=lambda: self.is_ready_event.set())

    self.in_sock = ZMQStream(self.in_sock, self.io_loop)
    self.out_sock = ZMQStream(self.out_sock, self.io_loop)
    self.ctrl_sock = ZMQStream(self.ctrl_sock, self.io_loop)
    if self.in_connect_sock is not None:
        self.in_connect_sock = ZMQStream(self.in_connect_sock, self.io_loop)
    self.in_sock.stop_on_recv()
def subscribe(self):
    projects, error = yield state.project_list()
    self.projects = [x['_id'] for x in projects]
    self.uid = uuid.uuid4().hex
    self.connections = self.application.admin_connections

    context = zmq.Context()
    subscribe_socket = context.socket(zmq.SUB)

    if self.application.zmq_pub_sub_proxy:
        subscribe_socket.connect(self.application.zmq_xpub)
    else:
        for address in self.application.zmq_sub_address:
            subscribe_socket.connect(address)

    for project_id in self.projects:
        if project_id not in self.connections:
            self.connections[project_id] = {}
        self.connections[project_id][self.uid] = self

        channel_to_subscribe = create_project_channel_name(project_id)
        subscribe_socket.setsockopt_string(zmq.SUBSCRIBE,
                                           six.u(channel_to_subscribe))

    self.subscribe_stream = ZMQStream(subscribe_socket)
    self.subscribe_stream.on_recv(self.on_message_published)

    logger.info('admin connected')
def __init__(self):
    # procControllerAddr = '165.227.24.226'  # I am a client to the HostController
    # procControllerPort = '5557'
    hostControllerPort = '5556'  # I am a server to the device

    print("Host Controller Starting\n")

    self.context = zmq.Context()  # get context
    self.loop = IOLoop.instance()

    # self.clientSetup = ClientSetup(context)  # instantiate the ClientSetup object
    self.serverSetup = ServerSetup(self.context)  # instantiate the ServerSetup object

    # set up separate server and client sockets
    self.serverSocket = self.serverSetup.createServerSocket()  # get a server socket
    self.serverSetup.serverBind(hostControllerPort, self.serverSocket)  # bind to an address
    # self.clientSocket = self.clientSetup.createClientSocket()  # get a client socket

    # NOTE: setIdentity() MUST BE CALLED BEFORE clientConnect or the identity
    # will not take effect
    # self.clientSetup.setIdentity(MasterId().getDevId())  # get the device id
    # self.clientSetup.clientConnect(hostControllerAddr, hostControllerPort,
    #                                self.clientSocket)  # connect to server

    self.serverSocket = ZMQStream(self.serverSocket)
    self.serverSocket.on_recv(self.onServerRecv)

    self.messages = Messages()  # instantiate a Messages object
    self.inDict = {}
    self.outDict = {}
def subscribe(self, zmq_context, trade_pub_connection_string, trade_client):
    """Subscribe to market data feeds for this symbol."""
    self.md_pub_socket = zmq_context.socket(zmq.SUB)
    self.md_pub_socket.connect(trade_pub_connection_string)
    self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                  "MD_FULL_REFRESH_" + self.symbol)
    self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                  "MD_TRADE_" + self.symbol)
    self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                  "MD_INCREMENTAL_" + self.symbol + ".0")
    self.md_pub_socket.setsockopt(zmq.SUBSCRIBE,
                                  "MD_INCREMENTAL_" + self.symbol + ".1")

    self.md_pub_socket_stream = ZMQStream(self.md_pub_socket)
    self.md_pub_socket_stream.on_recv(self.on_md_publish)

    md_subscription_msg = {
        'MsgType': 'V',
        'MDReqID': '0',  # not important.
        'SubscriptionRequestType': '0',
        'MarketDepth': 0,
        'TradeDate': time.strftime("%Y%m%d", time.localtime()),
        'MDUpdateType': '0',
        'MDEntryTypes': ['0', '1', '2'],
        'Instruments': [self.symbol],
    }
    return trade_client.sendJSON(md_subscription_msg)