@gen.coroutine
def run_worker():
    context = Context(1)
    worker = context.socket(zmq.REQ)
    identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
    worker.setsockopt_string(zmq.IDENTITY, identity)
    worker.connect("tcp://localhost:5556")
    print("I: (%s) worker ready" % identity)
    yield worker.send_string(LRU_READY)
    cycles = 0
    while True:
        msg = yield worker.recv_multipart()
        if not msg:
            break
        cycles += 1
        if cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating a crash" % identity)
            break
        elif cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating CPU overload" % identity)
            yield gen.sleep(3)
        print("I: (%s) normal reply" % identity)
        # Do some heavy work
        yield gen.sleep(1)
        yield worker.send_multipart(msg)
@contextmanager
def socket():
    """Context manager to give the bound socket."""
    ctx = Context()
    sock = ctx.socket(zmq.PAIR)
    sock.bind("tcp://*:8889")
    yield sock
    sock.close()
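# A minimal usage sketch for the context manager above, assuming the
# future-returning Context used elsewhere in this collection (the ping()
# coroutine and payload are hypothetical, not from the original source):
import zmq
from tornado import gen
from zmq.eventloop.future import Context

@gen.coroutine
def ping():
    with socket() as sock:
        # PAIR peer is expected to connect to tcp://localhost:8889
        yield sock.send(b'ping')
        reply = yield sock.recv()
        print('received: {}'.format(reply))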
async def open(self):
    context = Context()
    sock = context.socket(zmq.SUB)
    sock.setsockopt(zmq.CONFLATE, 1)
    sock.connect("tcp://localhost:{}".format(settings.ZMQ_PORT))
    sock.subscribe(b'')
    self.sock = sock
    self._stopped = False
    logger.debug("client connected: %s", self.request.remote_ip)
    tornado.ioloop.IOLoop.current().spawn_callback(self.pipe_message)
@gen.coroutine
def run_queue():
    context = Context(1)
    frontend = context.socket(zmq.ROUTER)  # ROUTER
    backend = context.socket(zmq.ROUTER)   # ROUTER
    frontend.bind("tcp://*:5555")  # For clients
    backend.bind("tcp://*:5556")   # For workers
    poll_workers = Poller()
    poll_workers.register(backend, zmq.POLLIN)
    poll_both = Poller()
    poll_both.register(frontend, zmq.POLLIN)
    poll_both.register(backend, zmq.POLLIN)
    workers = []
    while True:
        if workers:
            socks = yield poll_both.poll()
        else:
            socks = yield poll_workers.poll()
        socks = dict(socks)
        # Handle worker activity on backend
        if socks.get(backend) == zmq.POLLIN:
            # Use worker address for LRU routing
            msg = yield backend.recv_multipart()
            if not msg:
                break
            print('I: received msg: {}'.format(msg))
            address = msg[0]
            workers.append(address)
            # Everything after the second (delimiter) frame is reply
            reply = msg[2:]
            # Forward message to client if it's not a READY
            if reply[0] != LRU_READY:
                print('I: sending -- reply: {}'.format(reply))
                yield frontend.send_multipart(reply)
            else:
                print('I: received ready -- address: {}'.format(address))
        if socks.get(frontend) == zmq.POLLIN:
            # Get client request, route to first available worker
            msg = yield frontend.recv_multipart()
            worker = workers.pop(0)
            request = [worker, b''] + msg
            print('I: sending -- worker: {} msg: {}'.format(worker, msg))
            yield backend.send_multipart(request)
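# The queue above fronts the run_worker() snippet at the top of this
# collection; a minimal client sketch for exercising the pair (the
# run_lru_client() name and payload are assumptions, not original code):
import zmq
from tornado import gen
from zmq.eventloop.future import Context

@gen.coroutine
def run_lru_client():
    context = Context()
    client = context.socket(zmq.REQ)
    client.connect("tcp://localhost:5555")  # frontend bound by run_queue()
    yield client.send(b"HELLO")
    reply = yield client.recv_multipart()
    print('I: client received: {}'.format(reply))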
@gen.coroutine
def publisher(port=8135):
    context = Context()
    pub_uuid = str(uuid.uuid4())
    pub = context.socket(zmq.PUB)
    pub.connect("tcp://localhost:%s" % port)
    poller = Poller()
    poller.register(pub, zmq.POLLOUT)
    while True:
        topic = 'heartbeat'
        utc = arrow.utcnow()
        raw = json.dumps({"timestamp": utc.timestamp, "uuid": pub_uuid})
        message = '{0} {1}'.format(topic, raw)
        # message is a str, so send_string rather than send (which wants bytes)
        yield pub.send_string(message)
        yield gen.sleep(1)
@gen.coroutine
def subscriber(port=8135):
    ''' Bind Subscriber '''
    logging.warning("Binding SUB socket on port: {0}".format(port))
    context = Context()
    sub = context.socket(zmq.SUB)
    sub.bind("tcp://*:%s" % port)
    # Subscription topics must be set as strings via setsockopt_string
    # (plain setsockopt wants bytes)
    sub.setsockopt_string(zmq.SUBSCRIBE, " ")
    sub.setsockopt_string(zmq.SUBSCRIBE, "heartbeat")
    sub.setsockopt_string(zmq.SUBSCRIBE, "asterisk")
    sub.setsockopt_string(zmq.SUBSCRIBE, "logging")
    sub.setsockopt_string(zmq.SUBSCRIBE, "upload")
    sub.setsockopt_string(zmq.SUBSCRIBE, "beam")
    poller = Poller()
    poller.register(sub, zmq.POLLIN)
    http_client = _http_client.AsyncHTTPClient()
    while True:
        events = yield poller.poll(timeout=1000)
        if sub in dict(events):
            # receive raw msg from sub (as str, so split() works below)
            msg = yield sub.recv_string()
            # get topic and message
            topic = msg.split(' ')[0]
            message = ' '.join(msg.split(' ')[1:])
            # This logic makes more sense directly on the beam side, which
            # is why it is moving there; for now this sub_bind.py helps
            # clear up some ideas.
            if topic.startswith('heartbeat'):
                print(topic, message)
            elif topic.startswith('asterisk'):
                print(topic, message)
            elif topic.startswith('logging'):
                print(topic, message)
            elif topic.startswith('upload'):
                print(topic, message)
            elif topic.startswith('beam'):
                print(topic, message)
            else:
                # let it crash
                logging.warning('let it crash')
                print(msg)
        else:
            # logging.warning('nothing received')
            pass
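# The publisher/subscriber pair above can be driven from a single IOLoop;
# a minimal launcher sketch (the wiring is an assumption -- note that, per
# the code above, the subscriber binds and the publisher connects):
from tornado import ioloop

loop = ioloop.IOLoop.current()
loop.spawn_callback(subscriber)  # binds SUB on tcp://*:8135
loop.spawn_callback(publisher)   # connects PUB to tcp://localhost:8135
loop.start()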
@gen.coroutine
def run(loop):
    context = Context.instance()
    client = context.socket(zmq.ROUTER)
    client.bind("tcp://*:5671")
    responses = yield [worker_task(idx) for idx in range(NBR_WORKERS)] + [requestor(client)]
    print('responses: {}'.format(responses))
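# run() gathers the worker_task() coroutines below together with a
# requestor(client) coroutine that is not part of this collection; a
# plausible sketch, reconstructed from the zguide ROUTER-to-REQ pattern
# (the frame layout matches worker_task below; the task count is an
# assumption):
@gen.coroutine
def requestor(client):
    # Hand out a fixed number of workloads, LRU-style: whichever worker
    # reports ready next gets the next task.
    for _ in range(NBR_WORKERS * 10):
        address, empty, ready = yield client.recv_multipart()
        yield client.send_multipart([address, b'', b'This is the workload'])
    # Then tell each worker to stop as it next reports in.
    stopped = set()
    while len(stopped) < NBR_WORKERS:
        address, empty, ready = yield client.recv_multipart()
        yield client.send_multipart([address, b'', b'END'])
        stopped.add(address)
    raise gen.Return('requestor done')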
def step1(loop, context=None):
    """Step 1"""
    context = context or Context.instance()
    # Signal downstream to step 2
    sender = context.socket(zmq.PAIR)
    sender.connect("inproc://step2")
    msg = b'message from step1'
    sender.send(msg)
    printdbg('(step1) sent msg: {}'.format(msg))
@gen.coroutine
def run(loop):
    context = Context.instance()
    client = context.socket(zmq.ROUTER)
    client.bind("ipc://routing.ipc")
    responses = yield [
        worker_a(context),
        worker_b(context),
        dealer(client),
    ]
    print('responses: {}'.format(responses))
@gen.coroutine
def run(loop):
    """ server routine """
    # Prepare our context and sockets
    context = Context.instance()
    # Bind to inproc: endpoint, then start upstream thread
    receiver = context.socket(zmq.PAIR)
    receiver.bind("inproc://step3")
    loop.add_callback(partial(step2, loop))
    # Wait for signal
    msg = yield receiver.recv()
    print("Test successful! msg: {}".format(msg))
@gen.coroutine
def run(loop):
    ctx = Context()
    a, b = zpipe(ctx)
    responses = yield [
        client_task(ctx, b),
        server_task(ctx),
        monitor(a),
    ]
    print('responses: {}'.format(responses))
    del a, b
    print('(run) finished')
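# zpipe() is the zguide zhelpers utility assumed by run(); roughly, it
# builds an in-process PAIR pipe (a sketch of the assumed helper, not part
# of this snippet):
import binascii
import os

def zpipe(ctx):
    """Build an inproc PAIR pipe; returns the (bound, connected) sockets."""
    a = ctx.socket(zmq.PAIR)
    b = ctx.socket(zmq.PAIR)
    a.linger = b.linger = 0
    a.hwm = b.hwm = 1
    iface = "inproc://%s" % binascii.hexlify(os.urandom(8)).decode('ascii')
    a.bind(iface)
    b.connect(iface)
    return a, b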
async def get(self):
    ctx = FutureContext.instance()
    s = ctx.socket(zmq.DEALER)
    s.connect('tcp://127.0.0.1:5555')
    # send request to worker
    await s.send(b"hello")
    # finish web request with worker's reply
    reply = await s.recv_string()
    print("\nfinishing with %r\n" % reply)
    self.write(reply)
@gen.coroutine
def get(self):
    ctx = FutureContext.instance()
    s = ctx.socket(zmq.DEALER)
    s.connect('tcp://127.0.0.1:5555')
    # send request to worker
    yield s.send(b'hello')
    # finish web request with worker's reply
    reply = yield s.recv_string()
    print("\nfinishing with %r\n" % reply)
    self.write(reply)
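# Both get() variants above are tornado RequestHandler methods that talk to
# a worker on tcp://127.0.0.1:5555 (the ROUTER echo server later in this
# collection fits that role). A minimal hosting sketch -- the HelloHandler
# class name and the port are assumptions:
from tornado import ioloop, web

app = web.Application([(r"/", HelloHandler)])  # HelloHandler carries get()
app.listen(8888)
ioloop.IOLoop.current().start()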
class PyTestEngine(object):
    def __init__(self, opts):
        self.opts = opts
        self.id = opts["id"]
        self.role = opts["__role"]
        self.returner_address = opts["pytest-{}".format(self.role)]["returner_address"]

    def start(self):
        log.info("Starting Pytest Event Forwarder Engine (forwarding to %s)", self.returner_address)
        self.io_loop = ioloop.IOLoop()
        self.io_loop.make_current()
        self.io_loop.add_callback(self._start)
        atexit.register(self.stop)
        self.io_loop.start()

    @gen.coroutine
    def _start(self):
        self.context = Context()
        self.push = self.context.socket(zmq.PUSH)
        log.debug("Connecting PUSH socket to %s", self.returner_address)
        self.push.connect(self.returner_address)
        minion_opts = self.opts.copy()
        minion_opts["file_client"] = "local"
        self.event = salt.utils.event.get_event(
            "master", opts=minion_opts, io_loop=self.io_loop, listen=True
        )
        self.event.subscribe("")
        self.event.set_event_handler(self.handle_event)
        event_tag = "salt/master/{}/start".format(self.id)
        log.info("Firing event on engine start. Tag: %s", event_tag)
        load = {"id": self.id, "tag": event_tag, "data": {}}
        self.event.fire_event(load, event_tag)

    def stop(self):
        push = self.push
        context = self.context
        event = self.event
        self.push = self.context = self.event = None
        if event:
            event.unsubscribe("")
            event.destroy()
        if push and context:
            push.close(1000)
            context.term()
        self.io_loop.add_callback(self.io_loop.stop)

    @gen.coroutine
    def handle_event(self, payload):
        tag, data = salt.utils.event.SaltEvent.unpack(payload)
        log.debug("Received Event; TAG: %r DATA: %r", tag, data)
        forward = salt.utils.msgpack.dumps((self.id, tag, data), use_bin_type=True)
        yield self.push.send(forward)
@gen.coroutine
def run_server():
    context = Context()
    server = context.socket(zmq.REP)
    server.bind(SERVER_ADDR)
    cycles = 0
    while True:
        request = yield server.recv()
        cycles += 1
        # Simulate various problems, after a few cycles
        if cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating a crash")
            server.unbind(SERVER_ADDR)
            # Delay for a bit, else we get "Address already in use" error.
            # Note that to really simulate a crash, we should probably kill
            # this process and start another.
            yield gen.sleep(2)
            break
        elif cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating CPU overload")
            yield gen.sleep(2)
        print("I: Normal request (%s)" % request)
        yield gen.sleep(1)  # Do some heavy work
        yield server.send(request)
    raise gen.Return((context, server))
@gen.coroutine
def step2(loop, context=None):
    """Step 2"""
    context = context or Context.instance()
    # Bind to inproc: endpoint, then start upstream thread
    receiver = context.socket(zmq.PAIR)
    receiver.bind("inproc://step2")
    loop.add_callback(partial(step1, loop))
    # Wait for signal
    msg = yield receiver.recv()
    printdbg('(step2) received msg: {}'.format(msg))
    # Signal downstream to step 3
    sender = context.socket(zmq.PAIR)
    sender.connect("inproc://step3")
    msg = b'message from step2'
    yield sender.send(msg)
    printdbg('(step2) sent msg: {}'.format(msg))
def __init__(self, url: str, logger, request_timeout: int = None, database: str = None):
    self._logger = logger
    self._database = database
    self._context = Context.instance()
    self._poller = Poller()
    self._request = self._context.socket(zmq.DEALER)
    self._request_timeout = request_timeout or 60
    self._rds_bus_url = url
    self._request.connect(self._rds_bus_url)
    self._request_dict = dict()
    self._io_loop = ioloop.IOLoop.current()
    self._io_loop.add_callback(self.start)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Ensure the kernel we work with uses Futures on recv, so we can await them
    self.future_context = ctx = Context()
    # Our subscription to messages from the kernel we launch
    self.iosub = ctx.socket(zmq.SUB)
    self.iosub.subscribe = b""
    # From kernelapp.py, shell_streams is typically shell_stream, control_stream
    self.shell_stream = self.shell_streams[0]
    # Start with no child kernel
    self.child_kernel = None
    self.kernel_config = None
    self.acquiring_kernel = asyncio.Lock()
    self.kernel_launched = asyncio.Event()
@gen.coroutine
def worker_task(id, context=None):
    context = context or Context.instance()
    worker = context.socket(zmq.REQ)
    # We use a string identity for ease here
    zhelpers.set_id(worker)
    worker.connect("tcp://localhost:5671")
    total = 0
    while True:
        # Tell the router we're ready for work
        yield worker.send(b"ready")
        # Get workload from router, until finished
        workload = yield worker.recv()
        # print('(worker {}) received: {}'.format(id, workload))
        finished = workload == b"END"
        if finished:
            print("worker %d processed: %d tasks" % (id, total))
            break
        total += 1
        # Do some random work
        yield gen.sleep(0.1 * random.random())
    raise gen.Return(('worker {}'.format(id), total))
@gen.coroutine
def run(loop):
    context = Context()
    yield run_sink(context)
@gen.coroutine
def run(loop):
    context = Context()
    yield run_ventilator(context)
@gen.coroutine
def run(loop):
    context = Context(1)
    while True:
        yield run_worker(context)
@gen.coroutine
def run(loop, zipcodes):
    context = Context()
    yield run_client_parallel(context, zipcodes)
async def run(loop):
    context = Context()
    while True:
        await run_server(context)
"""
Hello World server in Python.
Binds REP socket to Url.
Expects b"Hello" from client, replies with b"World".
Modified for tornado/ioloop: Dave Kuhlman <dkuhlman(at)davekuhlman(dot)org>
usage: python hwserver.py
"""
import sys
import zmq
from zmq.eventloop.future import Context
from zmq.eventloop.ioloop import IOLoop
from tornado import gen

Url = 'tcp://127.0.0.1:5555'
Ctx = Context()

@gen.coroutine
def run():
    print("Getting ready for hello world client. Ctrl-C to exit.\n")
    socket = Ctx.socket(zmq.REP)
    socket.bind(Url)
    while True:
        # Wait for next request from client
        message = yield socket.recv()
        print("Received request: {}".format(message))
        # Do some 'work'
        yield gen.sleep(1)
        # Send reply back to client
        message = message.decode('utf-8')
        yield socket.send(b"World")  # the reply promised in the module docstring
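# A matching hello world client sketch for the server above, reconstructed
# from the docstring's b"Hello"/b"World" contract (not part of the original
# hwserver.py):
import zmq
from tornado import gen, ioloop
from zmq.eventloop.future import Context

@gen.coroutine
def run_client():
    socket = Context().socket(zmq.REQ)
    socket.connect('tcp://127.0.0.1:5555')
    yield socket.send(b"Hello")
    reply = yield socket.recv()
    print("Received reply: {}".format(reply))

ioloop.IOLoop.current().run_sync(run_client)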
@gen.coroutine
def run(loop):
    context = Context()
    yield run_worker(context)
@gen.coroutine
def run(loop, ident, num_workers):
    context = Context()
    yield run_worker_parallel(context, ident, num_workers)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.future_context = ctx = Context()
    self.iosub = ctx.socket(zmq.SUB)
    self.iosub.subscribe = b''
    self.shell_stream = self.shell_streams[0]
@gen.coroutine
def run(host, port):
    context = Context()
    yield zstreams.run_producer(context, host, port - 3)
#!/usr/bin/env python
"""A basic ZMQ echo server with zmq.eventloop.future"""
import zmq
from tornado import gen, ioloop
from zmq.eventloop.future import Context

@gen.coroutine
def echo(sock):
    while True:
        msg = yield sock.recv_multipart()
        yield sock.send_multipart(msg)

ctx = Context.instance()
s = ctx.socket(zmq.ROUTER)
s.bind('tcp://127.0.0.1:5555')

loop = ioloop.IOLoop.current()
loop.spawn_callback(echo, s)
loop.start()
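# A minimal DEALER client sketch to exercise the echo server above; run it
# as a separate process (the ping() coroutine and payload are assumptions).
# The ROUTER echoes [identity, payload] back, so the DEALER receives its
# payload unchanged.
import zmq
from tornado import gen, ioloop
from zmq.eventloop.future import Context

@gen.coroutine
def ping():
    s = Context.instance().socket(zmq.DEALER)
    s.connect('tcp://127.0.0.1:5555')
    yield s.send(b'ping')
    reply = yield s.recv()
    print('echo reply: {}'.format(reply))

ioloop.IOLoop.current().run_sync(ping)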
@gen.coroutine
def run_broker(loop):
    """ main broker method """
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    client_nbr = NBR_CLIENTS * 3

    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)

    # create workers and clients threads
    # worker_tasks = []
    for idx in range(NBR_WORKERS):
        loop.add_callback(partial(run_worker, url_worker, context, idx))
        # worker_tasks.append(task)
    # client_tasks = []
    for idx in range(NBR_CLIENTS):
        loop.add_callback(partial(run_client, url_client, context, idx))
        # client_tasks.append(task)

    # Logic of LRU loop:
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    #   to client if necessary
    # - If client requests, pop next worker and send request to it

    # Queue of available workers
    available_workers = 0
    workers_list = []
    all_workers = set()

    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    poller.register(frontend, zmq.POLLIN)

    while True:
        socks = yield poller.poll()
        socks = dict(socks)

        # Handle worker activity on backend
        if backend in socks and socks[backend] == zmq.POLLIN:
            # Queue worker address for LRU routing
            message = yield backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            all_workers.add(worker_addr)
            # Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b"READY":
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield frontend.send_multipart([client_addr, b"", reply])
                printdbg('(run_broker) to frontend -- reply: "{}"'.format(reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg("(run_broker) exiting")
                    break  # Exit after N messages

        # poll on frontend only if workers are available
        if available_workers > 0:
            if frontend in socks and socks[frontend] == zmq.POLLIN:
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                # Dequeue and drop the next worker address
                available_workers -= 1
                worker_id = workers_list.pop()
                yield backend.send_multipart([worker_id, b"", client_addr, b"", request])
                printdbg('(run_broker) to backend -- request: "{}"'.format(request))

    # out of infinite loop: do some housekeeping
    printdbg("(run_broker) finishing")
    for worker_id in workers_list:
        yield backend.send_multipart([worker_id, b"", b"", b"", b"Stop"])
    printdbg("(run_broker) workers cancelled")
    yield gen.sleep(1)
    frontend.close()
    backend.close()
    # context.term()  # Caution: calling term() blocks.
    printdbg("(run_broker) returning")
    result = "finished ok"
    raise gen.Return(result)
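# run_broker() schedules run_worker() and run_client() coroutines that are
# not included in this collection; plausible sketches matching the broker's
# frame layout (the names match the calls above, but the bodies are
# reconstructions and the payloads are assumptions):
@gen.coroutine
def run_worker(url_worker, context, idx):
    worker = context.socket(zmq.REQ)
    worker.identity = u"worker-{}".format(idx).encode('ascii')
    worker.connect(url_worker)
    yield worker.send(b"READY")  # register with the broker
    while True:
        client_addr, empty, request = yield worker.recv_multipart()
        if request == b"Stop":
            break
        # Reply travels back through the broker to the originating client
        yield worker.send_multipart([client_addr, b"", b"OK"])

@gen.coroutine
def run_client(url_client, context, idx):
    client = context.socket(zmq.REQ)
    client.identity = u"client-{}".format(idx).encode('ascii')
    client.connect(url_client)
    for _ in range(3):  # the broker expects NBR_CLIENTS * 3 replies in total
        yield client.send(b"HELLO")
        reply = yield client.recv()
        printdbg('(run_client {}) reply: {}'.format(idx, reply))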
async def run(loop, zip_filter):
    context = Context()
    await run_client(context, zip_filter)
@gen.coroutine
def run(loop):
    context = Context()
    server = Server(loop, context)
    yield server.run_server()
    printdbg('(run) finished')
async def run(loop, zipcodes):
    context = Context()
    await run_client_parallel(context, zipcodes)