def socket(): """Context manager to give the bound socket.""" ctx = Context() sock = ctx.socket(zmq.PAIR) sock.bind("tcp://*:8889") yield sock sock.close()
@gen.coroutine
def run_worker():
    context = Context(1)
    worker = context.socket(zmq.REQ)
    identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
    worker.setsockopt_string(zmq.IDENTITY, identity)
    worker.connect("tcp://localhost:5556")
    print("I: (%s) worker ready" % identity)
    yield worker.send_string(LRU_READY)
    cycles = 0
    while True:
        msg = yield worker.recv_multipart()
        if not msg:
            break
        cycles += 1
        if cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating a crash" % identity)
            break
        elif cycles > 3 and randint(0, 5) == 0:
            print("I: (%s) simulating CPU overload" % identity)
            yield gen.sleep(3)
        print("I: (%s) normal reply" % identity)
        # Do some heavy work
        yield gen.sleep(1)
        yield worker.send_multipart(msg)
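# A hedged driver sketch for run_worker, assuming a tornado IOLoop drives the
# coroutine (run_sync blocks until the coroutine returns):
from zmq.eventloop.ioloop import IOLoop

if __name__ == "__main__":
    IOLoop.current().run_sync(run_worker)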
async def open(self):
    context = Context()
    sock = context.socket(zmq.SUB)
    sock.setsockopt(zmq.CONFLATE, 1)
    sock.connect("tcp://localhost:{}".format(settings.ZMQ_PORT))
    sock.subscribe(b'')
    self.sock = sock
    self._stopped = False
    logger.debug("client connected: %s", self.request.remote_ip)
    tornado.ioloop.IOLoop.current().spawn_callback(self.pipe_message)
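# pipe_message is spawned above but not shown here; a minimal sketch, under
# the assumption that this is a tornado WebSocket handler that forwards each
# conflated frame to the connected client (the loop condition and the
# binary=True flag are assumptions, not from the original):
async def pipe_message(self):
    while not self._stopped:
        msg = await self.sock.recv()
        await self.write_message(msg, binary=True)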
@gen.coroutine
def run(loop):
    ctx = Context()
    a, b = zpipe(ctx)
    responses = yield [
        client_task(ctx, b),
        server_task(ctx),
        monitor(a),
    ]
    print('responses: {}'.format(responses))
    del a, b
    print('(run) finished')
@gen.coroutine
def run_queue():
    context = Context(1)
    frontend = context.socket(zmq.ROUTER)  # ROUTER
    backend = context.socket(zmq.ROUTER)   # ROUTER
    frontend.bind("tcp://*:5555")  # For clients
    backend.bind("tcp://*:5556")   # For workers

    poll_workers = Poller()
    poll_workers.register(backend, zmq.POLLIN)

    poll_both = Poller()
    poll_both.register(frontend, zmq.POLLIN)
    poll_both.register(backend, zmq.POLLIN)

    workers = []

    while True:
        # Poll the frontend only when at least one worker is ready
        if workers:
            socks = yield poll_both.poll()
        else:
            socks = yield poll_workers.poll()
        socks = dict(socks)

        # Handle worker activity on backend
        if socks.get(backend) == zmq.POLLIN:
            # Use worker address for LRU routing
            msg = yield backend.recv_multipart()
            if not msg:
                break
            print('I: received msg: {}'.format(msg))
            address = msg[0]
            workers.append(address)
            # Everything after the second (delimiter) frame is reply
            reply = msg[2:]
            # Forward message to client if it's not a READY
            if reply[0] != LRU_READY:
                print('I: sending -- reply: {}'.format(reply))
                yield frontend.send_multipart(reply)
            else:
                print('I: received ready -- address: {}'.format(address))

        if socks.get(frontend) == zmq.POLLIN:
            # Get client request, route to first available worker
            msg = yield frontend.recv_multipart()
            worker = workers.pop(0)
            request = [worker, b''] + msg
            print('I: sending -- worker: {} msg: {}'.format(worker, msg))
            yield backend.send_multipart(request)
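# run_queue is the broker half of the LRU pattern above: clients connect on
# 5555, the run_worker REQ sockets on 5556. A hedged launch sketch:
from zmq.eventloop.ioloop import IOLoop

if __name__ == "__main__":
    IOLoop.current().run_sync(run_queue)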
def _start(self):
    self.context = Context()
    self.push = self.context.socket(zmq.PUSH)
    log.debug("Connecting PUSH socket to %s", self.returner_address)
    self.push.connect(self.returner_address)

    minion_opts = self.opts.copy()
    minion_opts["file_client"] = "local"
    self.event = salt.utils.event.get_event(
        "master", opts=minion_opts, io_loop=self.io_loop, listen=True
    )
    self.event.subscribe("")
    self.event.set_event_handler(self.handle_event)

    event_tag = "salt/master/{}/start".format(self.id)
    log.info("Firing event on engine start. Tag: %s", event_tag)
    load = {"id": self.id, "tag": event_tag, "data": {}}
    self.event.fire_event(load, event_tag)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Ensure the kernel we work with uses Futures on recv, so we can await them
    self.future_context = ctx = Context()

    # Our subscription to messages from the kernel we launch
    self.iosub = ctx.socket(zmq.SUB)
    self.iosub.subscribe = b""

    # From kernelapp.py, shell_streams is typically shell_stream, control_stream
    self.shell_stream = self.shell_streams[0]

    # Start with no child kernel
    self.child_kernel = None

    self.kernel_config = None
    self.acquiring_kernel = asyncio.Lock()
    self.kernel_launched = asyncio.Event()
@gen.coroutine
def run_server():
    context = Context()
    server = context.socket(zmq.REP)
    server.bind(SERVER_ADDR)
    cycles = 0
    while True:
        request = yield server.recv()
        cycles += 1
        # Simulate various problems, after a few cycles
        if cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating a crash")
            server.unbind(SERVER_ADDR)
            # Delay for a bit, else we get "Address already in use" error.
            # Note that to really simulate a crash, we should probably kill
            # this process and start another.
            yield gen.sleep(2)
            break
        elif cycles > 3 and randint(0, 3) == 0:
            print("I: Simulating CPU overload")
            yield gen.sleep(2)
        print("I: Normal request (%s)" % request)
        yield gen.sleep(1)  # Do some heavy work
        yield server.send(request)
    raise gen.Return((context, server))
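# A hedged driver for run_server: gen.Return hands (context, server) back so
# the caller can close them once the simulated crash breaks the loop.
# SERVER_ADDR is assumed to be defined elsewhere in this example.
from zmq.eventloop.ioloop import IOLoop

def main():
    context, server = IOLoop.current().run_sync(run_server)
    server.close()
    context.term()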
async def run(loop):
    context = Context()
    while True:
        await run_server(context)
@gen.coroutine
def run(loop):
    context = Context()
    yield run_worker(context)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.future_context = ctx = Context()
    self.iosub = ctx.socket(zmq.SUB)
    self.iosub.subscribe = b''
    self.shell_stream = self.shell_streams[0]
@gen.coroutine
def run(loop):
    context = Context()
    yield run_broker(context)
async def run(loop, zip_filter):
    context = Context()
    await run_client(context, zip_filter)
@gen.coroutine
def run(loop, ident, num_workers):
    context = Context()
    yield run_worker_parallel(context, ident, num_workers)
@gen.coroutine
def run(loop):
    context = Context()
    server = Server(loop, context)
    yield server.run_server()
    printdbg('(run) finished')
@gen.coroutine
def run(loop):
    context = Context()
    while True:
        yield run_queue(context)
@gen.coroutine
def run(loop):
    context = Context(1)
    while True:
        yield run_worker(context)
@gen.coroutine
def run(loop, zipcodes):
    context = Context()
    yield run_client_parallel(context, zipcodes)
@gen.coroutine
def run(loop):
    context = Context()
    yield run_sink(context)
"""
Hello World server in Python.

Binds REP socket to Url.
Expects b"Hello" from client, replies with b"World".

Modified for tornado/ioloop: Dave Kuhlman <dkuhlman(at)davekuhlman(dot)org>

usage: python hwserver.py
"""
import sys
import zmq
from zmq.eventloop.future import Context
from zmq.eventloop.ioloop import IOLoop
from tornado import gen

Url = 'tcp://127.0.0.1:5555'
Ctx = Context()


@gen.coroutine
def run():
    print("Getting ready for hello world client. Ctrl-C to exit.\n")
    socket = Ctx.socket(zmq.REP)
    socket.bind(Url)
    while True:
        # Wait for next request from client
        message = yield socket.recv()
        print("Received request: {}".format(message))
        # Do some 'work'
        yield gen.sleep(1)
        # Send reply back to client
        message = message.decode('utf-8')
        # Reply with "World", per the module docstring above
        yield socket.send_string('World')
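# A matching hello-world client, sketched from the docstring above (sends
# b"Hello", expects b"World"); the function name is illustrative:
@gen.coroutine
def run_client():
    socket = Ctx.socket(zmq.REQ)
    socket.connect(Url)
    yield socket.send(b"Hello")
    reply = yield socket.recv()
    print("Received reply: {}".format(reply))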
async def run(loop, zipcodes):
    context = Context()
    await run_client_parallel(context, zipcodes)
@gen.coroutine
def run(host, port):
    context = Context()
    yield zstreams.run_producer(context, host, port - 3)
@gen.coroutine
def run_broker(loop):
    """ main broker method """
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    client_nbr = NBR_CLIENTS * 3

    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)

    # Create worker and client coroutines on the caller's loop
    for idx in range(NBR_WORKERS):
        loop.add_callback(partial(run_worker, url_worker, context, idx))
    for idx in range(NBR_CLIENTS):
        loop.add_callback(partial(run_client, url_client, context, idx))

    # Logic of LRU loop
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    #   to client if necessary
    # - If client requests, pop next worker and send request to it

    # Queue of available workers
    available_workers = 0
    workers_list = []
    all_workers = set()

    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    poller.register(frontend, zmq.POLLIN)

    while True:
        socks = yield poller.poll()
        socks = dict(socks)

        # Handle worker activity on backend
        if backend in socks and socks[backend] == zmq.POLLIN:
            # Queue worker address for LRU routing
            message = yield backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            all_workers.add(worker_addr)
            # Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b'READY':
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield frontend.send_multipart([client_addr, b"", reply])
                printdbg('(run_broker) to frontend -- reply: "{}"'.format(
                    reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg('(run_broker) exiting')
                    break  # Exit after N messages

        # poll on frontend only if workers are available
        if available_workers > 0:
            if frontend in socks and socks[frontend] == zmq.POLLIN:
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                # Dequeue and drop the next worker address
                available_workers -= 1
                worker_id = workers_list.pop()
                yield backend.send_multipart(
                    [worker_id, b"", client_addr, b"", request])
                printdbg('(run_broker) to backend -- request: "{}"'.format(
                    request))

    # out of infinite loop: do some housekeeping
    printdbg('(run_broker) finishing')
    for worker_id in workers_list:
        yield backend.send_multipart([worker_id, b"", b"", b"", b"Stop"])
    printdbg('(run_broker) workers cancelled')
    yield gen.sleep(1)

    frontend.close()
    backend.close()
    #context.term()  # Caution: calling term() blocks.
    printdbg('(run_broker) returning')
    result = 'finished ok'
    raise gen.Return(result)
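# run_broker expects the caller's IOLoop so it can spawn the worker and client
# coroutines; a hedged launch sketch, reusing the partial imported above:
from zmq.eventloop.ioloop import IOLoop

def main():
    loop = IOLoop.current()
    result = loop.run_sync(partial(run_broker, loop))
    print(result)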
@gen.coroutine
def run(loop):
    context = Context()
    yield run_ventilator(context)
@gen.coroutine
def run(loop):
    context = Context()
    while True:
        yield run_client(context)