def run_queue():
    """LRU queue broker: clients connect to the ROUTER on :5555, workers
    to the ROUTER on :5556.  Worker replies are relayed back to clients;
    a READY token just re-queues the worker."""
    context = Context(1)
    frontend = context.socket(zmq.ROUTER)  # ROUTER
    backend = context.socket(zmq.ROUTER)   # ROUTER
    frontend.bind("tcp://*:5555")  # For clients
    backend.bind("tcp://*:5556")   # For workers

    # Two pollers: the backend is always watched, the frontend only
    # while at least one worker is available to take a request.
    poll_workers = Poller()
    poll_workers.register(backend, zmq.POLLIN)
    poll_both = Poller()
    poll_both.register(frontend, zmq.POLLIN)
    poll_both.register(backend, zmq.POLLIN)

    idle_workers = []  # addresses of ready workers, oldest first
    while True:
        active = poll_both if idle_workers else poll_workers
        events = dict((yield from active.poll()))

        # Handle worker activity on backend
        if events.get(backend) == zmq.POLLIN:
            frames = yield from backend.recv_multipart()
            if not frames:
                break
            print('I: received msg: {}'.format(frames))
            # First frame is the worker address: queue it for LRU routing.
            address = frames[0]
            idle_workers.append(address)
            # Everything after the second (delimiter) frame is the reply.
            reply = frames[2:]
            if reply[0] == LRU_READY:
                print('I: received ready -- address: {}'.format(address))
            else:
                print('I: sending -- reply: {}'.format(reply))
                yield from frontend.send_multipart(reply)

        # Route a client request to the first available worker.
        if events.get(frontend) == zmq.POLLIN:
            frames = yield from frontend.recv_multipart()
            worker = idle_workers.pop(0)
            print('I: sending -- worker: {} msg: {}'.format(worker, frames))
            yield from backend.send_multipart([worker, b''] + frames)
def run_worker(context):
    """Paranoid-Pirate worker: echoes 3-frame requests, answers queue
    heartbeats, and reconnects with exponential backoff when the queue
    goes silent for HEARTBEAT_LIVENESS poll intervals."""
    poller = Poller()
    liveness = HEARTBEAT_LIVENESS           # missed-heartbeat budget
    interval = INTERVAL_INIT                # current reconnect backoff (s)
    heartbeat_at = time.time() + HEARTBEAT_INTERVAL  # next heartbeat deadline
    worker = yield from worker_socket(context, poller)
    cycles = 0
    while True:
        # Poll interval is in ms; HEARTBEAT_INTERVAL is presumably seconds.
        socks = yield from poller.poll(HEARTBEAT_INTERVAL * 1000)
        socks = dict(socks)
        # Handle worker activity on backend
        if socks.get(worker) == zmq.POLLIN:
            # Get message
            # - 3-part envelope + content -> request
            # - 1-part HEARTBEAT -> heartbeat
            frames = yield from worker.recv_multipart()
            if not frames:
                break  # Interrupted
            if len(frames) == 3:
                # Simulate various problems, after a few cycles
                cycles += 1
                if cycles > 3 and randint(0, 5) == 0:
                    print("I: Simulating a crash")
                    break
                if cycles > 3 and randint(0, 5) == 0:
                    print("I: Simulating CPU overload")
                    yield from asyncio.sleep(3)
                print("I: Normal reply")
                # Echo the whole envelope back as the reply.
                yield from worker.send_multipart(frames)
                liveness = HEARTBEAT_LIVENESS
                yield from asyncio.sleep(1)  # Do some heavy work
            elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT:
                # Queue is alive: reset the liveness budget.
                print("I: Queue heartbeat")
                liveness = HEARTBEAT_LIVENESS
            else:
                print("E: Invalid message: %s" % frames)
            # Any activity resets the reconnect backoff.
            interval = INTERVAL_INIT
        else:
            # Poll timed out with no traffic: burn one liveness credit.
            liveness -= 1
            if liveness == 0:
                print("W: Heartbeat failure, can't reach queue")
                print("W: Reconnecting in %0.2fs..." % interval)
                yield from asyncio.sleep(interval)
                if interval < INTERVAL_MAX:
                    interval *= 2  # exponential backoff, capped at INTERVAL_MAX
                # Tear down the stale socket (LINGER 0 drops queued output)
                # and open a fresh one.
                poller.unregister(worker)
                worker.setsockopt(zmq.LINGER, 0)
                worker.close()
                worker = yield from worker_socket(context, poller)
                liveness = HEARTBEAT_LIVENESS
        # Emit our own heartbeat to the queue on schedule.
        if time.time() > heartbeat_at:
            heartbeat_at = time.time() + HEARTBEAT_INTERVAL
            print("I: Worker heartbeat")
            yield from worker.send(PPP_HEARTBEAT)
def run_worker(context):
    """Task worker: PULLs millisecond workloads from :5557, sleeps that
    long, PUSHes the result to :5558, and exits when anything arrives
    on the control SUB socket (:5559)."""
    # Socket to receive messages on
    receiver = context.socket(zmq.PULL)
    receiver.connect("tcp://localhost:5557")
    # Socket to send messages to
    sender = context.socket(zmq.PUSH)
    sender.connect("tcp://localhost:5558")
    # Socket for control input
    controller = context.socket(zmq.SUB)
    controller.connect("tcp://localhost:5559")
    controller.setsockopt(zmq.SUBSCRIBE, b"")

    # Watch both the work stream and the kill switch.
    poller = Poller()
    poller.register(receiver, zmq.POLLIN)
    poller.register(controller, zmq.POLLIN)

    while True:
        ready = dict((yield from poller.poll()))

        if ready.get(receiver) == zmq.POLLIN:
            message = yield from receiver.recv()
            # Workload is expressed in milliseconds.
            workload = int(message)
            yield from asyncio.sleep(workload / 1000.0)
            # Forward the original message to the sink.
            yield from sender.send(message)
            # Simple progress indicator for the viewer
            sys.stdout.write(".")
            sys.stdout.flush()

        # Any waiting controller command acts as 'KILL'
        if ready.get(controller) == zmq.POLLIN:
            break
def receiver():
    """receive messages with polling"""
    pull = ctx.socket(zmq.PULL)
    pull.connect(url)
    poller = Poller()
    poller.register(pull, zmq.POLLIN)
    while True:
        events = yield from poller.poll()
        # Skip spurious wakeups where our socket is not readable.
        if pull not in dict(events):
            continue
        print("recving", events)
        msg = yield from pull.recv_multipart()
        print('recvd', msg)
def run_client(context):
    """Lazy-Pirate client: sends numbered requests over a REQ socket and,
    when the server fails to answer within REQUEST_TIMEOUT, discards the
    confused socket and retries with a fresh one.

    Gives up (returns) after REQUEST_RETRIES consecutive timeouts.
    """
    print("I: Connecting to server...")
    client = context.socket(zmq.REQ)
    client.connect(SERVER_ENDPOINT)
    poll = Poller()
    poll.register(client, zmq.POLLIN)
    sequence = 0
    retries_left = REQUEST_RETRIES
    while retries_left:
        sequence += 1
        request = str(sequence)
        print("I: Sending (%s)" % request)
        yield from client.send_string(request)
        expect_reply = True
        while expect_reply:
            socks = yield from poll.poll(REQUEST_TIMEOUT)
            socks = dict(socks)
            if socks.get(client) == zmq.POLLIN:
                reply = yield from client.recv()
                if not reply:
                    break
                if int(reply) == sequence:
                    # Server echoed our sequence number: success.
                    print("I: Server replied OK (%s)" % reply)
                    retries_left = REQUEST_RETRIES
                    expect_reply = False
                else:
                    print("E: Malformed reply from server: %s" % reply)
            else:
                print("W: No response from server, retrying...")
                # Socket is confused. Close and remove it.
                print('W: confused')
                # BUG FIX: the socket was connect()ed, never bound, so the
                # original unbind(SERVER_ENDPOINT) raised ZMQError and the
                # dead socket leaked.  Close it instead; LINGER=0 discards
                # any message still queued on it.
                client.setsockopt(zmq.LINGER, 0)
                poll.unregister(client)
                client.close()
                retries_left -= 1
                if retries_left == 0:
                    print("E: Server seems to be offline, abandoning")
                    return
                print("I: Reconnecting and resending (%s)" % request)
                # Create new connection
                client = context.socket(zmq.REQ)
                client.connect(SERVER_ENDPOINT)
                poll.register(client, zmq.POLLIN)
                yield from client.send_string(request)
def run_proxy(socket_from, socket_to):
    """Shuttle multipart messages between two sockets, one direction per
    poll wakeup (frontend traffic takes priority)."""
    poller = Poller()
    poller.register(socket_from, zmq.POLLIN)
    poller.register(socket_to, zmq.POLLIN)
    printdbg('(run_proxy) started')
    while True:
        ready = dict((yield from poller.poll()))
        if socket_from in ready:
            msg = yield from socket_from.recv_multipart()
            printdbg('(run_proxy) received from frontend -- msg: {}'.format(
                msg))
            yield from socket_to.send_multipart(msg)
            printdbg('(run_proxy) sent to backend -- msg: {}'.format(msg))
            # Mirror the original if/elif: handle one direction per wakeup.
            continue
        if socket_to in ready:
            msg = yield from socket_to.recv_multipart()
            printdbg('(run_proxy) received from backend -- msg: {}'.format(
                msg))
            yield from socket_from.send_multipart(msg)
            printdbg('(run_proxy) sent to frontend -- msg: {}'.format(msg))
def run_server(loop):
    """Server routine"""
    # Prepare our context and sockets
    # Socket to talk to clients
    clients = Ctx.socket(zmq.ROUTER)
    clients.bind(Url_client)
    workers = Ctx.socket(zmq.DEALER)
    workers.bind(Url_worker)

    # Start the workers
    tasks = [asyncio.ensure_future(run_worker('worker {}'.format(idx)))
             for idx in range(5)]

    poller = Poller()
    poller.register(clients, zmq.POLLIN)
    poller.register(workers, zmq.POLLIN)
    print('mtserver ready for requests')

    while True:
        socks = dict((yield from poller.poll()))
        if clients in socks:
            # Client -> worker: strip the ROUTER envelope, re-wrap.
            parts = yield from clients.recv_multipart()
            printdbg('(run) received from client message_parts: {}'.format(
                parts))
            client, empty, message = parts[:3]
            printdbg('(run) received from client message: {}'.format(
                message))
            printdbg('(run) sending message to workers: {}'.format(message))
            yield from workers.send_multipart([client, b'', message])
        elif workers in socks:
            # Worker -> client: route the reply back by client identity.
            parts = yield from workers.recv_multipart()
            printdbg('(run) received from worker message_parts: {}'.format(
                parts))
            client, empty, message = parts[:3]
            printdbg('(run) received from worker message: {}'.format(
                message))
            yield from clients.send_multipart([client, b'', message])
            printdbg('(run) sent message to clients: {}'.format(message))
def run_broker(context):
    """Bidirectional forwarder between a client-facing ROUTER (:5559)
    and a worker-facing DEALER (:5560)."""
    # Prepare our context and sockets
    frontend = context.socket(zmq.ROUTER)
    backend = context.socket(zmq.DEALER)
    frontend.bind("tcp://*:5559")
    backend.bind("tcp://*:5560")

    # Initialize poll set
    poller = Poller()
    poller.register(frontend, zmq.POLLIN)
    poller.register(backend, zmq.POLLIN)

    # Switch messages between sockets
    while True:
        events = dict((yield from poller.poll()))

        if events.get(frontend) == zmq.POLLIN:
            message = yield from frontend.recv_multipart()
            print('received from frontend: {}'.format(message))
            yield from backend.send_multipart(message)

        if events.get(backend) == zmq.POLLIN:
            message = yield from backend.recv_multipart()
            print('received from backend: {}'.format(message))
            yield from frontend.send_multipart(message)
def run_server(loop):
    """Server routine"""
    # Prepare our context and sockets
    # Socket to talk to clients
    clients = Ctx.socket(zmq.ROUTER)
    clients.bind(Url_client)
    workers = Ctx.socket(zmq.DEALER)
    workers.bind(Url_worker)

    # Start the workers
    tasks = []
    for idx in range(5):
        tasks.append(
            asyncio.ensure_future(run_worker('worker {}'.format(idx))))

    poller = Poller()
    poller.register(clients, zmq.POLLIN)
    poller.register(workers, zmq.POLLIN)
    print('mtserver ready for requests')
    while True:
        ready = dict((yield from poller.poll()))
        if clients in ready:
            frames = yield from clients.recv_multipart()
            printdbg(
                '(run) received from client message_parts: {}'.format(frames))
            sender, _, payload = frames[:3]
            printdbg('(run) received from client message: {}'.format(payload))
            printdbg('(run) sending message to workers: {}'.format(payload))
            # Re-wrap with the client identity so the reply can be routed.
            yield from workers.send_multipart([sender, b'', payload])
        elif workers in ready:
            frames = yield from workers.recv_multipart()
            printdbg(
                '(run) received from worker message_parts: {}'.format(frames))
            sender, _, payload = frames[:3]
            printdbg('(run) received from worker message: {}'.format(payload))
            yield from clients.send_multipart([sender, b'', payload])
            printdbg('(run) sent message to clients: {}'.format(payload))
class QWeatherClient:
    """Client class for the QWeather messaging framework.

    Connects a DEALER socket to the broker for request/reply traffic and a
    SUB socket for broadcasts.  Discovered servers are exposed as attributes
    whose methods proxy remote calls.
    """

    class serverclass:
        """Support class to represent the available servers as objects, with
        their exposed functions as callable attributes. The __repr__ makes it
        look like they are server objects"""

        def __init__(self, name, addr, methods, client):
            self.name = name
            self.addr = addr
            self.client = client
            # One callable proxy per exposed (methodname, methoddoc) pair.
            for amethod in methods:
                setattr(self, amethod[0],
                        self.bindingfunc(amethod[0], amethod[1]))

        def bindingfunc(self, methodname, methoddoc):
            """Ensures that "calling" the attribute of the "server" object
            with the name of a server function sends a request to the server
            to execute that function and return the response"""
            def func(*args, **kwargs):
                # Pop the timeout if given, else fall back to the default;
                # saves a separate existence check.
                timeout = kwargs.pop('timeout', CSYNCTIMEOUT)
                return self.client.send_request(
                    [self.name.encode(), methodname.encode(),
                     pickle.dumps([args, kwargs])],
                    timeout=timeout)
            func.__name__ = methodname
            func.__doc__ = methoddoc
            func.__repr__ = lambda: methoddoc
            func.is_remote_server_method = True
            return func

        def __repr__(self):
            msg = ""
            lst = [getattr(self, method) for method in dir(self)
                   if getattr(getattr(self, method),
                              'is_remote_server_method', False)]
            if len(lst) == 0:
                return 'No servers connected'
            else:
                for amethod in lst:
                    msg += amethod.__name__ + "\n"
                return msg.strip()

    # Class-level defaults; reconnect() installs per-instance values.
    context = None
    socket = None
    poller = None
    # messageid+servername -> asyncio.Future for pending async requests.
    # NOTE(review): class-level dict is shared across instances — confirm
    # only one client per process is intended.
    futureobjectdict = {}

    def __init__(self, QWeatherStationIP, name=None, loop=None,
                 debug=False, verbose=False):
        """Parse the broker address, set up logging, connect, and fetch the
        server list (blocks on the event loop until discovery completes)."""
        IpAndPort = re.search(IPREPATTERN, QWeatherStationIP)
        assert IpAndPort != None, 'Ip not understood (tcp://xxx.xxx.xxx.xxx:XXXX or txp://localhost:XXXX)'
        self.QWeatherStationIP = IpAndPort.group(1)
        self.QWeatherStationSocket = IpAndPort.group(2)
        assert self.QWeatherStationIP[:6] == 'tcp://', 'Ip not understood (tcp://xxx.xxx.xxx.xxx:XXXX or txp://localhost:XXXX)'
        assert len(self.QWeatherStationSocket) == 4, 'Port not understood (tcp://xxx.xxx.xxx.xxx:XXXX or txp://localhost:XXXX)'
        if loop is None:
            self.loop = asyncio.get_event_loop()
        else:
            self.loop = loop
        if name is None:
            # Default to the machine hostname.
            import socket
            name = socket.gethostname()
        formatting = '{:}: %(levelname)s: %(message)s'.format(name)
        if debug:
            logging.basicConfig(format=formatting, level=logging.DEBUG)
        if verbose:
            logging.basicConfig(format=formatting, level=logging.INFO)
        self.name = name.encode()
        self.reconnect()
        # self.ping_broker()
        self.loop.run_until_complete(self.get_server_info())
        self.running = False
        self.messageid = 0
        atexit.register(self.close)

    def reconnect(self):
        '''connects or reconnects to the broker'''
        if self.poller:
            self.poller.unregister(self.socket)
        if self.socket:
            self.socket.close()
        self.context = Context()
        self.socket = self.context.socket(zmq.DEALER)
        self.socket.connect(self.QWeatherStationIP + ':' +
                            self.QWeatherStationSocket)
        # Broadcast socket lives on the broker port offset by SUBSOCKET.
        self.subsocket = self.context.socket(zmq.SUB)
        self.subsocket.connect(self.QWeatherStationIP + ':' +
                               str(int(self.QWeatherStationSocket) + SUBSOCKET))
        self.poller = Poller()
        self.poller.register(self.socket, zmq.POLLIN)
        self.poller.register(self.subsocket, zmq.POLLIN)

    def subscribe(self, servername, function):
        """Subscribe to a server with a callback function"""
        self.subsocket.setsockopt(zmq.SUBSCRIBE, servername.encode())
        self.subscribers[servername] = function

    def unsubscribe(self, servername):
        """Unsubscribe from a server"""
        self.subsocket.setsockopt(zmq.UNSUBSCRIBE, servername.encode())
        self.subscribers.pop(servername)

    async def get_server_info(self):
        """Get information about servers from the broker"""
        msg = [b'', b'C', CREADY, PCLIENT, self.name]
        self.send_message(msg)
        msg = await self.recieve_message()
        empty = msg.pop(0)
        assert empty == b''
        command = msg.pop(0)
        self.serverlist = []
        self.subscribers = {}
        if command == CREADY + CFAIL:
            raise Exception(msg.pop(0).decode())
        else:
            # Two pickled frames: {addr: name} and {addr: methods}.
            serverdict = pickle.loads(msg.pop(0))
            servermethoddict = pickle.loads(msg.pop(0))
            for addr, name in serverdict.items():
                methods = servermethoddict[addr]
                server = self.serverclass(name, addr, methods, self)
                server.is_remote_server = True
                setattr(self, name, server)
                self.serverlist.append(server)

    def send_request(self, body, timeout):
        """Send a request. If the client is running (i.e. in async mode) send
        an async request, else send a synchronous request\n
        Attach a messageID to each request. (0-255)"""
        self.messageid += 1
        if self.messageid > 255:
            self.messageid = 0
        if self.running:
            result = asyncio.get_event_loop().create_task(
                self.async_send_request(body, self.messageid.to_bytes(1, 'big')))
        else:
            result = self.sync_send_request(
                body, self.messageid.to_bytes(1, 'big'), timeout)
        return result

    def ping_broker(self):
        """Ping the broker"""
        self.send_message([b'', b'P'])
        try:
            # Wait 2 seconds for a pong from the broker.
            if len(self.loop.run_until_complete(
                    self.poller.poll(timeout=2000))) == 0:
                raise Exception('QWeatherStation not found')
            else:
                msg = self.loop.run_until_complete(self.recieve_message())
                empty = msg.pop(0)
                pong = msg.pop(0)
                logging.debug('Recieved Pong: {:}'.format(pong))
                if pong != b'b':
                    raise Exception('QWeatherStation sent wrong Pong')
        except Exception as e:
            # Broker unreachable: tear down the socket before re-raising.
            self.poller.unregister(self.socket)
            self.socket.close()
            raise e

    def sync_send_request(self, body, ident, timeout):
        """Synchronously send request. Timeout with the default timeoutvalue
        [FINDOUTHOWTOLINKTOTHECONSTANTSPAGETOSHOWDEFAULTVALUE]"""
        msg = [b'', b'C', CREQUEST, ident] + body
        server = body[0]
        self.send_message(msg)
        if len(self.loop.run_until_complete(
                self.poller.poll(timeout=timeout))) == 0:
            # NOTE(review): the timeout is returned, not raised — callers
            # must check the result type.
            return Exception('Synchronous request timed out. Try adding following keyword to function call: "timeout=XX" in ms')
        else:
            msg = self.loop.run_until_complete(self.recieve_message())
            empty = msg.pop(0)
            assert empty == b''
            command = msg.pop(0)
            ident = msg.pop(0)
            server = msg.pop(0)
            answ = pickle.loads(msg[0])
            return answ

    async def async_send_request(self, body, ident):
        """Ansynchronously send request. No explicit timeout on the client
        side for this. Relies on the "servertimeout" on the broker side"""
        server = body[0]
        msg = [b'', b'C', CREQUEST, ident] + body
        self.send_message(msg)
        # Waits here until the future is set to completed.
        answ = await self.recieve_future_message(ident + server)
        self.futureobjectdict.pop(ident + server)
        return answ

    def send_message(self, msg):
        """Send a multi-frame-message over the ZMQ socket"""
        self.socket.send_multipart(msg)

    def recieve_future_message(self, id):
        """Create a future for the async request, add it to the dict of
        futures (id = messageid+server)"""
        tmp = self.loop.create_future()
        self.futureobjectdict[id] = tmp
        return tmp

    async def recieve_message(self):
        """Recieve a multi-frame-message over the zmq socket"""
        msg = await self.socket.recv_multipart()
        return msg

    def handle_message(self, msg):
        """First step of handling an incoming message\n
        First asserts that the first frame is empty\n
        Then sorts the message into either request+success, request+fail
        or ping"""
        empty = msg.pop(0)
        assert empty == b''
        command = msg.pop(0)
        if command == CREQUEST + CSUCCESS:
            messageid = msg.pop(0)
            servername = msg.pop(0)
            msg = pickle.loads(msg[0])
            self.handle_request_success(messageid, servername, msg)
        elif command == CREQUEST + CFAIL:
            messageid = msg.pop(0)
            servername = msg.pop(0)
            # BUG FIX: forward the remaining frames so the failure reason
            # reaches the future (previously dropped).
            self.handle_request_fail(messageid, servername, msg)
        elif command == CPING:
            ping = msg.pop(0)
            if ping != b'P':
                raise Exception('QWeatherStation sent wrong ping')
            logging.debug('Recieved Ping from QWeatherStation')
            self.send_message([b'', b'b'])

    def handle_request_success(self, messageid, servername, msg):
        """Handle successful request by setting the result of the future
        (manually finishing the future)"""
        self.futureobjectdict[messageid + servername].set_result(msg)

    def handle_request_fail(self, messageid, servername, msg=None):
        """Handle a failed request by setting the future to an exception"""
        # BUG FIX: the original body referenced the undefined names `server`
        # and `msg`, raising NameError on every failed request.  Key on
        # servername and take the reason from the forwarded frames (the
        # default keeps the old two-argument call signature working).
        reason = msg.pop(0) if msg else b'Request failed'
        self.futureobjectdict[messageid + servername].set_exception(
            Exception(reason))

    def handle_broadcast(self, msg):
        """Handle a message on the broadcast socket by calling the callback
        function connected to the relevant server"""
        server = msg.pop(0).decode()
        msg = pickle.loads(msg.pop(0))
        self.subscribers[server](msg)

    async def run(self):
        """Asynchronously run the client by repeatedly polling the recieving
        socket"""
        self.running = True
        while True:
            try:
                socks = await self.poller.poll(1000)
                socks = dict(socks)
                if self.socket in socks:
                    msg = await self.recieve_message()
                    self.handle_message(msg)
                elif self.subsocket in socks:
                    msg = await self.recieve_message()
                    self.handle_broadcast(msg)
            except KeyboardInterrupt:
                self.close()
                break

    def close(self):
        """Closing function. Tells the broker that it disconnects. Is not
        called if the terminal is closed or the process is force-killed"""
        self.send_message([b'', b'C', CDISCONNECT])
        self.poller.unregister(self.socket)
        self.socket.close()

    def __repr__(self):
        msg = ""
        if len(self.serverlist) == 0:
            return 'No servers connected'
        else:
            for aserver in self.serverlist:
                msg += aserver.name + "\n"
            return msg.strip()

    def __iter__(self):
        return (aserv for aserv in self.serverlist)

    def __getitem__(self, key):
        return self.serverlist[key]
def run_broker(loop):
    """ main broker method """
    # LRU broker between NBR_CLIENTS clients and NBR_WORKERS workers over
    # inproc sockets; exits after client_nbr replies have been delivered.
    print('(run_broker) starting')
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    client_nbr = NBR_CLIENTS * 3  # total replies to deliver before exiting
    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    print('(run_broker) creating workers and clients')
    # create workers and clients threads
    worker_tasks = []
    for idx in range(NBR_WORKERS):
        task = asyncio.ensure_future(run_worker(url_worker, context, idx))
        worker_tasks.append(task)
    client_tasks = []
    for idx in range(NBR_CLIENTS):
        task = asyncio.ensure_future(run_client(url_client, context, idx))
        client_tasks.append(task)
    print('(run_broker) after creating workers and clients')
    # Logic of LRU loop
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    #   to client if necessary
    # - If client requests, pop next worker and send request to it
    # Queue of available workers
    available_workers = 0
    workers_list = []
    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    poller.register(frontend, zmq.POLLIN)
    while True:
        socks = yield from poller.poll()
        socks = dict(socks)
        # Handle worker activity on backend
        if (backend in socks and socks[backend] == zmq.POLLIN):
            # Queue worker address for LRU routing
            message = yield from backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            # Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b'READY':
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield from frontend.send_multipart([client_addr, b"", reply])
                printdbg(
                    '(run_broker) to frontend -- reply: "{}"'.format(reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg('(run_broker) exiting')
                    break  # Exit after N messages
        # poll on frontend only if workers are available
        if available_workers > 0:
            if (frontend in socks and socks[frontend] == zmq.POLLIN):
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield from frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                # Dequeue and drop the next worker address
                available_workers += -1
                # NOTE(review): pop() takes the most-recently-ready worker
                # (LIFO) despite the LRU wording above — confirm intended.
                worker_id = workers_list.pop()
                yield from backend.send_multipart(
                    [worker_id, b"", client_addr, b"", request])
                printdbg(
                    '(run_broker) to backend -- request: "{}"'.format(request))
    # out of infinite loop: do some housekeeping
    printdbg('(run_broker) finished')
    for worker_task in worker_tasks:
        worker_task.cancel()
    printdbg('(run_broker) workers cancelled')
    # Grace period so cancellations propagate before closing sockets.
    yield from asyncio.sleep(1)
    frontend.close()
    backend.close()
    #context.term()  # Caution: calling term() blocks.
    loop.stop()
    printdbg('(run_broker) returning')
    return 'finished ok'
class SchedulerConnection(object):
    """PULL-socket connection to a scheduler, with optional zmq socket
    monitoring and liveness-based reconnection."""
    __slots__ = (
        'address',
        # context object to open socket connections
        'context',
        # pull socket to receive check definitions from scheduler
        'pull',
        # poller object for `pull` socket
        'poller',
        # monitor socket for `pull` socket
        'monitor_socket',
        # poller object for monitor socket
        'monitor_poller',
        # timestamp of the first consecutive empty poll (None when healthy)
        'first_missing',
    )

    def __init__(self, address):
        self.pull = self.poller = None
        self.monitor_poller = self.monitor_socket = None
        self.address = address
        self.context = Context.instance()
        self.open()
        self.first_missing = None

    def __str__(self):
        return self.address

    def __repr__(self):
        return 'Scheduler({})'.format(self.address)

    def open(self):
        """Connect the PULL socket (plus monitor if enabled) and register
        both in fresh pollers."""
        self.pull = self.context.socket(PULL)
        logger.info('%s - opening pull socket ...', self)
        self.pull.connect(self.address)
        if settings.SCHEDULER_MONITOR:
            logger.info('%s - opening monitor socket ...', self)
            # Only disconnect events are of interest.
            self.monitor_socket = self.pull.get_monitor_socket(
                events=EVENT_DISCONNECTED
            )
        self.register()

    def register(self):
        """Register the open sockets in dedicated poller objects."""
        self.poller = Poller()
        self.poller.register(self.pull, POLLIN)
        if settings.SCHEDULER_MONITOR:
            self.monitor_poller = Poller()
            self.monitor_poller.register(self.monitor_socket, POLLIN)
        logger.info('%s - all sockets are successfully registered '
                    'in poller objects ...', self)

    def close(self):
        """Unregister open sockets from poller objects and close them."""
        self.unregister()
        logger.info('%s - closing open sockets ...', self)
        self.pull.close()
        if settings.SCHEDULER_MONITOR:
            self.monitor_socket.close()
        logger.info('%s - connection closed successfully ...', self)

    def unregister(self):
        """Unregister open sockets from poller object."""
        logger.info('%s - unregistering sockets from poller objects ...', self)
        self.poller.unregister(self.pull)
        if settings.SCHEDULER_MONITOR:
            self.monitor_poller.unregister(self.monitor_socket)

    def reconnect(self):
        """Tear down and re-open the connection, resetting liveness state."""
        self.close()
        self.open()
        self.first_missing = None

    @asyncio.coroutine
    def receive(self):
        """Poll for one check definition (2 s timeout); reconnect when
        nothing has arrived for SCHEDULER_LIVENESS_IN_MINUTES."""
        check = None
        events = yield from self.poller.poll(timeout=2000)
        if self.pull in dict(events):
            check = yield from self.pull.recv_multipart()
            # Payload is JSON in the first frame.
            check = jsonapi.loads(check[0])
        if check:
            self.first_missing = None
        elif self.first_missing is None:
            # First empty poll of a silent streak: start the clock.
            self.first_missing = datetime.now(tz=pytz.utc)
        if self.first_missing:
            diff = datetime.now(tz=pytz.utc) - self.first_missing
            delta = timedelta(minutes=settings.SCHEDULER_LIVENESS_IN_MINUTES)
            if diff > delta:
                logger.warning(
                    'Alamo worker `%s` pid `%s` try to reconnect to '
                    '`%s` scheduler.',
                    settings.WORKER_FQDN, settings.WORKER_PID, self
                )
                self.reconnect()
        return check

    @asyncio.coroutine
    def receive_event(self):
        """Poll the monitor socket (2 s timeout) and return a parsed monitor
        event, or None."""
        event = None
        events = yield from self.monitor_poller.poll(timeout=2000)
        if self.monitor_socket in dict(events):
            msg = yield from self.monitor_socket.recv_multipart(
                flags=NOBLOCK)
            event = parse_monitor_message(msg)
        return event
def run_broker(loop):
    """ main broker method """
    # Duplicate of the LRU broker above: routes requests from clients to
    # ready workers over inproc sockets and stops after a fixed number
    # of delivered replies.
    print('(run_broker) starting')
    url_worker = "inproc://workers"
    url_client = "inproc://clients"
    client_nbr = NBR_CLIENTS * 3  # total replies to deliver before exiting
    # Prepare our context and sockets
    context = Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    print('(run_broker) creating workers and clients')
    # create workers and clients threads
    worker_tasks = []
    for idx in range(NBR_WORKERS):
        task = asyncio.ensure_future(run_worker(url_worker, context, idx))
        worker_tasks.append(task)
    client_tasks = []
    for idx in range(NBR_CLIENTS):
        task = asyncio.ensure_future(run_client(url_client, context, idx))
        client_tasks.append(task)
    print('(run_broker) after creating workers and clients')
    # Logic of LRU loop
    # - Poll backend always, frontend only if 1+ worker ready
    # - If worker replies, queue worker as ready and forward reply
    #   to client if necessary
    # - If client requests, pop next worker and send request to it
    # Queue of available workers
    available_workers = 0
    workers_list = []
    # init poller
    poller = Poller()
    # Always poll for worker activity on backend
    poller.register(backend, zmq.POLLIN)
    # Poll front-end only if we have available workers
    poller.register(frontend, zmq.POLLIN)
    while True:
        socks = yield from poller.poll()
        socks = dict(socks)
        # Handle worker activity on backend
        if (backend in socks and socks[backend] == zmq.POLLIN):
            # Queue worker address for LRU routing
            message = yield from backend.recv_multipart()
            assert available_workers < NBR_WORKERS
            worker_addr = message[0]
            # add worker back to the list of workers
            available_workers += 1
            workers_list.append(worker_addr)
            # Second frame is empty
            empty = message[1]
            assert empty == b""
            # Third frame is READY or else a client reply address
            client_addr = message[2]
            # If client reply, send rest back to frontend
            if client_addr != b'READY':
                # Following frame is empty
                empty = message[3]
                assert empty == b""
                reply = message[4]
                yield from frontend.send_multipart([client_addr, b"", reply])
                printdbg('(run_broker) to frontend -- reply: "{}"'.format(
                    reply))
                client_nbr -= 1
                if client_nbr == 0:
                    printdbg('(run_broker) exiting')
                    break  # Exit after N messages
        # poll on frontend only if workers are available
        if available_workers > 0:
            if (frontend in socks and socks[frontend] == zmq.POLLIN):
                # Now get next client request, route to LRU worker
                # Client request is [address][empty][request]
                response = yield from frontend.recv_multipart()
                [client_addr, empty, request] = response
                assert empty == b""
                # Dequeue and drop the next worker address
                available_workers += -1
                # NOTE(review): pop() is LIFO (most-recently-ready worker),
                # not strictly LRU — confirm intended.
                worker_id = workers_list.pop()
                yield from backend.send_multipart(
                    [worker_id, b"", client_addr, b"", request])
                printdbg('(run_broker) to backend -- request: "{}"'.format(
                    request))
    # out of infinite loop: do some housekeeping
    printdbg('(run_broker) finished')
    for worker_task in worker_tasks:
        worker_task.cancel()
    printdbg('(run_broker) workers cancelled')
    # Give cancelled tasks a moment to unwind before closing sockets.
    yield from asyncio.sleep(1)
    frontend.close()
    backend.close()
    #context.term()  # Caution: calling term() blocks.
    loop.stop()
    printdbg('(run_broker) returning')
    return 'finished ok'