def record(self):
    try:
        t1 = datetime.now().time().hour
        t2 = self.startTime.hour
        if t1 < t2:
            wait = HOURS(t2 - t1)
            self.debugOut(
                "waiting for {0} seconds to start...".format(wait))
            yield asyncio.From(asyncio.sleep(wait))
        self.startedTime = datetime.now()
        self.debugOut("starting time lapse video")
        while True:
            try:
                imgData = self.dataQueue.get(False)
                img = dataToImage(imgData, True)
                height, width, layers = img.shape
                cv2.putText(img, "date: {0}".format(datetime.now()),
                            (0, height - 20), cv2.FONT_HERSHEY_SIMPLEX,
                            1.0, (255, 255, 255))
                for i in xrange(self.fps):
                    self.video.write(img)
                    cv2.waitKey(1)
                    yield asyncio.From(asyncio.sleep(1 / self.fps))
            except Empty:
                pass
            if self.videoDate <= datetime.now():
                self.debugOut("finalizing time lapse video")
                self.video.release()
                self.startVideo()
            yield asyncio.From(asyncio.sleep(self.interval))
    except KeyboardInterrupt:
        self.video.release()
    except:
        class FakeOutput:
            msg = ""

            def write(self, msg):
                self.msg += "\n" + msg

        fakeout = FakeOutput()
        self.debugOut("recording crashed!!")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, limit=4, file=fakeout)
        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                  limit=10, file=fakeout)
        self.debugOut(fakeout.msg)
def set_waveform(key, value):
    exception = None
    result = signals.signal('set-%s' % key).send(value)
    if result:
        try:
            receivers, co_callbacks = zip(*result)
            if receivers:
                results = yield asyncio.From(asyncio.gather(*co_callbacks))
        except asyncio.CancelledError:
            raise
        except Exception as exception:
            pass
        else:
            if receivers:
                raise asyncio.Return(zip(receivers, results))
    if exception is not None:
        message = ('Error setting **%s**: `%s`' % (key, exception))
    else:
        message = ('No waveform generators available to set **%s**.' % key)
    yield asyncio.From(_warning(signals.signal('warning'), message,
                                title='Warning: failed to set %s' % key,
                                key='waveform-%s' % key))
def _set_inferior_tty():
    if self.proc_inftty:
        if self.proc_inftty.returncode is None:
            self.proc_inftty.terminate()
        self.proc_inftty = None
    try:
        self.proc_inftty = proc = yield asyncio.From(
            asyncio.create_subprocess_exec(*args))
        info('inferiortty: {}'.format(args))
    except OSError as e:
        self.console_print('Cannot spawn terminal: {}\n'.format(e))
    else:
        start = time.time()
        while time.time() - start < 2:
            try:
                with open(result_file.name) as f:
                    lines = f.readlines()
                    # Commands found in the result file.
                    if len(lines) == 2:
                        set_inferior_tty_cb(lines[0])
                        set_inferior_tty_cb(lines[1])
                        break
            except IOError as e:
                self.console_print(
                    'Cannot set the inferior tty: {}\n'.format(e))
                proc.terminate()
                break
            yield asyncio.From(asyncio.sleep(.100, loop=self.vim.loop))
        else:
            self.console_print('Failed to start inferior_tty.py.\n')
            proc.terminate()
def handle_messages():
    connection = yield trollius.From(
        trollius_redis.Connection.create(host='0.0.0.0', port=6379, db=0))

    # Create subscriber.
    subscriber = yield trollius.From(connection.start_subscribe())

    # Subscribe to channel.
    yield trollius.From(subscriber.subscribe(['pagure.hook']))

    # Inside a while loop, wait for incoming events.
    while True:
        reply = yield trollius.From(subscriber.next_published())
        log.info('Received: %s on channel: %s',
                 repr(reply.value), reply.channel)
        data = json.loads(reply.value)
        username = None
        if '/' in data['project']:
            username, projectname = data['project'].split('/', 1)
        else:
            projectname = data['project']
        project = pagure.lib.get_project(session=pagure.SESSION,
                                         name=projectname, user=username)
        log.info('Got the project, going to the webhooks')
        call_web_hooks(project, data['topic'], data['msg'])
def connect(self):
    self.connection = None
    self.connected_server = None
    port = self.port
    for server in self.servers:
        if ':' in server:
            server, port = server.split(':')
        for i in range(self.attempts):
            try:
                self.connection = yield asyncio.From(
                    self.loop.create_connection(self, server, port))
                self.connected_server = server
                break
            except (OSError, socket.gaierror, socket.herror) as ex:
                self.connection = None
                self.connected_server = None
                self.logger.warn('Failed to connect to CoreMQ %s: %s. '
                                 'Retrying in 1 second...' % (server, ex))
                yield asyncio.From(asyncio.sleep(1))
        if self.connection:
            break
    if (not self.connection and self.lost_connection_callback
            and self.connected_once):
        yield asyncio.From(
            self.loop.create_task(self.lost_connection_callback()))
    elif self.connection:
        self.connected_once = True
def run(self):
    protocol_factory = functools.partial(netbeans.Netbeans, self.signal,
                                         self.connection[2])
    self.nbserver = yield asyncio.From(
        self.loop.create_server(protocol_factory, self.connection[0],
                                self.connection[1]))
    timeout = self.loop.call_later(CONNECTION_TIMEOUT, connection_timeout)
    while True:
        event = yield asyncio.From(self.events.get())
        if timeout:
            timeout.cancel()
            timeout = None
        if isinstance(event, netbeans.Netbeans):
            if event is self.netbeans:
                if not self.netbeans.connected:
                    if self.netbeans.debugger is not None:
                        # Wait until the debugger has signaled it is closed.
                        continue
                    else:
                        info('signaled netbeans is disconnected')
                        break
                # Netbeans signaling it is ready.
                info(self.netbeans)
                self.netbeans.set_debugger(self.debugger)
                # Daemonize now, no more critical startup errors to print on
                # the console.
                if self.options.daemon:
                    daemonize()
                info('pyclewn version %s and the %s debugger',
                     __version__, self.module)
            elif self.netbeans:
                nb = event
                if nb.connected:
                    info('rejecting connection from %s:'
                         ' netbeans already connected', str(nb.addr))
                    nb.close()
            else:
                # Netbeans connection accepted.
                self.netbeans = event
                self.nbserver.close()
                self.nbserver = None
        elif self.netbeans and isinstance(event, debugger.Debugger):
            if self.netbeans.connected:
                # The debugger has been closed, instantiate a new one.
                self.debugger = self.clazz(self)
                self.netbeans.set_debugger(self.debugger)
                info('new "%s" instance', self.module)
            else:
                info('signaled debugger closed and netbeans disconnected')
                break
        else:
            info('got signal %s', event)
            break
def create_ws(self, qlist, symbolList):
    retry = True
    while retry:
        try:
            response = yield trollius.From(self.get_ws_token(qlist))
            if response["msg_code"] == 1:
                token = response["result"]
                # "Successfully obtained token"
                self.logger.info(
                    u"成功获取到token, symbolList = {}".format(symbolList))
                retry = False
            else:
                self.logger.warning("{},{}".format(response, qlist))
                if response["msg_code"] == -11:
                    time.sleep(2)
                    # "Retrying the Sina login"
                    self.logger.warning(u"尝试重新登录新浪")
                    self.sina = V("Sina")
                    self.sina.login(verify=False)
        except Exception as e:
            self.logger.warning(e)

    url_wss = 'wss://ff.sinajs.cn/wskt?token=' + token + '&list=' + qlist
    while True:
        # Establish the websocket connection.
        try:
            ws = yield trollius.From(websockets.connect(url_wss))
            self.websockets[symbolList[0]] = dict()
            self.websockets[symbolList[0]]["ws"] = ws
            self.websockets[symbolList[0]]["qlist"] = qlist
            self.websockets[symbolList[0]]["token"] = token
            self.websockets[symbolList[0]]["renewed"] = datetime.now()
            self.websockets[symbolList[0]]["trialTime"] = 0
            # "WebSocket connection established"
            self.logger.info(u"成功建立ws连接, {}, symbolList = {}".format(
                threading.current_thread().name, symbolList))
            break
        except Exception as e:
            # "Retrying websockets.connect"
            self.logger.warning(
                u"重试 websockets.connect , {}, symbolList = {}".format(
                    threading.current_thread().name, symbolList))

    while self._active:
        try:
            message = yield trollius.From(ws.recv())
            event = Event(event_type='SinaLevel2WS', data=message)
            for q in self._subscriber:
                q.put(event)
            # print("Producer:\n{},{}".format(event.data, datetime.now()))
        except Exception as e:
            self.logger.error("{},{}".format(
                e, threading.current_thread().name))
            ws.close()
            yield trollius.From(
                self.create_ws(qlist=qlist, symbolList=symbolList))
def run(self):
    self.end_time = time() + options.duration
    cb = make_instance()
    yield trollius.From(cb.connect())
    while time() < self.end_time:
        begin_time = time()
        yield trollius.From(cb.upsert_multi(self.kv, format=FMT_BYTES))
        self.wait_time += time() - begin_time
        self.opcount += options.batch
def copy_in_to_out(self):
    transport = None
    try:
        f_in = os.fdopen(self.fdin, 'r')
        f_out = os.fdopen(self.fdout, 'w')
        yield asyncio.From(self.loop.connect_read_pipe(
            lambda: self.reader_proto, f_in))
        if self.fdout_isfifo or self.fdout_istty:
            transport, protocol = yield asyncio.From(
                self.loop.connect_write_pipe(lambda: self.writer_proto,
                                             f_out))
            writer = asyncio.StreamWriter(transport, protocol, self.reader,
                                          self.loop)
            # Remove the reader added by connect_write_pipe() as a
            # workaround to Tulip issue 147:
            # "Fix _UnixWritePipeTransport to support TTY".
            if self.fdout_istty:
                self.loop.remove_reader(f_out.fileno())
        while True:
            try:
                chunk = yield asyncio.From(self.reader.read(BUFFER_SIZE))
            except OSError as e:
                # The read() syscall returns -1 when the slave side of the
                # pty is closed.
                if not self.fdin_istty or e.errno != errno.EIO:
                    raise
                break
            if not chunk:
                # An EOF character (Ctl-D) has been received by
                # the pty_forkexec terminal interface (if any).
                break
            if self.fdout_isfifo or self.fdout_istty:
                writer.write(chunk)
                yield asyncio.From(writer.drain())
            else:
                os.write(self.fdout, chunk)
    except asyncio.CancelledError:
        pass
    finally:
        if self.reader._transport:
            self.reader._transport.close()
        if transport:
            # When the slave side of the pty is closed, write() syscalls
            # to the pty return -1, EAGAIN, and the BlockingIOError
            # exception being ignored by 'writer', leads to an infinite
            # loop in 'to_pty' until the task is cancelled.
            # Abort the 'to_pty' channel transport.
            if self.name == 'to_pty':
                transport.abort()
            else:
                transport.close()
def _attempt_connect(**kwargs):
    ignore = kwargs.pop('ignore', [])
    try:
        # Attempt to connect to automatically selected port.
        dropbot = db.SerialProxy(port=port, ignore=ignore, **kwargs)
        raise asyncio.Return(dropbot)
    except db.proxy.NoPower as exception:
        # No 12V power supply detected on DropBot.
        _L().debug('No 12V power supply detected.')
        responses = signals.signal('no-power').send('keep_alive')
        for t in asyncio.as_completed([loop.create_task(r[1])
                                       for r in responses]):
            response = yield asyncio.From(t)
            if response == 'ignore':
                ignore.append(db.proxy.NoPower)
                break
        else:
            raise exception
    except bnr.proxy.DeviceVersionMismatch as exception:
        # Firmware version does not match driver version.
        _L().debug('Driver version (`%s`) does not match firmware '
                   'version (`%s`)', db.__version__,
                   exception.device_version)
        responses = signals.signal('version-mismatch')\
            .send('keep_alive', driver_version=db.__version__,
                  firmware_version=exception.device_version)
        update = False
        for t in asyncio.as_completed([loop.create_task(r[1])
                                       for r in responses]):
            response = yield asyncio.From(t)
            if response == 'ignore':
                ignore.append(bnr.proxy.DeviceVersionMismatch)
                break
            elif response == 'update':
                update = True
                break
        else:
            raise

        if update:
            # Flash firmware and retry connection.
            _L().info('Flash firmware and retry connection.')
            yield asyncio.From(co_flash_firmware())

    dropbot = yield asyncio.From(_attempt_connect(ignore=ignore, **kwargs))
    raise asyncio.Return(dropbot)
def pdb_run(self, clewn_thread_ready):
    """Run the pdb clewn thread."""
    protocol_factory = functools.partial(netbeans.Netbeans, self.signal,
                                         self.connection[2])
    self.nbserver = yield asyncio.From(
        self.loop.create_server(protocol_factory, self.connection[0],
                                self.connection[1]))
    clewn_thread_ready.set()
    while True:
        event = yield asyncio.From(self.events.get())
        if isinstance(event, netbeans.Netbeans):
            if event is self.netbeans:
                if not self.netbeans.connected:
                    if self.netbeans.debugger is not None:
                        info('the current netbeans session is closed')
                    else:
                        info('signaled netbeans is disconnected')
                    self.netbeans.close()
                    self.netbeans = None
                    continue
                # Netbeans signaling it is ready.
                info(self.netbeans)
                self.netbeans.set_debugger(self.debugger)
                info('pyclewn version %s and the %s debugger',
                     __version__, self.module)
            elif self.netbeans:
                nb = event
                if nb.connected:
                    info('rejecting connection from %s:'
                         ' netbeans already connected', str(nb.addr))
                    nb.ready = True
                    nb.push('There is already an active pdb session at'
                            ' (%s, %s), connection rejected.'
                            % (self.connection[0], self.connection[1]))
                    nb.close()
            else:
                info('netbeans connection accepted')
                self.netbeans = event
        elif isinstance(event, debugger.Debugger):
            # Ignore debugger close events since this only indicates here
            # that the netbeans socket is closed. The target thread is
            # responsible for terminating the clewn thread by calling
            # pdb.exit().
            info('ignoring a debugger close event')
        else:
            info('got signal %s', event)
            if self.netbeans:
                self.netbeans.close()
            break
def handle_echo(reader, writer):
    while True:
        data = yield trollius.From(reader.readline())
        message = data.decode()
        addr = writer.get_extra_info('peername')
        print("Received %r from %r" % (message, addr))

        print("Send: %r" % message)
        writer.write(message.encode())
        yield trollius.From(writer.drain())

    print("Close the client socket")
    writer.close()
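# Sketch (not from the original source): one way the echo handler above could
# be served with trollius, assuming `handle_echo` carries the usual
# `@trollius.coroutine` decorator in its original module.  The host and port
# below are illustrative assumptions.
def serve_echo_example(host='127.0.0.1', port=8888):
    loop = trollius.get_event_loop()
    server = loop.run_until_complete(
        trollius.start_server(handle_echo, host, port, loop=loop))
    try:
        loop.run_forever()
    finally:
        server.close()
        loop.run_until_complete(server.wait_closed())
        loop.close()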
def handle_messages():
    host = pagure.APP.config.get('REDIS_HOST', '0.0.0.0')
    port = pagure.APP.config.get('REDIS_PORT', 6379)
    dbname = pagure.APP.config.get('REDIS_DB', 0)
    connection = yield trollius.From(
        trollius_redis.Connection.create(host=host, port=port, db=dbname))

    # Create subscriber.
    subscriber = yield trollius.From(connection.start_subscribe())

    # Subscribe to channel.
    yield trollius.From(subscriber.subscribe(['pagure.hook']))

    # Inside a while loop, wait for incoming events.
    while True:
        reply = yield trollius.From(subscriber.next_published())
        log.info('Received: %s on channel: %s',
                 repr(reply.value), reply.channel)
        data = json.loads(reply.value)

        username = None
        if data['project'].startswith('forks'):
            username, projectname = data['project'].split('/', 2)[1:]
        else:
            projectname = data['project']

        namespace = None
        if '/' in projectname:
            namespace, projectname = projectname.split('/', 1)

        log.info('Searching %s/%s/%s' % (username, namespace, projectname))

        session = pagure.lib.create_session(pagure.APP.config['DB_URL'])
        project = pagure.lib._get_project(
            session=session, name=projectname, user=username,
            namespace=namespace,
            case=pagure.APP.config.get('CASE_SENSITIVE', False))

        if not project:
            log.info('No project found with these criteria')
            session.close()
            continue

        urls = project.settings.get('Web-hooks')
        session.close()
        if not urls:
            log.info('No URLs set: %s' % urls)
            continue
        urls = urls.split('\n')
        log.info('Got the project, going to the webhooks')
        call_web_hooks(project, data['topic'], data['msg'], urls)
def connect(*args):
    ServerState.mq_connection = None
    while True:
        yield asyncio.From(mq_factory.connect())
        if not mq_factory.connection:
            ServerState.logger.warn(
                'No CoreMQ servers found. Retrying in 3 seconds...')
            yield asyncio.From(asyncio.sleep(3))
        else:
            conn = mq_factory.connection[1]
            conn.connected_future.add_done_callback(
                lambda _: conn.begin_replication('%s:%s' % (
                    socket.gethostname(), port)))
            ServerState.mq_connection = conn
            break
def actuate(proxy, channels, callback):
    '''Actuate channels and wait for callback to return `True`.

    Parameters
    ----------
    channels : list
        List of channel numbers to actuate.
    callback
        Callback function accepting a list of ``capacitance-updated``
        messages as only argument.
    '''
    # Actuate channels.
    yield asyncio.From(actuate_channels(proxy, channels))
    # Wait for callback.
    result = yield asyncio.From(wait_on_capacitance(proxy, callback))
    raise asyncio.Return(result)
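# Sketch (not part of the original module): a simple count-based callback for
# `actuate()`.  The `loop` and `proxy` arguments and the channel numbers are
# illustrative assumptions.
def actuate_until_three_updates(loop, proxy):
    def received_three_updates(messages):
        # `messages` is the list of ``capacitance-updated`` messages seen so
        # far; stop once three have arrived.
        return len(messages) >= 3

    return loop.run_until_complete(
        actuate(proxy, channels=[110, 109],
                callback=received_three_updates))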
def read_frame(signals):
    '''
    :py:mod:`trollius` `asyncio` wrapper to return a single frame produced by
    a ``frame-ready`` event signalled by
    :func:`dropbot_chip_qc.video.chip_video_process()`.

    Parameters
    ----------
    signals : blinker.Namespace
        DMF chip webcam monitor signals (see
        :func:`dropbot_chip_qc.video.chip_video_process()`).
    '''
    loop = asyncio.get_event_loop()
    frame_ready = asyncio.Event()
    response = {}

    def on_frame_ready(sender, **message):
        response.update(message)
        loop.call_soon_threadsafe(frame_ready.set)

    signals.signal('frame-ready').connect(on_frame_ready)
    yield asyncio.From(frame_ready.wait())
    raise asyncio.Return(response)
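# Sketch (not part of the original module): pull a single frame message while
# `chip_video_process()` runs in a background thread.  The ``'frame'`` key is
# an assumption about the message contents.
def grab_one_frame(signals):
    loop = asyncio.get_event_loop()
    message = loop.run_until_complete(read_frame(signals))
    return message.get('frame')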
def poll(self):
    print("poll task started")
    while True:
        try:
            msg = self.debugQueue.get_nowait()
            print(msg)
        except Empty:
            pass
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print("poll exception")
            traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=2, file=sys.stdout)
        try:
            result = self.resultQueue.get_nowait()
            self.hasResult(result)
        except Empty:
            pass
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print("poll exception")
            traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=2, file=sys.stdout)
        finally:
            yield asyncio.From(asyncio.sleep(self.pollRate))
def schema_input(schema, message='**Please fill in the following fields:**',
                 description=None):
    '''
    Asynchronous GTK input dialog based on JSON schema.

    Parameters
    ----------
    schema : dict
        JSON schema.
    message : str, optional
        Message presented in dialog.
    description : str, optional
        Title of input dialog.

    Returns
    -------
    dict or None
        Mapping from field name to entered value if ``OK`` button was
        pressed.  Otherwise, ``None``.
    '''
    # Wrap dialog function call in partial since `gtk_threadsafe` does
    # not support passing keyword arguments.
    dialog_func = ft.partial(schema_dialog, schema, title=description,
                             type=gtk.MESSAGE_OTHER,
                             message_format=markdown2pango(message),
                             buttons=gtk.BUTTONS_OK_CANCEL)
    # Queue dialog to be launched in GTK thread and wait for response.
    response = yield asyncio.From(sync(gtk_threadsafe)(dialog_func)())
    if response is None:
        raise RuntimeError('Cancelled in response to message `%s`.' % message)
    raise asyncio.Return(response)
def poll(self):
    print("WSResourceServer: poll()")
    rs = {}
    while True:
        try:
            rs = {}
            rs["resources"] = []
            for resource in self.resources:
                r = {}
                r["resourceName"] = resource.name
                r["variables"] = resource.variables
                r["lastUpdated"] = str(datetime.now())
                rs['resources'].append(r)
            self.broadcast(json.dumps(rs))
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=6, file=sys.stdout)
        yield asyncio.From(asyncio.sleep(self.pollRate))
def run():
    snmpEngine = SnmpEngine()
    iterator = getCmd(snmpEngine,
                      CommunityData('public', mpModel=0),
                      UdpTransportTarget(('demo.snmplabs.com', 161)),
                      ContextData(),
                      ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))

    (errorIndication, errorStatus,
     errorIndex, varBinds) = yield trollius.From(iterator)

    if errorIndication:
        print(errorIndication)
    elif errorStatus:
        print('%s at %s' % (errorStatus.prettyPrint(),
                            errorIndex and
                            varBinds[int(errorIndex) - 1][0] or '?'))
    else:
        for varBind in varBinds:
            print(' = '.join([x.prettyPrint() for x in varBind]))

    snmpEngine.transportDispatcher.closeDispatcher()
def run(varBinds):
    snmpEngine = SnmpEngine()
    while True:
        (errorIndication, errorStatus,
         errorIndex, varBindTable) = yield trollius.From(
            bulkCmd(snmpEngine,
                    UsmUserData('usr-none-none'),
                    UdpTransportTarget(('demo.snmplabs.com', 161)),
                    ContextData(),
                    0, 50,
                    *varBinds))

        if errorIndication:
            print(errorIndication)
            break
        elif errorStatus:
            print('%s at %s' % (errorStatus.prettyPrint(),
                                errorIndex and
                                varBinds[int(errorIndex) - 1][0] or '?'))
        else:
            for varBindRow in varBindTable:
                for varBind in varBindRow:
                    print(' = '.join([x.prettyPrint() for x in varBind]))

            varBinds = varBindTable[-1]
            if isEndOfMib(varBinds):
                break

    snmpEngine.transportDispatcher.closeDispatcher()
def run():
    snmpEngine = SnmpEngine()
    (errorIndication, errorStatus,
     errorIndex, varBinds) = yield trollius.From(
        sendNotification(snmpEngine,
                         CommunityData('public'),  # mpModel=0
                         UdpTransportTarget(('demo.snmplabs.com', 162)),
                         ContextData(),
                         'inform',
                         NotificationType(
                             ObjectIdentity('1.3.6.1.6.3.1.1.5.2')
                         ).addVarBinds(
                             ('1.3.6.1.6.3.1.1.4.3.0',
                              '1.3.6.1.4.1.20408.4.1.1.2'),
                             ('1.3.6.1.2.1.1.1.0',
                              OctetString('my system')))))

    if errorIndication:
        print(errorIndication)
    elif errorStatus:
        print('%s: at %s' % (errorStatus.prettyPrint(),
                             errorIndex and
                             varBinds[int(errorIndex) - 1][0] or '?'))
    else:
        for varBind in varBinds:
            print(' = '.join([x.prettyPrint() for x in varBind]))

    snmpEngine.transportDispatcher.closeDispatcher()
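# Sketch (not part of the pysnmp examples above): each example coroutine is
# normally driven to completion on the trollius event loop, e.g.
# ``drive_example(run)`` for the get/inform examples or
# ``drive_example(run, [ObjectType(ObjectIdentity('SNMPv2-MIB'))])`` for the
# bulk walk (the initial ObjectType is an illustrative assumption).
def drive_example(coroutine_function, *args):
    trollius.get_event_loop().run_until_complete(coroutine_function(*args))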
def renew_token(self, symbol):
    try:
        response = yield trollius.From(
            self.get_ws_token(self.websockets[symbol]["qlist"]))
        if response["msg_code"] == 1:
            token = response["result"]
            self.websockets[symbol]["token"] = token
            self.websockets[symbol]["renewed"] = datetime.now()
            yield trollius.From(
                self.websockets[symbol]["ws"].send("*" + token))
            self.websockets[symbol]["trialTime"] = 0
        else:
            self.websockets[symbol]["trialTime"] += 1
            self.logger.info(response["result"])
    except Exception as e:
        self.websockets[symbol]["trialTime"] += 1
        # "Token renewal failed (attempt {}); will retry shortly"
        self.logger.warning(u"token获取失败第{}次,待会儿重试".format(
            self.websockets[symbol]["trialTime"]))
def call(ES_URL, queries):
    results = []
    results_append = results.append
    futures = _do_calls(ES_URL, queries)
    for future in futures:
        result = yield trollius.From(future)
        results_append(result)
    raise trollius.Return(results)
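# Sketch (not from the original source): `ES_URL` and the query payload are
# illustrative assumptions; `_do_calls()` is expected to return one awaitable
# per query.
def call_example():
    loop = trollius.get_event_loop()
    return loop.run_until_complete(
        call('http://localhost:9200', [{'query': {'match_all': {}}}]))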
def dump_lines_from_fd(fd, print_func):
    """Given a file descriptor (integer), asynchronously read lines from it.

    Sanitize each line and pass as a sole argument to @p print_func.
    """
    fdobj = os.fdopen(fd, 'r')
    loop = asyncio.get_event_loop()
    reader = asyncio.streams.StreamReader(loop=loop)
    transport, _ = yield asyncio.From(
        loop.connect_read_pipe(
            lambda: asyncio.streams.StreamReaderProtocol(reader), fdobj))
    while True:
        line = yield asyncio.From(reader.readline())
        if line == '':  # EOF
            break
        print_func(sanitize_stdout(line))
    transport.close()
def execute_test(*args, **kwargs):
    yield asyncio.From(set_capacitance_update_interval())
    try:
        result = yield asyncio\
            .From(qc.ui.plan.transfer_windows(*args, **kwargs))
    except qc.ui.plan.TransferFailed as exception:
        # Save intermediate result.
        result = dict(channel_plan=exception.channel_plan,
                      completed_transfers=exception.completed_transfers)
        signals.signal('test-interrupt').send(caller_name(0), **result)
    self.completed_results.append(result)
    yield asyncio.From(
        aproxy.set_state_of_channels(pd.Series(), append=False))
    # result = dict(channel_plan=channel_plan_i,
    #               completed_transfers=completed_transfers_i)
    raise asyncio.Return(result)
def _next_transfer(channel_plan, completed_transfers, co_transfer, n=2):
    transfer_channels = channel_plan[:n]
    result = yield asyncio.From(co_transfer(transfer_channels))
    raise asyncio.Return(channel_plan[1:],
                         completed_transfers +
                         [{'channels': transfer_channels,
                           'result': result}])
def set_capacitance_update_interval():
    state = yield asyncio.From(aproxy.state)
    max_update_interval = int(.5 * min_duration * 1e3)
    if state.capacitance_update_interval_ms > max_update_interval \
            or state.capacitance_update_interval_ms == 0:
        yield asyncio\
            .From(aproxy.update_state(capacitance_update_interval_ms=
                                      max_update_interval))
def actuate_channels(self, channels, allow_disabled=True):
    '''
    Parameters
    ----------
    channels : list
        List of channel numbers to actuate.
    allow_disabled : bool, optional
        If ``False``, verify actuated channels match specified channels
        _exactly_.  Otherwise, ensure that all actuated channels belong to
        the specified set of channels, _even if_ not _all_ specified
        channels are actuated.  This supports attempting to actuate
        channels that are disabled.

    Returns
    -------
    list
        List of actuated channels.  If :data:`allow_disabled` is ``True``,
        the returned list of channels may differ from the specified list of
        channels.

    Raises
    ------
    RuntimeError
        If the list of actuated channels does not match the requested
        channels (missing disabled channels are ignored if
        ``allow_disabled`` is ``True``).
    '''
    loop = asyncio.get_event_loop()
    channels_updated = asyncio.Event()

    def _on_channels_updated(message):
        channels_updated.actuated = message.get('actuated')
        loop.call_soon_threadsafe(channels_updated.set)

    # Enable `channels-updated` DropBot signal.
    self.enable_event(db.proxy.EVENT_CHANNELS_UPDATED)
    # Request to be notified when the set of actuated channels changes.
    signal = self.signals.signal('channels-updated')
    signal.connect(_on_channels_updated)
    # Request actuation of the specified channels.
    self.set_state_of_channels(pd.Series(1, index=channels), append=False)
    yield asyncio.From(channels_updated.wait())
    if not allow_disabled and (set(channels_updated.actuated) !=
                               set(channels)):
        raise RuntimeError('Actuated channels `%s` do not match '
                           'expected channels `%s`' %
                           (channels_updated.actuated, channels))
    elif set(channels_updated.actuated) - set(channels):
        # Disabled channels may be missing, but extra actuated channels are
        # never allowed.
        raise RuntimeError('Actuated channels `%s` are not included in'
                           ' expected channels `%s`' %
                           (channels_updated.actuated, channels))
    raise asyncio.Return(channels_updated.actuated)
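# Sketch (not part of the original class): `proxy` is assumed to be an
# instance of the class defining `actuate_channels()` above, already
# connected to a DropBot; the channel numbers are illustrative.
def actuate_channels_example(loop, proxy):
    return loop.run_until_complete(
        proxy.actuate_channels([10, 11, 12], allow_disabled=True))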
def _exec_task():
    self.zmq_plugin = ZmqPlugin('microdrop', get_hub_uri())
    self.zmq_plugin.reset()
    zmq_ready.set()

    event = asyncio.Event()
    try:
        yield asyncio.From(event.wait())
    except asyncio.CancelledError:
        _L().info('closing ZeroMQ execution event loop')