def shoot(cfgs, status_callback):
    """
    Performs multi-tank multi-config test.
    Accepts one or more config dicts.
    Returns list of session IDs.
    Raises TankLocked and TestFailed.
    """
    try:
        sessions = [
            SessionWrapper(ft.partial(status_callback, i), **cfg)
            for i, cfg in enumerate(cfgs)  # pylint: disable=W0142
        ]
    except Exception:
        logger.exception("Failed to initialize session objects, config:\n%s",
                         yaml.safe_dump(cfgs))
        raise
    prepares = []
    runs = []
    stops = []
    try:
        try:
            prepares = [async(session.prepare()) for session in sessions]
            yield From(gather(*prepares))  # pylint: disable=W0142
            logger.info("All tanks are prepared. STARTING TO SHOOT.")
            runs = [async(session.run_until_finish()) for session in sessions]
            yield From(gather(*runs))  # pylint: disable=W0142
        except KeyboardInterrupt:
            logger.info("Test interrupted")
            raise
        except CancelledError:
            logger.info("Test cancelled")
            raise
        except TestFailed:
            logger.info("Test failed")
            raise
        except Exception:
            logger.exception("Exception occurred in Test.run_until_finish()")
            raise
        except BaseException:
            logger.exception("Something strange caught by Test.run_until_finish()")
            raise
    except BaseException as ex:
        logger.info("Stopping remaining tank sessions...")
        stops = [async(session.stop()) for session in sessions
                 if not session.finished]
        yield From(gather(*stops, return_exceptions=True))  # pylint: disable=W0142
        raise ex
    finally:
        for task in prepares + runs + stops:
            task.cancel()
    logger.info("All tanks are done.")
    raise Return([session.session.s_id if session.session is not None else None
                  for session in sessions])
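# Note: `async` above is the task-spawning helper from trollius (an alias of
# `ensure_future`), not the Python 3.5+ keyword; this code only runs on
# interpreters where `async` is still a valid identifier.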
def go(self):
    coroutines = [
        work(self.work_q, self.send_q, self.data, self.metadata_addr,
             self.address, self.loop),
        control(self.control_q,
                {'compute': self.work_q,
                 'send': self.send_q,
                 'get-data': self.data_q,
                 'put-data': self.data_q,
                 'delete-data': self.data_q,
                 'ping': self.pingpong_q}),
        send(self.send_q, self.outgoing_q, self.signal_q),
        pingpong(self.pingpong_q, self.send_q),
        comm(self.ip, self.port, self.bind_ip, self.signal_q, self.control_q,
             self.outgoing_q, self.loop, self.context),
        manage_data(self.data_q, self.send_q, self.data, self.metadata_addr,
                    self.address)
    ]
    try:
        yield From(asyncio.wait(coroutines,
                                return_when=asyncio.FIRST_COMPLETED))
    except:
        import pdb
        pdb.set_trace()
    finally:
        self.close()

    print("Closing")
    yield From(asyncio.gather(*coroutines))
def config(self, key=None, **kw):
    contents = yield From(self.list_backend(prefix=self.prefixify(key)))
    contents = contents.get('Contents', [])
    tasks = [self.get(key=obj['Key'], **kw) for obj in contents]
    results = yield From(asyncio.gather(*tasks))
    keys = [k['Key'].split('/')[-1] for k in contents]
    raise Return(OrderedDict(zip(keys, results)))
def test_compute():
    with everything() as (loop, c, w, sock):
        w.data['x'] = 123
        c.who_has['x'].add(w.address)
        c.has_what[w.address].add('x')

        @asyncio.coroutine
        def f():
            msg = {'op': 'compute',
                   'key': 'y',
                   'function': add,
                   'args': ('x', 10),
                   'needed': ['x'],
                   'reply': True}
            for i in range(3):
                sock.send(dumps(msg))
                result = yield From(delay(loop, sock.recv))
                assert loads(result) == {'op': 'computation-finished',
                                         'key': 'y'}
            w.close()
            c.close()

        loop.run_until_complete(asyncio.gather(w.go(), c.go(), f()))
def test_remote_gather():
    with Loop() as loop, center(loop=loop) as c, \
            worker(metadata_port=c.port, port=1234, loop=loop) as a, \
            worker(metadata_port=c.port, port=4321, loop=loop) as b, \
            dealer(a.address) as sock:
        # Put 'x' in b's data.  Register with metadata store
        b.data['x'] = 123
        c.who_has['x'].add(b.address)
        c.has_what[b.address].add('x')

        @asyncio.coroutine
        def f():
            msg = {'op': 'compute',
                   'key': 'y',
                   'function': add,
                   'args': ('x', 10),
                   'needed': ['x'],
                   'reply': True}
            sock.send(dumps(msg))  # send to a, will need to get from b
            result = yield From(delay(loop, sock.recv))
            assert loads(result) == {'op': 'computation-finished', 'key': 'y'}
            a.close()
            b.close()
            c.close()

        loop.run_until_complete(asyncio.gather(a.go(), b.go(), c.go(), f()))
        assert a.data['y'] == 10 + 123
        assert c.who_has['y'] == set([a.address])
        assert 'y' in c.has_what[a.address]
def loop_ips(ip, log=False):
    """
    Loop through the IPs from ch_ip to 255, detecting Chromecasts with
    det_ccast, and return a list of potential Chromecasts.
    """
    active_ccasts = []  # detected Chromecasts stored here
    loop = asyncio.get_event_loop()
    if version.startswith('3'):
        # if python3, import it here to avoid syntax issues
        from chrome_cut.fix3 import det_ccast as det_ccast3
        tasks = [det_ccast3('.'.join(ip.split('.')[0:-1]) + '.' + str(i), log)
                 for i in range(1, 256)]
    else:
        tasks = [det_ccast(  # feeding the range of IPs to the async function
                     '.'.join(ip.split('.')[0:-1]) + '.' + str(i), log)
                 for i in range(1, 256)]
    results = loop.run_until_complete(asyncio.gather(asyncio.wait(tasks)))
    # loop.close() should happen right before exit
    # FIXME: register loop.close() to run before exit
    register(loop.close)
    for result in results[0][0]:  # looking for successful ones
        global counter
        counter = 0  # clearing up the global counter
        if result.result()[0]:
            active_ccasts.append(result.result()[1])
    return active_ccasts if len(active_ccasts) >= 1 else None
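# Note: `asyncio.gather(asyncio.wait(tasks))` gathers a single `wait()`
# coroutine, so `results` is `[(done, pending)]`; `results[0][0]` is the set
# of completed futures, which is why each entry above is unwrapped again via
# `result.result()`.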
def set_waveform(key, value):
    exception = None
    result = signals.signal('set-%s' % key).send(value)
    if result:
        try:
            receivers, co_callbacks = zip(*result)
            if receivers:
                results = yield asyncio.From(asyncio.gather(*co_callbacks))
        except asyncio.CancelledError:
            raise
        except Exception as exception:
            pass
        else:
            if receivers:
                raise asyncio.Return(zip(receivers, results))
    if exception is not None:
        message = ('Error setting **%s**: `%s`' % (key, exception))
    else:
        message = ('No waveform generators available to set **%s**.' % key)
    yield asyncio.From(_warning(signals.signal('warning'), message,
                                title='Warning: failed to set %s' % key,
                                key='waveform-%s' % key))
def config(self, key=None, **kw):
    contents = yield From(self.list_backend(prefix=self.prefixify(key)))
    contents = contents.get('Contents', [])
    kw.setdefault('in_group_check', True)
    tasks = [self.get(key=obj['Key'], **kw) for obj in contents]
    try:
        results = yield From(asyncio.gather(*tasks))
    except ValueError as e:
        raise Return({})
    keys = [k['Key'].split('/')[-1] for k in contents]
    raise Return(OrderedDict(zip(keys, results)))
def test_get_data():
    with everything() as (loop, c, w, sock):
        w.data['x'] = 123

        @asyncio.coroutine
        def f():
            msg = {'op': 'get-data', 'keys': ['x']}
            for i in range(3):
                sock.send(dumps(msg))
                result = yield From(delay(loop, sock.recv))
                assert loads(result) == {'x': 123}
            w.close()
            c.close()

        loop.run_until_complete(asyncio.gather(w.go(), c.go(), f()))
def _main_task_done(main_task):
    """Callback to be called when the top-level coroutine dies"""
    if main_task.cancelled():
        logger.info("Main task cancelled")
    else:
        ex = main_task.exception()
        if ex is None:
            logger.info("Main task finished")
        else:
            logger.error("Main task failed")
    logger.info("Finding remaining tasks...")
    pending = set(task for task in asyncio.Task.all_tasks()
                  if not task.done())
    logger.info("Waiting for %s remaining tasks to terminate", len(pending))
    gth = asyncio.gather(*pending, return_exceptions=True)
    gth.add_done_callback(_stop_loop)
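# A minimal sketch of the `_stop_loop` callback referenced above; its body is
# an assumption (that it only needs to stop the loop once the gathered
# shutdown future completes), not the original implementation.
def _stop_loop(gather_future):
    # All remaining tasks are done at this point; with
    # `return_exceptions=True` their failures arrive as results, so nothing
    # re-raises here.
    logger.info("All remaining tasks terminated; stopping loop")
    asyncio.get_event_loop().stop()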
def test_Worker():
    with Loop() as loop, center(loop) as c, \
            worker(metadata_port=c.port, loop=loop) as w, \
            dealer(w.address) as sock:

        @asyncio.coroutine
        def f():
            msg = {'op': 'ping'}
            for i in range(3):
                sock.send(dumps(msg))
                result = yield From(delay(loop, sock.recv))
                assert result == b'pong'
            w.close()

        loop.run_until_complete(asyncio.gather(w.go(), f()))
def get_remote_data(loop, keys, metadata_addr):
    msg = {'op': 'who-has', 'keys': keys}
    who_has = yield From(dealer_send_recv(loop, metadata_addr, msg))
    lost = set(keys) - set(k for k, v in who_has.items() if v)
    if lost:
        raise KeyError("Missing keys {%s}" % ', '.join(map(str, lost)))

    # get those keys from remote sources
    print("Collecting %s" % who_has)
    coroutines = [get_datum(loop, random.choice(list(who_has[k])), [k])
                  for k in keys]
    result = yield From(asyncio.gather(*coroutines))
    raise Return(merge(result))
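# Note: `asyncio.gather` returns results in argument order, so `result` is a
# list of per-key mappings aligned with `keys`; `merge` (presumably
# `toolz.merge`) folds them into a single dict.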
def test_center():
    with Loop() as loop:
        with center(loop) as c:
            with dealer(c.address) as sock:

                @asyncio.coroutine
                def f():
                    msg = {'op': 'register',
                           'address': 'hank',
                           'keys': ['x', 'y'],
                           'reply': True}
                    sock.send(dumps(msg))
                    ack = yield From(delay(loop, sock.recv))
                    assert ack == b'OK'
                    assert 'hank' in c.who_has['x']
                    assert 'hank' in c.who_has['y']
                    assert c.has_what['hank'] == set(['x', 'y'])

                    msg = {'op': 'who-has', 'keys': ['x']}
                    sock.send(dumps(msg))
                    result = yield From(delay(loop, sock.recv))
                    assert loads(result) == {'x': set(['hank'])}

                    msg = {'op': 'list', 'number': 0}
                    sock.send(dumps(msg))
                    result = yield From(delay(loop, sock.recv))
                    assert loads(result) == set(['hank'])

                    msg = {'op': 'unregister',
                           'address': 'hank',
                           'keys': ['x'],
                           'reply': True}
                    sock.send(dumps(msg))
                    result = yield From(delay(loop, sock.recv))
                    assert c.who_has['x'] == set()
                    assert c.who_has['y'] == set(['hank'])
                    assert c.has_what['hank'] == set(['y'])

                    c.close()

                loop.run_until_complete(asyncio.gather(c.go(), f()))
def main():
    asyncio.set_event_loop(None)
    if args.iocp:
        from trollius.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    sslctx = None
    if args.tls:
        sslctx = test_utils.dummy_ssl_context()
    cache = CacheClient(args.host, args.port, sslctx=sslctx, loop=loop)
    try:
        loop.run_until_complete(
            asyncio.gather(*[testing(i, cache, loop)
                             for i in range(args.ntasks)],
                           loop=loop))
    finally:
        loop.close()
def config(self, key=None, **kw):
    args = prepare()
    contents = yield From(self.list_backend(prefix=self.prefixify(key)))
    contents = contents.get('Contents', [])
    kw.setdefault('in_group_check', False)
    kw.setdefault('no_files', True)
    tasks = [self._get(key=obj['Key'], **kw) for obj in contents]
    try:
        results = yield From(asyncio.gather(*tasks))
    except ValueError as e:
        raise Return({})
    result = OrderedDict()
    project_env = "{}/{}".format(args.project, args.env)
    for k, v in enumerate(results):
        if kw.get('in_group_check'):
            # for grouped keys strip prefixes
            key_ = contents[k]['Key'].split('/')[-1]
        else:
            # otherwise full key
            key_ = contents[k]['Key'].replace(project_env, '').lstrip('/')
        if args.skip_files and v['is_file']:
            continue
        result[key_] = v['data']
    raise Return(result)
def run():
    connection = yield From(trollius_redis.Pool.create(
        host=u'localhost', port=6379, poolsize=50))
    try:
        # === Benchmark 1 ==
        print(u'1. How much time does it take to set 10,000 values '
              u'in Redis? (without pipelining)')
        print(u'Starting...')
        start = time.time()

        # Do 10,000 set requests
        for i in range(10 * 1000):
            # By using yield From here, we wait for the answer.
            yield From(connection.set(u'key', u'value'))

        print(u'Done. Duration=', time.time() - start)
        print()

        # === Benchmark 2 (should be at least 3x as fast) ==
        print(u'2. How much time does it take if we use asyncio.gather, '
              u'and pipeline requests?')
        print(u'Starting...')
        start = time.time()

        # Do 10,000 set requests
        futures = [asyncio.Task(connection.set(u'key', u'value'))
                   for x in range(10 * 1000)]
        yield From(asyncio.gather(*futures))

        print(u'Done. Duration=', time.time() - start)
    finally:
        connection.close()
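# Note: the explicit `asyncio.Task(...)` wrapping above is optional.
# trollius' `gather`, like asyncio's, accepts bare coroutine objects and
# wraps them in tasks itself, so benchmark 2 could equivalently be written:
#
#     yield From(asyncio.gather(*[connection.set(u'key', u'value')
#                                 for x in range(10 * 1000)]))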
def go(self):
    coroutines = [
        control(self.control_q,
                {'who-has': self.metadata_q,
                 'register': self.metadata_q,
                 'unregister': self.metadata_q,
                 'list': self.metadata_q,
                 'ping': self.pingpong_q,
                 'send': self.send_q}),
        send(self.send_q, self.outgoing_q, self.signal_q),
        pingpong(self.pingpong_q, self.send_q),
        comm(self.ip, self.port, self.bind_ip, self.signal_q, self.control_q,
             self.outgoing_q, self.loop, self.context),
        metadata(self.metadata_q, self.send_q, self.who_has, self.has_what)
    ]
    first, other = yield From(
        asyncio.wait(coroutines, return_when=asyncio.FIRST_COMPLETED))
    print("Closing")
    yield From(asyncio.gather(*other))
def _warning(signal, message, **kwargs):
    '''
    XXX Coroutine XXX

    Send warning signal and gather response from receivers.

    If no receivers are available or any receiver raises an exception, raise
    the specified warning message as a `RuntimeError`.

    Raises
    ------
    RuntimeError
        If no receivers are available or any receiver raises an exception.
    '''
    responses = signal.send(message, **kwargs)
    try:
        receivers, co_callbacks = zip(*responses)
        results = yield asyncio.From(asyncio.gather(*co_callbacks))
    except Exception:
        raise RuntimeError(message)
    else:
        raise asyncio.Return(zip(receivers, results))
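# A minimal usage sketch for `_warning` (hypothetical signal namespace,
# message, and key; not from the original module).  The gathered receiver
# results come back through `asyncio.Return`:
@asyncio.coroutine
def _example_warn(signals):
    # Raises RuntimeError if no receiver is connected to 'warning'.
    results = yield asyncio.From(_warning(signals.signal('warning'),
                                          'example message',
                                          key='example-warning'))
    raise asyncio.Return(results)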
def test_no_data_found():
    with everything() as (loop, c, w, sock):

        @asyncio.coroutine
        def f():
            msg = {'op': 'compute',
                   'key': 'y',
                   'function': add,
                   'args': ('asdf', 10),
                   'needed': ['asdf'],
                   'reply': True}
            sock.send(dumps(msg))
            result = yield From(delay(loop, sock.recv))
            result = loads(result)
            assert result['op'] == 'computation-failed'
            assert isinstance(result['error'], KeyError)
            assert 'asdf' in str(result['error'])
            w.close()
            c.close()

        loop.run_until_complete(asyncio.gather(w.go(), c.go(), f()))
def on_actuation_request(electrode_states, duration_s=0):
    '''
    XXX Coroutine XXX

    Actuate electrodes according to specified states.

    Parameters
    ----------
    electrode_states : pandas.Series
    duration_s : float, optional
        If ``volume_threshold`` step option is set, maximum duration before
        timing out.  Otherwise, time to actuate before actuation is
        considered completed.

    Returns
    -------
    actuated_electrodes : list
        List of actuated electrode IDs.
    '''
    try:
        result = yield asyncio\
            .From(actuate(proxy, dmf_device, electrode_states,
                          duration_s=duration_s,
                          volume_threshold=volume_threshold,
                          c_unit_area=c_unit_area))
        # Notify other plugins that actuation completed.
        responses = signals.signal('actuation-completed')\
            .send('dropbot_plugin', **result)
        yield asyncio.From(asyncio.gather(*(r[1] for r in responses)))
    except asyncio.CancelledError:
        raise
    except:
        logging.info('on_actuation_request', exc_info=True)
        raise
    else:
        raise asyncio.Return(result['actuated_electrodes'])
def test_gather(self):
    fut = asyncio.Future()
    fut.set_result("ok")
    results = self.loop.run_until_complete(trollius.gather(fut))
    self.assertEqual(results, ["ok"])
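# A companion sketch in the same style (hypothetical test, not from the
# original suite): `gather` preserves argument order, and with
# `return_exceptions=True` failures are reported as results instead of
# raising.
def test_gather_return_exceptions(self):
    ok = asyncio.Future()
    ok.set_result("ok")
    failed = asyncio.Future()
    failed.set_exception(ValueError("boom"))
    results = self.loop.run_until_complete(
        trollius.gather(ok, failed, return_exceptions=True))
    self.assertEqual(results[0], "ok")
    self.assertIsInstance(results[1], ValueError)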
def execute_actuation(signals, static_states, dynamic_states, voltage,
                      frequency, duration_s):
    '''
    XXX Coroutine XXX

    Execute specified *static* and *dynamic* electrode actuations.

    Parameters
    ----------
    signals : blinker.Namespace
        Signals namespace.
    static_states : pandas.Series
        Static electrode actuation states, indexed by electrode ID, (e.g.,
        `"electrode001"`).
    dynamic_states : pandas.Series
        Dynamic electrode actuation states, indexed by electrode ID.
    voltage : float
        Actuation amplitude as RMS AC voltage (in volts).
    frequency : float
        Actuation frequency (in Hz).
    duration_s : float
        Actuation duration (in seconds).  If not specified, use value from
        step options.

    Returns
    -------
    dict
        Response with fields:

        - ``start``: actuation start timestamp (`datetime.datetime`).
        - ``end``: actuation end timestamp (`datetime.datetime`).
        - ``actuated_electrodes``: actuated electrode IDs (`list`).

    See Also
    --------
    execute_actuations


    .. versionchanged:: 2.25
        Still apply for specified duration even if _no electrodes_ are
        specified for actuation.

    .. versionchanged:: 2.28.2
        Allow user to optionally ignore failed actuations.

    .. versionchanged:: 2.30
        Add `signals`, `voltage` and `frequency` parameters.  Refactor to set
        waveform parameters using ``signals`` namespace instead.

    .. versionchanged:: 2.30
        Refactor electrode actuation requests to use :data:`signals`
        interface instead of using pyutilib :func:`emit_signal()`.

    .. versionchanged:: 2.31.1
        Prevent error dialog prompt if coroutine is cancelled while calling
        ``set_waveform()`` callbacks.
    '''
    # Notify other plugins that dynamic electrode states have changed.
    responses = (signals.signal('dynamic-electrode-states-changed')
                 .send(NAME, electrode_states=dynamic_states))
    yield asyncio.From(asyncio.gather(*(r[1] for r in responses)))

    static_electrodes_to_actuate = set(static_states[static_states > 0].index)
    dynamic_electrodes_to_actuate = set(dynamic_states[dynamic_states > 0]
                                        .index)
    electrodes_to_actuate = (dynamic_electrodes_to_actuate |
                             static_electrodes_to_actuate)

    # Execute `set_electrode_states` command through ZeroMQ plugin API to
    # notify electrode actuator plugins (i.e., plugins implementing the
    # `IElectrodeActuator` interface) of the electrodes to actuate.
    s_electrodes_to_actuate = \
        pd.Series(True, index=sorted(electrodes_to_actuate))

    @asyncio.coroutine
    def set_waveform(key, value):
        exception = None
        result = signals.signal('set-%s' % key).send(value)
        if result:
            try:
                receivers, co_callbacks = zip(*result)
                if receivers:
                    results = yield asyncio.From(asyncio
                                                 .gather(*co_callbacks))
            except asyncio.CancelledError:
                raise
            except Exception as exception:
                pass
            else:
                if receivers:
                    raise asyncio.Return(zip(receivers, results))
        if exception is not None:
            message = ('Error setting **%s**: `%s`' % (key, exception))
        else:
            message = ('No waveform generators available to set **%s**.'
                       % key)
        yield asyncio.From(_warning(signals.signal('warning'), message,
                                    title='Warning: failed to set %s' % key,
                                    key='waveform-%s' % key))

    for key, value, unit in (('frequency', frequency, 'Hz'),
                             ('voltage', voltage, 'V')):
        waveform_result = yield asyncio.From(set_waveform(key, value))
        if waveform_result:
            _L().info('%s set to %s%s (receivers: `%s`)', key,
                      si.si_format(value), unit, zip(*waveform_result)[0])

    electrode_actuators = signals.signal('on-actuation-request')\
        .send(s_electrodes_to_actuate, duration_s=duration_s)

    if not electrode_actuators:
        title = 'Warning: failed to actuate all electrodes'
        message = ('No electrode actuators registered to **actuate**: `%s`'
                   % list(electrodes_to_actuate))
        yield asyncio.From(_warning(signals.signal('warning'), message,
                                    title=title, key='no-actuators'))

        # Simulate actuation by waiting for specified duration.
        yield asyncio.From(asyncio.sleep(duration_s))
    else:
        actuation_tasks = zip(*electrode_actuators)[1]

        # Wait for actuations to complete.
        start = dt.datetime.now()
        done, pending = yield asyncio.From(asyncio.wait(actuation_tasks))
        end = dt.datetime.now()

        actuated_electrodes = set()
        exceptions = []

        for d in done:
            try:
                actuated_electrodes.update(d.result())
            except Exception as exception:
                # Actuation error occurred.  Save exception and check
                # remaining responses from actuators.
                exceptions.append(exception)

        if (electrodes_to_actuate - actuated_electrodes) or exceptions:
            def _error_message():
                missing_electrodes = (electrodes_to_actuate -
                                      actuated_electrodes)
                messages = []
                if missing_electrodes:
                    messages.append('Failed to actuate the following '
                                    'electrodes: %s' %
                                    ', '.join('`%s`' % e
                                              for e in missing_electrodes))
                if len(exceptions) == 1:
                    messages.append('**Actuation error:** `%s`' %
                                    exceptions[0])
                elif exceptions:
                    messages.append('**Actuation errors:**\n%s' %
                                    '\n'.join(' - ' + '`%s`' % e
                                              for e in exceptions))
                return '\n\n'.join(messages)

            # Send `'warning'` signal to give other plugins an opportunity
            # to handle the warning.
            yield asyncio.From(_warning(signals.signal('warning'),
                                        _error_message(),
                                        title='Warning: actuation error',
                                        key='actuation-error'))
            _L().info('Ignored actuation error(s): `%s`', exceptions)

            # Simulate actuation by waiting for remaining duration.
            remaining_duration = (duration_s - (dt.datetime.now() -
                                                start).total_seconds())
            if remaining_duration > 0:
                yield asyncio.From(asyncio.sleep(remaining_duration))
        else:
            # Requested actuations were completed successfully.
            _L().info('actuation completed (actuated electrodes: %s)',
                      actuated_electrodes)

    raise asyncio.Return({'start': start, 'end': end,
                          'actuated_electrodes': sorted(actuated_electrodes)})
def co_callback(message):
    listeners = signals.signal(name).send('keep_alive', **message)
    yield asyncio.From(asyncio.gather(*(l[1] for l in listeners)))
def main():
    def make_stream(sources):
        bug_compat = spead2.BUG_COMPAT_PYSPEAD_0_5_2 if args.pyspead else 0
        stream = spead2.recv.trollius.Stream(thread_pool, bug_compat,
                                             args.heaps, args.ring_heaps)
        if memory_pool is not None:
            stream.set_memory_allocator(memory_pool)
        if args.memcpy_nt:
            stream.set_memcpy(spead2.MEMCPY_NONTEMPORAL)
        ibv_endpoints = []
        for source in sources:
            try:
                if ':' in source:
                    host, port = source.rsplit(':', 1)
                    port = int(port)
                else:
                    host = ''
                    port = int(source)
            except ValueError:
                try:
                    stream.add_udp_pcap_file_reader(source)
                except AttributeError:
                    raise RuntimeError(
                        'spead2 was compiled without pcap support')
            else:
                if args.tcp:
                    stream.add_tcp_reader(port, args.packet, args.buffer,
                                          host)
                elif 'ibv' in args and args.ibv:
                    if host is None:
                        raise ValueError(
                            'a multicast group is required when using --ibv')
                    ibv_endpoints.append((host, port))
                elif args.bind and host:
                    stream.add_udp_reader(host, port, args.packet,
                                          args.buffer, args.bind)
                else:
                    stream.add_udp_reader(port, args.packet, args.buffer,
                                          host)
        if ibv_endpoints:
            stream.add_udp_ibv_reader(ibv_endpoints, args.bind, args.packet,
                                      args.buffer, args.ibv_vector,
                                      args.ibv_max_poll)
        return stream

    def make_coro(sources):
        stream = make_stream(sources)
        return run_stream(stream, sources[0], args), stream

    def stop_streams():
        for stream in streams:
            stream.stop()

    args = get_args()
    logging.basicConfig(level=getattr(logging, args.log.upper()))
    if args.affinity is not None and len(args.affinity) > 0:
        spead2.ThreadPool.set_affinity(args.affinity[0])
        thread_pool = spead2.ThreadPool(args.threads,
                                        args.affinity[1:] +
                                        args.affinity[:1])
    else:
        thread_pool = spead2.ThreadPool(args.threads)
    memory_pool = None
    if args.mem_pool:
        memory_pool = spead2.MemoryPool(args.mem_lower, args.mem_upper,
                                        args.mem_max_free, args.mem_initial)
    if args.joint:
        coros_and_streams = [make_coro(args.source)]
    else:
        coros_and_streams = [make_coro([source]) for source in args.source]
    coros, streams = zip(*coros_and_streams)
    main_task = trollius.ensure_future(trollius.gather(*coros))
    loop = trollius.get_event_loop()
    loop.add_signal_handler(signal.SIGINT, stop_streams)
    try:
        loop.run_until_complete(main_task)
    except trollius.CancelledError:
        pass
    loop.close()
def monitor(signals):
    '''
    Establish and maintain a DropBot connection.

    XXX Coroutine XXX

    If no DropBot is available or if the connection is lost, wait until a
    DropBot is detected on one of the available serial ports and
    (re)connect.

    DropBot signals are forwarded to the supplied :data:`signals` namespace,
    avoiding the need to manually connect signals after DropBot is
    (re)connected.

    DropBot connection is automatically closed when coroutine exits, e.g.,
    when cancelled.

    Notes
    -----
    On Windows **MUST** be run using an `asyncio.ProactorEventLoop`.

    Parameters
    ----------
    signals : blinker.Namespace
        Namespace for DropBot monitor signals.

    Sends
    -----
    connected
        When DropBot connection is established, with kwargs:

        - ``dropbot``: reference to DropBot proxy instance.
    disconnected
        When DropBot connection is lost.
    chip-inserted
        When DropBot detects a chip has been inserted.  Also sent upon
        connection to DropBot if a chip is present.
    chip-removed
        When DropBot detects a chip has been removed.  Also sent upon
        connection to DropBot if a chip is **not** present.

    Example
    -------

    >>> import blinker
    >>>
    >>> signals = blinker.Namespace()
    >>>
    >>> @asyncio.coroutine
    >>> def dump(*args, **kwargs):
    >>>     print('args=`%s`, kwargs=`%s`' % (args, kwargs))
    >>>
    >>> signals.signal('chip-inserted').connect(dump, weak=False)
    >>> loop = asyncio.ProactorEventLoop()
    >>> asyncio.set_event_loop(loop)
    >>> task = loop.create_task(db.monitor.dropbot_monitor(signals))
    >>> # Stop monitor after 15 seconds.
    >>> loop.call_later(15, task.cancel)
    >>> loop.run_until_complete(task)


    .. versionchanged:: 1.67.1
        Upon connection, send `'chip-inserted'` if chip is inserted or send
        `'chip-removed'` if no chip is inserted.

    .. versionchanged:: 1.68
        Send `'no-power'` signal if 12V power supply is not connected.
        Receivers may return `'ignore'` to attempt to connect anyway.
    '''
    loop = asyncio.get_event_loop()
    dropbot = None

    @asyncio.coroutine
    def co_flash_firmware():
        if dropbot is not None:
            dropbot.terminate()
        db.bin.upload.upload()
        time.sleep(.5)

    def flash_firmware(dropbot):
        loop.create_task(co_flash_firmware())

    signals.signal('flash-firmware')\
        .connect(lambda *args: loop.call_soon_threadsafe(flash_firmware,
                                                         dropbot),
                 weak=False)

    def reboot(dropbot):
        if dropbot is not None:
            dropbot._reboot()

    signals.signal('reboot')\
        .connect(lambda *args: loop.call_soon_threadsafe(reboot, dropbot),
                 weak=False)

    def reconnect(dropbot):
        if dropbot is not None:
            dropbot.terminate()

    signals.signal('reconnect')\
        .connect(lambda *args: loop.call_soon_threadsafe(reconnect, dropbot),
                 weak=False)

    try:
        while True:
            # Get list of available devices.
            df_comports = yield asyncio.From(
                bnr.async._available_devices(timeout=.1))
            if 'device_name' not in df_comports or not df_comports.shape[0]:
                yield asyncio.From(asyncio.sleep(.1))
                continue

            # Automatically select DropBot with highest version, with ties
            # going to the lowest port name (i.e., `COM1` before `COM2`).
            df_comports = df_comports.loc[df_comports.device_name ==
                                          'dropbot'].copy()
            df_comports.reset_index(inplace=True)
            df_comports.sort_values(['device_version', 'port'],
                                    ascending=[False, True], inplace=True)
            df_comports.set_index('port', inplace=True)
            port = df_comports.index[0]

            @asyncio.coroutine
            def _attempt_connect(**kwargs):
                ignore = kwargs.pop('ignore', [])
                try:
                    # Attempt to connect to automatically selected port.
                    dropbot = db.SerialProxy(port=port, ignore=ignore,
                                             **kwargs)
                    raise asyncio.Return(dropbot)
                except db.proxy.NoPower as exception:
                    # No 12V power supply detected on DropBot.
                    _L().debug('No 12V power supply detected.')
                    responses = signals.signal('no-power').send('keep_alive')

                    for t in asyncio.as_completed([loop.create_task(r[1])
                                                   for r in responses]):
                        response = yield asyncio.From(t)
                        if response == 'ignore':
                            ignore.append(db.proxy.NoPower)
                            break
                    else:
                        raise exception
                except bnr.proxy.DeviceVersionMismatch as exception:
                    # Firmware version does not match driver version.
                    _L().debug('Driver version (`%s`) does not match '
                               'firmware version (`%s`)', db.__version__,
                               exception.device_version)
                    responses = signals.signal('version-mismatch')\
                        .send('keep_alive', driver_version=db.__version__,
                              firmware_version=exception.device_version)

                    update = False

                    for t in asyncio.as_completed([loop.create_task(r[1])
                                                   for r in responses]):
                        response = yield asyncio.From(t)
                        if response == 'ignore':
                            ignore.append(bnr.proxy.DeviceVersionMismatch)
                            break
                        elif response == 'update':
                            update = True
                            break
                    else:
                        raise

                    if update:
                        # Flash firmware and retry connection.
                        _L().info('Flash firmware and retry connection.')
                        yield asyncio.From(co_flash_firmware())

                dropbot = yield asyncio.From(_attempt_connect(ignore=ignore,
                                                              **kwargs))
                raise asyncio.Return(dropbot)

            try:
                dropbot = yield asyncio.From(_attempt_connect())
            # except bnr.proxy.DeviceNotFound:
            except asyncio.CancelledError:
                raise
            except Exception:
                _L().debug('Error connecting to DropBot.', exc_info=True)
                yield asyncio.From(asyncio.sleep(.1))
                continue

            def co_connect(name):
                def _wrapped(sender, **message):
                    @asyncio.coroutine
                    def co_callback(message):
                        listeners = signals.signal(name).send('keep_alive',
                                                              **message)
                        yield asyncio.From(asyncio.gather(*(l[1] for l in
                                                            listeners)))

                    return loop.call_soon_threadsafe(loop.create_task,
                                                     co_callback(message))
                return _wrapped

            for name_i in DROPBOT_SIGNAL_NAMES:
                dropbot.signals.signal(name_i).connect(co_connect(name_i),
                                                       weak=False)

            dropbot.signals.signal('output_enabled')\
                .connect(co_connect('chip-inserted'), weak=False)
            dropbot.signals.signal('output_disabled')\
                .connect(co_connect('chip-removed'), weak=False)

            responses = signals.signal('connected').send('keep_alive',
                                                         dropbot=dropbot)
            yield asyncio.From(asyncio.gather(*(r[1] for r in responses)))

            OUTPUT_ENABLE_PIN = 22
            # Chip may have been inserted before connecting, so
            # `chip-inserted` event may have been missed.
            # Explicitly check if chip is inserted by reading **active low**
            # `OUTPUT_ENABLE_PIN`.
            if dropbot.digital_read(OUTPUT_ENABLE_PIN):
                co_connect('chip-removed')({})
            else:
                co_connect('chip-inserted')({})

            disconnected = asyncio.Event()

            dropbot.serial_signals.signal('disconnected')\
                .connect(lambda *args:
                         loop.call_soon_threadsafe(disconnected.set),
                         weak=False)

            yield asyncio.From(disconnected.wait())

            dropbot.terminate()
            responses = signals.signal('disconnected').send('keep_alive')
            yield asyncio.From(asyncio.gather(*(r[1] for r in responses)))
    finally:
        signals.signal('closed').send('keep_alive')
        if dropbot is not None:
            dropbot.terminate()
def execute_queries_async(self, queries, definitions=None, meta_filter=None,
                          exclude_modified=False):
    """Runs the given queries on the corpus asynchronously.

    Arguments:
        queries ([Query]): The queries to run.
        definitions (dict): A dictionary defining query terms.
        meta_filter (dict -> bool): A function taking file meta data and
            returning whether the file should be queried.
        exclude_modified (bool): Whether to exclude modified files from the
            query.

    Returns:
        [Result]: An iterator producing the results of the query.
    """
    log = logging.getLogger('uweclang.corpus.manager')
    results = []

    # Get filtered files from corpus.
    try:
        files = self.files(meta_filter=meta_filter,
                           exclude_modified=exclude_modified)
    except Exception as e:
        raise CorpusException(e)

    # Dictionary needed since `nonlocal` is not in Python 2.7.
    status = {'completed': 0, 'total': 0}

    log.debug('Executing query batch (async.)')

    # Function for searching a single file.
    def query_file(meta, tagged, index):
        results = []
        # Extract TaggedToken list from file.
        text = list(chain.from_iterable(uweclang.read_tagged_string(tagged)))

        # Execute search.
        try:
            for i, query in enumerate(queries):
                res = query.match(text, source_id=index,
                                  definitions=definitions)
                if res:
                    results.extend(res)
        except Exception as e:
            raise QueryExecutionError(e)

        # Update status variables.
        status['completed'] += 1
        log.debug('Completed file %d', index)
        percent = int(status['completed'] / status['total'] * 100)
        log.info('%d%% complete', percent)
        return results

    # Worker coroutine for running a file search.  (Yielding from the
    # executor future is required here; returning it directly would make
    # `gather` produce futures rather than query results.)
    @asyncio.coroutine
    def worker(meta, tagged, index):
        log.debug('Starting file %d', index)
        result = yield from loop.run_in_executor(None, query_file, meta,
                                                 tagged, index)
        return result

    # Create asynchronous task list.
    loop = asyncio.get_event_loop()
    tasks = []
    for index, (meta, tagged) in enumerate(files):
        log.debug('Added task %d', index)
        tasks.append(asyncio.ensure_future(worker(meta, tagged, index)))

    # Run tasks.
    status['total'] = len(tasks)
    log.info('Starting %d tasks.', status['total'])
    data = loop.run_until_complete(asyncio.gather(*tuple(tasks)))

    # Shut down event loop and logger.
    loop.close()
    logging.shutdown()

    results = (task.result() for task in tasks if task.result())
    return chain.from_iterable(results)
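# Design note: `query_file` does blocking parsing and matching, so each
# `worker` offloads it to the default thread-pool executor;
# `asyncio.gather` then waits on all executor futures concurrently from the
# event loop.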
def execute_steps(steps, signals=None):
    '''
    .. versionadded:: 2.32

    Parameters
    ----------
    steps : list[dict]
        List of plugin keyword argument dictionaries.
    signals : blinker.Namespace, optional
        Signals namespace where signals are sent through.

    Signals
    -------
    step-started
        Parameters::

        - ``i``: step index
        - ``plugin_kwargs``: plugin keyword arguments
        - ``steps_count``: total number of steps
    step-completed
        Parameters::

        - ``i``: step index
        - ``plugin_kwargs``: plugin keyword arguments
        - ``steps_count``: total number of steps
        - ``result``: list of plugin step return values
    '''
    if signals is None:
        signals = blinker.Namespace()

    for i, step_i in enumerate(steps):
        # Send notification that step has started.
        responses = signals.signal('step-started')\
            .send('execute_steps', i=i, plugin_kwargs=step_i,
                  steps_count=len(steps))
        yield asyncio.From(asyncio.gather(*(r[1] for r in responses)))

        # XXX Execute `on_step_run` coroutines in background thread
        # event-loop.
        try:
            done, pending = yield asyncio.From(execute_step(step_i))

            exceptions = []
            for d in done:
                try:
                    d.result()
                except Exception as exception:
                    exceptions.append(exception)
                    _L().debug('Error: %s', exception, exc_info=True)

            if exceptions:
                use_markup = False
                monospace_format = '<tt>%s</tt>' if use_markup else '%s'
                if len(exceptions) == 1:
                    message = (' ' + monospace_format % exceptions[0])
                elif exceptions:
                    message = ('\n%s' % '\n'.join(' - ' + monospace_format
                                                  % e for e in exceptions))
                raise RuntimeError('Error executing step:%s' % message)
        except asyncio.CancelledError:
            _L().debug('Cancelling protocol.', exc_info=True)
            raise
        except Exception as exception:
            _L().debug('Error executing step: `%s`', exception,
                       exc_info=True)
            raise
        else:
            # All plugins have completed the step.
            # Send notification that step has completed.
            responses = signals.signal('step-completed')\
                .send('execute_steps', i=i, plugin_kwargs=step_i,
                      result=[r.result() for r in done],
                      steps_count=len(steps))
            yield asyncio.From(asyncio.gather(*(r[1] for r in responses)))