def init_rx_rings(self):
    dev = self.dev

    dev.rdi.mini_rcb.disable_ring = 1
    logger.debug("mini receive producer ring disabled")

    self.rx_ring_vaddr, self.rx_ring_len = _init_xx_ring(self, tg.rbd)
    self.rx_ring_paddr = self.mm.get_paddr(self.rx_ring_vaddr)
    self.rx_buffers = [0] * self.rx_ring_len

    dev.rdi.std_rcb.host_addr_hi = self.rx_ring_paddr >> 32
    dev.rdi.std_rcb.host_addr_low = self.rx_ring_paddr & 0xffffffff
    dev.rdi.std_rcb.ring_size = self.rx_ring_len
    dev.rdi.std_rcb.max_frame_len = 0x600
    dev.rdi.std_rcb.nic_addr = 0x6000
    dev.rdi.std_rcb.disable_ring = 0
    logger.info("standard receive producer ring of size %d allocated at %x",
                self.rx_ring_len, self.rx_ring_vaddr)

    dev.rdi.jumbo_rcb.disable_ring = 1
    logger.debug("jumbo receive producer ring disabled")

    dev.hpmb.box[tg.mb_rbd_standard_producer].low = 0
    self._std_rbd_pi = 0
    self._std_rbd_ci = 0

    producers = []
    for i in range(self.rx_ring_len):
        producers += [asyncio.ensure_future(produce_rxb(self, i))]
    asyncio.wait(producers)

    self._std_rbd_pi = self.rx_ring_len - 1
    dev.hpmb.box[tg.mb_rbd_standard_producer].low = self._std_rbd_pi
def arrive_device(driver, dev):
    logger.info("device arrival initiated")
    driver.device = dev
    dev.init()
    dev.nvram.acquire_lock()
    dev.reset()
    yield From(msleep(0.5))

    setup_steps = [
        _pci_setup,
        _msi_setup,
        _hc_setup,
        _grc_setup,
        _bufman_setup,
        _emac_setup,
        _rbdi_setup,
        _rx_ring_setup,
    ]
    tasks = []
    for step in setup_steps:
        tasks += [asyncio.ensure_future(step(driver))]
    try:
        # the wait must be yielded, otherwise the setup tasks never run here
        yield From(asyncio.wait(tasks))
    except Exception:
        print "there was an exception!"
    logger.info("device arrival concluded")
def start(cmd, input=None, **kwds):
    kwds['stdout'] = PIPE
    kwds['stderr'] = PIPE
    if input is None and 'stdin' not in kwds:
        kwds['stdin'] = None
    else:
        kwds['stdin'] = PIPE
    proc = yield From(asyncio.create_subprocess_shell(cmd, **kwds))

    tasks = []
    if input is not None:
        tasks.append(send_input(proc.stdin, input))
    else:
        print('No stdin')
    if proc.stderr is not None:
        tasks.append(log_errors(proc.stderr))
    else:
        print('No stderr')
    if proc.stdout is not None:
        tasks.append(read_stdout(proc.stdout))
    else:
        print('No stdout')

    if tasks:
        # feed stdin while consuming stdout to avoid hang
        # when stdin pipe is full
        yield From(asyncio.wait(tasks))

    exitcode = yield From(proc.wait())
    print("exit code: %s" % exitcode)
def asyloop(loop, sock, camera):
    asyncio.set_event_loop(loop)
    tasks = [
        # asyncio.async(video()),
        asyncio.async(slackServer(sock, camera)),
    ]
    loop.run_until_complete(asyncio.wait(tasks))
def loop_ips(ip, log=False):
    """Loop through the IPs from ch_ip up to 255, probe each address with
    det_ccast, and return a list of potential Chromecasts."""
    active_ccasts = []  # detected Chromecasts stored here
    loop = asyncio.get_event_loop()
    if version.startswith('3'):
        # if python3, import the py3 variant to avoid syntax issues
        from chrome_cut.fix3 import det_ccast as det_ccast3
        tasks = [
            det_ccast3('.'.join(ip.split('.')[0:-1]) + '.' + str(i), log)
            for i in range(1, 256)
        ]
    else:
        tasks = [
            det_ccast(  # feeding the range of ips to the async function
                '.'.join(ip.split('.')[0:-1]) + '.' + str(i), log)
            for i in range(1, 256)
        ]
    results = loop.run_until_complete(asyncio.gather(asyncio.wait(tasks)))
    # loop.close() should be deferred to just before exit
    # FIXME: register loop.close() to before exit
    register(loop.close)
    for result in results[0][0]:  # looking for successful ones
        global counter
        counter = 0  # clearing up the global counter
        if result.result()[0]:
            active_ccasts.append(result.result()[1])
    return active_ccasts if len(active_ccasts) >= 1 else None
def run_main_loop(G):
    '''
    This runs the stop-inhibition/visual/audio part of the paradigm using the
    asyncio replacement trollius. Before and after, we can still present
    other stimuli.
    '''
    # something like this:
    # mainClock=clock.Clock()
    # mainClockContainer[0]=mainClock
    # put it into my list, that double-serves as a pointer

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    # tasks = [
    #     asyncio.async(handleVisual()),
    #     asyncio.async(handleGonogo()),
    #     asyncio.async(handleEscape()),
    # ]

    tasks_dbg = [
        # asyncio.async(handle_exception(test_it,G,loop)),
        asyncio.async(handle_exception(handle_audio, G, loop)),
        asyncio.async(handle_exception(handle_visual, G, loop)),
        asyncio.async(handle_exception(handle_gonogo, G, loop)),
        # asyncio.async(handle_exception(handle_visual,loop)),
    ]

    tasks = [
        # asyncio.async(test_it(G)),
        # asyncio.async(handle_audio(G))
    ]

    # so to debug, just run tasks_dbg instead of tasks.
    loop.run_until_complete(asyncio.wait(tasks_dbg))
    loop.close()
def execute(self, sqs, db, queue_url, table, stats):
    tasks = []
    for key in stats:
        stats_per_key = stats[key]
        tasks.append(self._one_key_request(
            sqs, db, queue_url, table, stats_per_key))
    self.loop.run_until_complete(asyncio.wait(tasks))
def go(self):
    coroutines = [
        work(self.work_q, self.send_q, self.data, self.metadata_addr,
             self.address, self.loop),
        control(self.control_q, {
            'compute': self.work_q,
            'send': self.send_q,
            'get-data': self.data_q,
            'put-data': self.data_q,
            'delete-data': self.data_q,
            'ping': self.pingpong_q
        }),
        send(self.send_q, self.outgoing_q, self.signal_q),
        pingpong(self.pingpong_q, self.send_q),
        comm(self.ip, self.port, self.bind_ip, self.signal_q, self.control_q,
             self.outgoing_q, self.loop, self.context),
        manage_data(self.data_q, self.send_q, self.data, self.metadata_addr,
                    self.address)
    ]
    try:
        yield From(
            asyncio.wait(coroutines, return_when=asyncio.FIRST_COMPLETED))
    except:
        import pdb
        pdb.set_trace()
    finally:
        self.close()
        print("Closing")
        yield From(asyncio.gather(*coroutines))
def test_task_done(self):
    q = asyncio.JoinableQueue(loop=self.loop)
    for i in range(100):
        q.put_nowait(i)

    non_local = {'accumulator': 0}

    # Two workers get items from the queue and call task_done after each.
    # Join the queue and assert all items have been processed.
    running = True

    @asyncio.coroutine
    def worker():
        while running:
            item = yield From(q.get())
            non_local['accumulator'] += item
            q.task_done()

    @asyncio.coroutine
    def test():
        tasks = [asyncio.Task(worker(), loop=self.loop)
                 for index in range(2)]
        yield From(q.join())
        raise Return(tasks)

    tasks = self.loop.run_until_complete(test())
    self.assertEqual(sum(range(100)), non_local['accumulator'])

    # close running generators
    running = False
    for i in range(len(tasks)):
        q.put_nowait(0)
    self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))
def comm(ip, port, bind_ip, signal_q, control_q, outgoing_q,
         loop=None, context=None):
    """ Communications coroutine

    Input Channels:
        ZMQ router: from outside world
        signal_q: to break waits on the router
        outgoing_q: data that needs to be sent out on the router

    Output Channels:
        ZMQ router: to the outside world
        control_q: put messages from outside world here for handling

    Interacts with: send, control
    """
    loop = loop or asyncio.get_event_loop()
    context = context or zmq.Context()
    router = context.socket(zmq.ROUTER)
    router.bind('tcp://%s:%d' % (bind_ip, port))
    dealer = context.socket(zmq.DEALER)
    dealer.connect('tcp://127.0.0.1:%d' % port)

    wait_signal = Task(signal_q.get(), loop=loop)

    while True:
        wait_router = delay(loop, router.recv_multipart)
        [first], [other] = yield From(
            asyncio.wait([wait_router, wait_signal],
                         return_when=asyncio.FIRST_COMPLETED))

        if first is wait_signal:            # Interrupt socket recv
            dealer.send(b'break')
            addr, data = yield From(wait_router)  # should be fast
            assert data == b'break'

        while not outgoing_q.empty():       # Flow data out
            addr, msg = outgoing_q.get_nowait()
            router.send_multipart([addr, msg])
            print("Message sent")

        if first is wait_signal:            # Handle internal messages
            msg = wait_signal.result()
            if msg == b'close':
                control_q.put_nowait((None, b'close'))
                break
            elif msg == b'interrupt':
                wait_signal = Task(signal_q.get(), loop=loop)
                continue
        elif first is wait_router:          # Handle external messages
            addr, byts = wait_router.result()
            msg = loads(byts)
            print("Communication received: %s" % str(msg))
            control_q.put_nowait((addr, msg))

    router.close(linger=2)
    dealer.close(linger=2)
    raise Return("Comm done")
def async_flush(self):
    """Asynchronously wait for all enqueued heaps to be sent. Note that this
    only waits for heaps passed to :meth:`async_send_heap` prior to this
    call, not ones added while waiting."""
    future = self._last_queued_future
    if future is not None:
        yield From(trollius.wait([future]))
def test_queue_overload(self):
    http = HTTPConnectionPool('httpbin.org', maxsize=3, block=True, timeout=3)
    testLoop = asyncio.get_event_loop()
    testLoop.set_debug(True)
    global test_queue_overload_count
    test_queue_overload_count = 0

    @asyncio.coroutine
    def get_page():
        global test_queue_overload_count
        try:
            resp = yield From(http.request('GET', '/delay/1', pool_timeout=3))
            pg = yield From(resp.data)
            self.assertTrue(b'Connection' in pg, pg)
        except EmptyPoolError:
            pass
        except Exception as e:
            raise
        else:
            test_queue_overload_count += 1

    pageGetters = [get_page(), get_page(), get_page(), get_page(), get_page()]
    testLoop.run_until_complete(
        asyncio.wait(pageGetters, return_when=asyncio.ALL_COMPLETED))
    self.assertGreater(test_queue_overload_count, 4,
                       'not all page_getters ran')
def messages(self, sqs, queue_url, message_count):
    queue = sqs.Queue(queue_url)
    num_of_calls = message_count // batch_count
    tasks = []
    for i in range(num_of_calls):
        tasks.append(self._one_request(queue))
    self.loop.run_until_complete(asyncio.wait(tasks))
def start_write_loops(file_pairs, interval, random_variation, no_loop):
    tasks = []
    if not 0.1 < interval < 100.0:
        raise RuntimeError('Invalid time interval value: {}'.format(interval))
    loop = trollius.get_event_loop()
    for input_file, output_file in file_pairs:
        task = trollius.ensure_future(
            log_write_loop(input_file, output_file, interval,
                           random_variation, no_loop))
        tasks.append(task)
    loop.run_until_complete(trollius.wait(tasks))
def _available_devices(ports=None, baudrate=9600, timeout=None,
                       settling_time_s=0.):
    '''
    Request list of available serial devices, including device identifier
    (if available).

    .. note::
        Asynchronous co-routine.

    Parameters
    ----------
    ports : pd.DataFrame, optional
        Table of ports to query (in format returned by
        :func:`serial_device.comports`).

        **Default: all available ports**
    baudrate : int, optional
        Baud rate to use for device identifier request.

        **Default: 9600**
    timeout : float, optional
        Maximum number of seconds to wait for a response from each serial
        device.
    settling_time_s : float, optional
        Time to wait before writing device ID request to serial port.

    Returns
    -------
    pd.DataFrame
        Specified :data:`ports` table updated with ``baudrate``,
        ``device_name``, and ``device_version`` columns.

    .. versionchanged:: 0.48.4
        Make ports argument optional.

    .. versionchanged:: 0.51.2
        Add ``settling_time_s`` keyword argument.
    '''
    if ports is None:
        ports = sd.comports(only_available=True)

    if not ports.shape[0]:
        # No ports
        raise asyncio.Return(ports)

    futures = [_read_device_id(port=name_i, baudrate=baudrate,
                               settling_time_s=settling_time_s)
               for name_i in ports.index]
    done, pending = yield asyncio.From(asyncio.wait(futures, timeout=timeout))

    results = [task_i.result() for task_i in done
               if task_i.result() is not None]
    if results:
        df_results = pd.DataFrame(results).set_index('port')
        df_results = ports.join(df_results)
    else:
        df_results = ports
    raise asyncio.Return(df_results)
def execute(self, sqs, queue_url, ids):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.set_default_executor(self.executor)
    id_groups = group_by_10(ids)
    tasks = []
    for id_group in id_groups:
        tasks.append(self._one_request(loop, sqs, queue_url, id_group))
    loop.run_until_complete(asyncio.wait(tasks))
def start(options):
    gait_driver = MechDriver(options)
    driver_task = Task(gait_driver.run())
    input_task = Task(handle_input(gait_driver))

    done, pending = yield From(
        asyncio.wait([driver_task, input_task],
                     return_when=asyncio.FIRST_EXCEPTION))
    for x in done:
        x.result()
def _run_workers(self):
    '''Run the consumers.

    Coroutine.
    '''
    self._running = True
    self._producer_task = trollius.async(self._run_producer_wrapper())
    worker_tasks = self._worker_tasks

    while self._running:
        while len(worker_tasks) < self.__concurrent:
            worker_task = trollius.async(self._run_worker())
            worker_tasks.add(worker_task)

        wait_coroutine = trollius.wait(
            worker_tasks, return_when=trollius.FIRST_COMPLETED)
        done_tasks = (yield From(wait_coroutine))[0]

        for task in done_tasks:
            task.result()
            worker_tasks.remove(task)

    _logger.debug('Exited workers loop.')

    if worker_tasks:
        _logger.debug('Waiting for workers to stop.')
        yield From(trollius.wait(worker_tasks))

    _logger.debug('Waiting for producer to stop.')

    if self._item_get_semaphore.locked():
        _logger.warning(__(
            gettext.ngettext(
                'Discarding {num} unprocessed item.',
                'Discarding {num} unprocessed items.',
                self._token_queue.qsize()
            ),
            num=self._token_queue.qsize()
        ))
        self._item_get_semaphore.release()

    yield From(self._producer_task)
def _cancel_after_first_completed(tasks):
    while tasks:
        done, pending = yield asyncio.From(
            asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED,
                         loop=loop))
        for task in done:
            info(task)
            assert task in tasks
            tasks.remove(task)
        for task in pending:
            task.cancel()
def main():
    t0 = time.time()
    loop = asyncio.get_event_loop()
    tasks = [wget1(host) for host in urls]
    loop.run_until_complete(asyncio.wait(tasks))
    loop.close()
    elapsed = time.time() - t0
    msg = '\n{} flags downloaded in {:.2f}s'
    print(msg.format(len(urls), elapsed))
def onJoin(self, details):
    print("Session Joined.")

    # Setting variables
    self.lastServoValue = 417   # Assumes it starts in the middle
    self.servoMin = 315         # Min pulse length out of 4096
    self.servoMax = 520         # Max pulse length out of 4096
    self.servoMiddle = 417      # middle servo value
    self.servoChannel = 3
    print "What is happening????"
    self.motorMiddle = 1500
    self.motorChannel = 2

    self.subscribe(self.joyMonitor, 'aero.near.joystream')
    print "joystream ok"
    # subscribe to methods to prevent register conflicts
    self.subscribe(self.honkCommand, 'aero.near.honkHorn')
    print "honk ok"
    self.subscribe(self.emergencyStop, 'aero.near.emergStop')
    print "emergstop ok"
    self.subscribe(self.manualOverride, 'aero.near.override')
    print "About to make the loop"

    self.gps_data = {'latitude': 0, 'longitude': 0, 'heading': 0, 'speed': 0}

    self.loop = asyncio.get_event_loop()
    # self.loop.stop()
    # future = asyncio.Future()
    # print "the future exists"
    # asyncio.async(self.gpsUpdate())
    # self.loop.run_until_complete(future)
    # self.loop = asyncio.new_event_loop()
    # tasks = [
    #     asyncio.async(self.honk()),
    #     # asyncio.async(self.lidarRead())]
    #     # asyncio.async(self.gpsUpdate())]
    #     # asyncio.async(self.internet_on())]
    # print tasks
    # swag.system('cls' if swag.name == 'nt' else 'clear')
    # try:
    self.loop = asyncio.get_event_loop()
    tasks = [
        asyncio.async(self.netDisconnect())]
    print tasks
    try:
        done, pending = yield self.loop.run_until_complete(asyncio.wait(tasks))
    except Exception as e:
        print e
        print tasks
    # print "running"
    self.loop.close()
    # done, pending = yield self.loop.run_until_complete(asyncio.wait(tasks))
    # except Exception as e:
    #     print e
    #     print tasks
    print "running"
def onJoin(self, details):
    # print("Session Joined.")
    # res = yield self.call('aero.near.checkStatus')
    # print("Got result: {}".format(res))

    # servos - initiating variables used in servo movement
    self.lastServoValue = 417  # Assumes it starts in the middle
    self.lastMotorValue = 307.2
    self.pwm = PWM(0x40, debug=True)
    self.servoMin = 250     # Min pulse length out of 4096
    self.servoMax = 470     # Max pulse length out of 4096
    self.servoMiddle = 350  # middle servo value
    self.pwm.setPWMFreq(50)  # Set frequency to 60 Hz
    self.servoChannel = 3
    # have vehicle wheels turn to center
    self.pwm.setPWM(self.servoChannel, 0, self.servoMiddle)

    # motor - initiating the motor values
    self.motorMin = 230
    self.motorMiddle = 336
    self.motorMax = 400
    self.motorChannel = 0
    self.pwm.setPWM(self.motorChannel, 0, 0)
    self.maxPWMChange = 20  # For interpolation

    # big ol' subscribing block
    self.subscribe(self.joyMonitor, 'aero.near.joystream')
    self.subscribe(self.honkCommand, 'aero.near.honkHorn')
    self.subscribe(self.emergencyStop, 'aero.near.emergStop')
    self.subscribe(self.manualOverride, 'aero.near.override')
    self.subscribe()
    self.register(checkStatus, u'aero.near.checkStatus')

    # declaring the dictionary for gps values
    self.gps_data = {'latitude': 0, 'longitude': 0, 'heading': 0, 'speed': 0}

    # clearing the screen
    # os.system('cls' if os.name == 'nt' else 'clear')

    # creating and running the loop
    self.loop = asyncio.get_event_loop()
    tasks = [
        asyncio.async(self.gpsUpdate()),
        asyncio.async(self.honk()),
        asyncio.async(self.lidarRead()),
        asyncio.async(self.netDisconnect())]
    print tasks
    try:
        done, pending = yield self.loop.run_until_complete(asyncio.wait(tasks))
    except Exception as e:
        print e
        print tasks
    # print "running"
    self.loop.close()
def run(self): """start the tourbillon agent""" logger.info('starting tourbillon...') self._loop.add_signal_handler(signal.SIGINT, self.stop) self._loop.add_signal_handler(signal.SIGTERM, self.stop) self._load_tasks() self._aio_run_event.set() self._thr_run_event.set() logger.info('tourbillon started') self._loop.run_until_complete(asyncio.wait(self._tasks)) logger.info('tourbillon shutdown completed')
def test_exception_waiter(self):
    stream = asyncio.StreamReader(loop=self.loop)

    @asyncio.coroutine
    def set_err():
        self.loop.call_soon(stream.set_exception, ValueError())

    t1 = asyncio.Task(stream.readline(), loop=self.loop)
    t2 = asyncio.Task(set_err(), loop=self.loop)

    self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))

    self.assertRaises(ValueError, t1.result)
def get_stock_data(self):
    threads = []
    for index in range(self.request_num):
        threads.append(self.get_stocks_by_range(index))
        log.debug("url:%s len:%s" % (self.sina_stock_api,
                                     len(self.stock_list[index])))
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    loop.run_until_complete(asyncio.wait(threads))
    log.debug("get_stock_data_loop")
    return self.format_response_data()
def run(self): """start the sentinella agent""" logger.info('starting sentinella...') self._loop.add_signal_handler(signal.SIGINT, self.stop) self._loop.add_signal_handler(signal.SIGTERM, self.stop) self._load_tasks() self._aio_run_event.set() self._thr_run_event.set() logger.info('sentinella started') if len(self._tasks) > 0: self._loop.run_until_complete(asyncio.wait(self._tasks)) else: logger.info('sentinella no tasks assigned') logger.info('sentinella shutdown completed')
def run(urls, concurrency, loop):
    urls = list(urls)
    pending = [loop.run_in_executor(None, get, url, self.headers)
               for url in urls[:concurrency]]
    rest = urls[concurrency:]
    while pending:
        done, pending = yield asyncio.From(
            asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED))
        while rest and len(pending) < concurrency:
            pending.add(loop.run_in_executor(None, get, rest.pop(),
                                             self.headers))
        for future in done:
            try:
                si = future.result()
                # callback(si, si.original_url)
            except Exception:
                logger.info("failed on url", exc_info=True)
def process_intents():
    try:
        done, pending = yield From(asyncio.wait(tasks, timeout=10))
        for fut in done:
            response = fut.result()
            if 'result' in response:
                ig_response[response['_model_type']][
                    response['_model']] = response['result']
        for task in pending:
            task.cancel()
    except Exception as e:
        logger.error(
            "[classify_intent_async::process_intents] intent/entity error: %s"
            % e)
def main():
    player = Player()
    player.play()
    loop = asyncio.get_event_loop()
    try:
        tasks = [
            # asyncio.async(stop_player(player, seconds=3)),
            asyncio.async(player.init()),
            asyncio.async(run_sensor(player))
        ]
        loop.run_until_complete(asyncio.wait(tasks))
    finally:
        player.stop()
        loop.close()
def main():
    server = None
    try:
        loop = trollius.get_event_loop()
        tasks = [trollius.async(handle_messages())]
        loop.run_until_complete(trollius.wait(tasks))
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    except trollius.ConnectionResetError:
        pass
    log.info("End Connection")
    loop.close()
    log.info("End")
def token_renewer(self):
    while True:
        loop = trollius.new_event_loop()
        trollius.set_event_loop(loop)
        tasks = list()
        for symbol in self.websockets.keys():
            ws = self.websockets[symbol]["ws"]
            if ws.open:
                if (datetime.now() - self.websockets[symbol]["renewed"]
                        ).total_seconds() > 180:
                    tasks.append(self.renew_token(symbol))
        if len(tasks) > 0:
            loop.run_until_complete(trollius.wait(tasks))
        loop.close()
        time.sleep(1)
def main():
    try:
        loop = trollius.get_event_loop()
        tasks = [
            trollius.async(handle_messages()),
        ]
        loop.run_until_complete(trollius.wait(tasks))
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    except trollius.ConnectionResetError as err:
        log.exception("ERROR: ConnectionResetError in main")
    except Exception:
        log.exception("ERROR: Exception in main")
    finally:
        log.info("End Connection")
        loop.close()
        log.info("End")
def first_connection(self):
    self.send_notice("*** Looking up your hostname...")
    self.send_notice("*** Checking Ident")
    loop = asyncio.get_event_loop()
    hosttask = loop.run_in_executor(None, socket.gethostbyaddr, self.addr)
    # asyncio.wait() expects an iterable of futures and returns (done, pending)
    done, _ = yield From(asyncio.wait([hosttask], timeout=5))
    hostname = hosttask.result() if hosttask in done else None
    print("Hostname was {}".format(hostname))
    lport = self.transport.get_extra_info('sockname')[1]
    rport = self.peer[1]
    request = "{}, {}".format(rport, lport)
    reply = IdentHandler().ident_connection(request, self.addr, 113)
    print("Ident Reply was {}".format(reply))
    if reply is None:
        self.send_notice("*** No Ident response")
        self.ident = "~{}".format(self.ident)
    self.prefix = "{}!{}@{}".format(self.nick, self.ident, self.addr)
    print("{} is now listed as {}".format(self.nick, self.prefix))
def main():
    '''
    Start the main async loop.
    '''
    try:
        loop = trollius.get_event_loop()
        tasks = [
            trollius.async(handle_messages()),
        ]
        loop.run_until_complete(trollius.wait(tasks))
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    except trollius.ConnectionResetError:
        pass
    LOG.info("End Connection")
    loop.close()
    LOG.info("End")
def test_over_host_max_limit_cycling(self):
    pool = ConnectionPool(max_host_count=10, max_count=10)

    @trollius.coroutine
    def con_fut():
        session = yield From(
            pool.session('localhost', self.get_http_port()))
        with session as connection:
            if connection.closed():
                yield From(connection.connect())

    futs = [con_fut() for dummy in range(20)]
    yield From(trollius.wait(futs))

    self.assertEqual(1, len(pool.host_pools))
    connection_pool_entry = list(pool.host_pools.values())[0]
    self.assertIsInstance(connection_pool_entry, HostPool)
    self.assertGreaterEqual(10, connection_pool_entry.count())
def get_stock_data(self, retry_count=3, pause=0.01):
    threads = []
    for index in range(self.request_num):
        threads.append(self.get_stocks_by_range(index))
        log.debug("url len:%s" % (len(self.stock_list[index])))
    if self.request_num == 0:
        threads.append(self.get_stocks_by_range(0))
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        loop.run_until_complete(asyncio.wait(threads))
        log.debug('get_stock_data_loop')
        return self.format_response_data()
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def token_sender(self):
    while True:
        self.logger.info("Entering chatty mode: ping the server every 55 seconds")
        start = datetime.now()
        tasks = list()
        loop = trollius.new_event_loop()
        trollius.set_event_loop(loop)
        for symbol in self.websockets.keys():
            ws = self.websockets[symbol]["ws"]
            if ws.open:
                tasks.append(ws.send("*" + self.websockets[symbol]["token"]))
        if len(tasks) > 0:
            loop.run_until_complete(trollius.wait(tasks))
        loop.close()
        self.logger.info("All messages sent. Elapsed: %s"
                         % (datetime.now() - start).total_seconds())
        time.sleep(55)
def to_asyncio_run(urllist, cmd):
    results = []
    # print "asyncio",

    @asyncio.coroutine
    def get_loop_cmd(cmd, url_s):
        loop = asyncio.get_event_loop()
        result = yield From(loop.run_in_executor(None, cmd, url_s))
        results.append(result)

    threads = []
    for url_s in urllist:
        threads.append(get_loop_cmd(cmd, url_s))
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    loop.run_until_complete(asyncio.wait(threads))
    return results
def run(self):
    kwargs = {}
    if self.options.serial_port:
        kwargs['serial_port'] = self.options.serial_port
    if self.options.model_name:
        kwargs['model_name'] = self.options.model_name
    self.servo = yield From(selector.select_servo(
        self.options.servo, **kwargs))

    self.driver = GaitDriver(self.gait, self.servo)

    idle_task = Task(self._make_idle())
    driver_task = Task(self.driver.run())

    done, pending = yield From(
        asyncio.wait([idle_task, driver_task],
                     return_when=asyncio.FIRST_EXCEPTION))
    for x in done:
        x.result()
@trollius.coroutine
def publish_loop(U):
    message = pygazebo.msg.pose_pb2.Pose()
    message.position.x = U.translate_speed
    message.orientation.z = rpy_to_quaternion(
        Rpy(0, 0, (degree_to_rad(U.rotate_speed)))).z
    while True:
        yield From(publisher.publish(message))
        yield From(trollius.sleep(0.1))


@trollius.coroutine
def server_loop(U):
    U.server_run()


if __name__ == '__main__':
    util = PyTango.Util(sys.argv)
    util.add_class(PyDevice, PyTrevorGazebo)
    U = PyTango.Util.instance()
    U.server_init()

    tasks = [
        trollius.Task(publish_loop(U)),
        trollius.Task(server_loop(U)),
    ]
    loop = trollius.get_event_loop()
    loop.run_until_complete(trollius.wait(tasks))
    loop.close()
logger = logging.getLogger('uweclang')
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(
    '[%(name)s] %(filename)s:%(lineno)d at %(asctime)s: '
    '%(levelname)s %(message)s',
    '%H:%M:%S'))
logger.addHandler(sh)
logger.setLevel(logging.DEBUG)


@asyncio.coroutine
def factorial(name, number):
    f = 1
    for i in range(2, number + 1):
        print("Task %s: Compute factorial(%s)..." % (name, i))
        logger.debug('test message')
        yield From(asyncio.sleep(1))
        f *= i
    print("Task %s: factorial(%s) = %s" % (name, number, f))


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    tasks = [
        asyncio.ensure_future(factorial("A", 2)),
        asyncio.ensure_future(factorial("B", 3)),
        asyncio.ensure_future(factorial("C", 4))]
    loop.run_until_complete(asyncio.wait(tasks))
    loop.close()
    logging.shutdown()
    print('Done')