async def revert(ctx):
    """Revert the bot's name and avatar back to the originals.

    Args:
        ctx: command invocation context (only used for command registration).
    """
    await bot.edit_profile(username="******")
    # Use a context manager so the image handle is closed instead of leaked.
    with open("mods/utils/images/other/original.jpg", "rb") as logo:
        # Bug fix: asyncio.sleep() returns a coroutine and does nothing
        # unless awaited -- the original bare call was a silent no-op.
        await asyncio.sleep(1)
        await bot.edit_profile(avatar=logo.read())
    await bot.say("ViralBot has successfully been reverted to its original!")
def run_worker(context):
    """Paranoid Pirate worker loop (ZMQ): serve requests from the queue,
    exchange heartbeats with it, and reconnect with exponential backoff
    when the queue stops answering.

    Old-style (pre-async/await) coroutine; drive it with ``yield from``.
    """
    poller = Poller()
    liveness = HEARTBEAT_LIVENESS
    interval = INTERVAL_INIT
    heartbeat_at = time.time() + HEARTBEAT_INTERVAL
    worker = yield from worker_socket(context, poller)
    cycles = 0
    while True:
        socks = yield from poller.poll(HEARTBEAT_INTERVAL * 1000)
        socks = dict(socks)
        # Handle worker activity on backend
        if socks.get(worker) == zmq.POLLIN:
            # Get message
            # - 3-part envelope + content -> request
            # - 1-part HEARTBEAT -> heartbeat
            frames = yield from worker.recv_multipart()
            if not frames:
                break  # Interrupted
            if len(frames) == 3:
                # Simulate various problems, after a few cycles
                cycles += 1
                if cycles > 3 and randint(0, 5) == 0:
                    print("I: Simulating a crash")
                    break
                if cycles > 3 and randint(0, 5) == 0:
                    print("I: Simulating CPU overload")
                    yield from asyncio.sleep(3)
                print("I: Normal reply")
                yield from worker.send_multipart(frames)
                # Any traffic from the queue proves it is alive.
                liveness = HEARTBEAT_LIVENESS
                yield from asyncio.sleep(1)  # Do some heavy work
            elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT:
                print("I: Queue heartbeat")
                liveness = HEARTBEAT_LIVENESS
            else:
                print("E: Invalid message: %s" % frames)
            # Successful contact resets the reconnect backoff.
            interval = INTERVAL_INIT
        else:
            # No traffic this poll period: burn one life, and when all
            # lives are gone tear down the socket and reconnect.
            liveness -= 1
            if liveness == 0:
                print("W: Heartbeat failure, can't reach queue")
                print("W: Reconnecting in %0.2fs..." % interval)
                yield from asyncio.sleep(interval)
                if interval < INTERVAL_MAX:
                    interval *= 2
                poller.unregister(worker)
                worker.setsockopt(zmq.LINGER, 0)
                worker.close()
                worker = yield from worker_socket(context, poller)
                liveness = HEARTBEAT_LIVENESS
        # Emit our own heartbeat at a fixed cadence.
        if time.time() > heartbeat_at:
            heartbeat_at = time.time() + HEARTBEAT_INTERVAL
            print("I: Worker heartbeat")
            yield from worker.send(PPP_HEARTBEAT)
def start_auto(self, sp_ext, en_ext, sp_bed, en_bed, fname):
    """Heat up the printer (bed first, then hotend), home, and start a print.

    Args:
        sp_ext: extruder temperature setpoint.
        en_ext: whether to heat the extruder at all.
        sp_bed: bed temperature setpoint.
        en_bed: whether to heat the bed at all.
        fname: path of the G-code file to print.

    Returns False (without doing anything) if a print is already running.
    Old-style coroutine; drive it with ``yield from``.
    """
    if self.gcode_file is not None:
        return False
    if en_bed:
        # Heat up bed first
        self.set_setpoint("bed", sp_bed)
        # Busy-wait (5 s poll) until the bed is within 10 degrees of target.
        while True:
            t_bed = self.get_temperature("bed")
            if t_bed > (sp_bed - 10):
                break
            yield from asyncio.sleep(5)
    if en_ext:
        # Bed near ok, so start hotend
        self.set_setpoint("ext", sp_ext)
        while True:
            t_ext = self.get_temperature("ext")
            if t_ext > (sp_ext - 10):
                break
            yield from asyncio.sleep(5)
    # Temperatures high enough to start homing
    self.reset()
    yield from self.execute_gcode("G28 X0 Y0")
    yield from self.wait_for_setpoints()
    yield from self.execute_gcode("G28 Z0")
    yield from self.print_file(fname)
async def on_message(self, message):
    """Relay a user's message to the chatbot and send back its reply.

    Only fires for users with an active AI session targeting the current
    channel; ignores the bot's own messages and '.'-prefixed commands.
    """
    # Only react to users with an active AI session in this channel.
    if message.author.id not in self.ai_target:
        return
    if self.ai_target[message.author.id] != message.channel.id:
        return
    if message.author == self.bot.user:
        return
    if message.content.startswith('.'):
        return
    check = await self.bot.funcs.command_check(message, 'off')
    if check:
        # The 'off' command ends the session.
        del self.ai_target[message.author.id]
        return
    # Sanitise the prompt: cap length, strip non-alphanumerics and mentions.
    ask_msg = message.clean_content[:899]
    ask_msg = re.sub('[^0-9a-zA-Z]+', ' ', ask_msg)
    ask_msg = ask_msg.replace('`', '')
    if len(message.mentions) != 0:
        for s in message.mentions:
            ask_msg = ask_msg.replace(s.mention, s.name)
    try:
        await self.bot.send_typing(message.channel)
        msg = "**{0}**\n".format(message.author.name) + str(chatbot.get_response(ask_msg))
        if len(msg) != 0:
            await self.bot.send_message(message.channel, msg)
    except Exception:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt
        # and SystemExit); any failure ends the session so it can't get stuck.
        del self.ai_target[message.author.id]
        return
    # Bug fix: asyncio.sleep() is a no-op unless awaited.
    await asyncio.sleep(1)
def get_vms(self, vm_confs):
    """Pick the least-loaded host with enough free memory and get VMs from it.

    :param vm_confs: job.runner.vms
    :returns: whatever the chosen host's ``get_vms`` yields.

    Old-style coroutine; drive it with ``yield from``. Blocks (with 30 s
    retries) until some host has capacity.
    """
    # Required memory = configured headroom + sum of requested VM sizes.
    memory_required = self.config.get("freemb", 1024)
    for cfg in vm_confs:
        memory_required += self.config["vms"][cfg["name"]]["memory"]
    best = None
    with (yield from self.get_vms_lock):
        # Rate-limit host polling to roughly one pass per 5 seconds.
        sleep = self.last + 5 - time.time()
        if sleep > 1:
            yield from asyncio.sleep(sleep)
        while True:
            # Shuffle so load spreads instead of always hitting hosts[0].
            random.shuffle(self.hosts)
            LOG.debug("Chosing from %s" % self.hosts)
            for host in self.hosts:
                yield from host.update_stats()
                if host.free >= memory_required and host.la < self.config.get("maxla", 4):
                    LOG.debug("Chosen host: %s" % host)
                    best = host
                    break
            if best:
                break
            LOG.info("All systems are overloaded. Waiting 30 seconds.")
            yield from asyncio.sleep(30)
        self.last = time.time()
    # Bug fix: return VMs from the *chosen* host ("best"), not whatever the
    # loop variable "host" happened to reference last. Today the two are
    # equal when we get here, but relying on loop-variable leakage is one
    # refactor away from a wrong-host bug.
    return (yield from best.get_vms(vm_confs))
async def fetch_task_and_analyze(self):
    """Fetch this task's definition (with retries), then download its run logs.

    Raises:
        TaskFetchFailedError: when the definition can't be fetched after
        5 attempts. Missing logs are only warned about, not fatal.
    """
    # Fetch task, retrying a few times in case of a timeout
    # Once the task is fetched, set as the definition and get the provisionerId
    for attempt in range(1, 6):
        full_task = await fetch_task(self.id)
        if full_task:
            self.definition = full_task
            break
        else:
            log.debug('%r definition fetch attempt %s failed, retrying in 10s...', self, attempt)
            # Bug fix: a bare sleep(10) either blocked the event loop
            # (time.sleep) or did nothing at all (unawaited asyncio.sleep);
            # inside a coroutine the retry delay must be awaited.
            await asyncio.sleep(10)
    else:
        raise TaskFetchFailedError()

    # No need to create the logs if the provisionerId is not supported
    if self.provisioner_id not in self.LOG_TEMPLATES:
        return

    # Download the logs and store them in this object
    self.logs = []
    for run in self.body['status']['runs']:
        url = self.LOG_TEMPLATES[self.provisioner_id].format(task_id=self.id, run_id=run['runId'])
        # Retry log grab in case of network instability
        for attempt in range(1, 6):
            task_log = await get_log(url, self.provisioner_id)
            if task_log:
                self.logs.append((run['runId'], task_log,))
                break
            else:
                log.debug('Attempt %s in getting %r run %s log failed, retrying in 10s...', attempt, self, run)
                await asyncio.sleep(10)
        else:
            log.warning('Could not retrieve log for %r run %s.', self, run)
            continue
def print_file(self, fname):
    """Start printing the G-code file at ``fname``.

    Returns:
        False if a print is already in progress, True once the file reader
        has been attached as the active print job.
    """
    if self.gcode_file is not None:
        # A print is already running; refuse to start another.
        return False
    print("Starting print:", fname)
    self.gcode_file = AIOFileReader(fname)
    # Bug fix: removed "asyncio.sleep(0)" -- in this synchronous method it
    # only created a never-awaited coroutine object (a silent no-op that
    # also triggers a "coroutine was never awaited" warning).
    return True
def preheat(self):
    """Drive extruder and/or bed PID heaters to their setpoints, then soak.

    Uses ``self.temp`` (extruder) and ``self.btemp`` (bed) as setpoints;
    a falsy value disables that heater. Blocks the calling thread, running
    the event loop one second at a time while polling temperatures.
    """
    if not self.temp and not self.btemp:
        return
    # Collect (heater-name, setpoint) pairs for every enabled heater.
    pids = []
    if self.temp:
        pids.append(("ext", self.temp))
    if self.btemp:
        pids.append(("bed", self.btemp))
    for name, sp in pids:
        self.printer.launch_pid(name, sp)
    # Poll until every heater is within 3 degrees of its setpoint.
    while True:
        leave = True
        for name, sp in pids:
            tmp = self.printer.get_temperature(name)
            if tmp < (sp - 3.0):
                leave = False
            print(name+": temp =", tmp, "sp =", sp, end=' ')
        print("")
        self.loop.run_until_complete(asyncio.sleep(1))
        if leave:
            break
    # Add some delay here to ensure good heat distribution/melting
    print("Setpoint reached.")
    for i in range(30):
        for name, sp in pids:
            tmp = self.printer.get_temperature(name)
            print(name+": temp =", tmp, "sp =", sp, end=' ')
        print("")
        self.loop.run_until_complete(asyncio.sleep(1))
def test_close_cancel(loop, test_client):
    """Cancelling an in-flight close() must still leave the websocket closed
    without surfacing an exception."""
    @asyncio.coroutine
    def handler(request):
        ws = web.WebSocketResponse()
        yield from ws.prepare(request)
        yield from ws.receive_bytes()
        yield from ws.send_str('test')
        # Park the handler so the client's close handshake stays pending.
        yield from asyncio.sleep(10, loop=loop)

    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = yield from test_client(app)
    resp = yield from client.ws_connect('/', autoclose=False)
    yield from resp.send_bytes(b'ask')
    text = yield from resp.receive()
    assert text.data == 'test'
    # Start closing, then cancel the close mid-flight.
    t = loop.create_task(resp.close())
    yield from asyncio.sleep(0.1, loop=loop)
    t.cancel()
    yield from asyncio.sleep(0.1, loop=loop)
    assert resp.closed
    assert resp.exception() is None
def test_wait_first_completed(self):
    """wait(FIRST_COMPLETED) returns as soon as the quickest task finishes,
    leaving the slower one pending."""
    def gen():
        # Scripted TestLoop timeline: the loop asks how long to advance.
        when = yield
        self.assertAlmostEqual(10.0, when)
        when = yield 0
        self.assertAlmostEqual(0.1, when)
        yield 0.1

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
    b = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
    task = asyncio.Task(
        asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED, loop=loop),
        loop=loop)

    done, pending = loop.run_until_complete(task)
    self.assertEqual({b}, done)
    self.assertEqual({a}, pending)
    self.assertFalse(a.done())
    self.assertTrue(b.done())
    self.assertIsNone(b.result())
    self.assertAlmostEqual(0.1, loop.time())

    # move forward to close generator
    loop.advance_time(10)
    loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_concurrent_complete(self):
    """A task finishing exactly at wait()'s timeout counts as done; the
    slower task is reported pending."""
    def gen():
        # Scripted TestLoop timeline (sleep deadlines, then the timeout).
        when = yield
        self.assertAlmostEqual(0.1, when)
        when = yield 0
        self.assertAlmostEqual(0.15, when)
        when = yield 0
        self.assertAlmostEqual(0.1, when)
        yield 0.1

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
    b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)

    done, pending = loop.run_until_complete(
        asyncio.wait([b, a], timeout=0.1, loop=loop))
    self.assertEqual(done, set([a]))
    self.assertEqual(pending, set([b]))
    self.assertAlmostEqual(0.1, loop.time())

    # move forward to close generator
    loop.advance_time(10)
    loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait(self):
    """wait() with no timeout returns once every task is done; waiting on
    already-finished tasks returns immediately."""
    def gen():
        # Scripted TestLoop timeline for the two sleeps.
        when = yield
        self.assertAlmostEqual(0.1, when)
        when = yield 0
        self.assertAlmostEqual(0.15, when)
        yield 0.15

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
    b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)

    @asyncio.coroutine
    def foo():
        done, pending = yield from asyncio.wait([b, a], loop=loop)
        self.assertEqual(done, set([a, b]))
        self.assertEqual(pending, set())
        return 42

    res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
    self.assertEqual(res, 42)
    self.assertAlmostEqual(0.15, loop.time())

    # Doing it again should take no time and exercise a different path.
    res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
    self.assertAlmostEqual(0.15, loop.time())
    self.assertEqual(res, 42)
def test_wait_with_global_loop(self):
    """wait() without an explicit loop= argument picks up the globally-set
    event loop."""
    def gen():
        # Scripted TestLoop timeline for the two sleeps.
        when = yield
        self.assertAlmostEqual(0.01, when)
        when = yield 0
        self.assertAlmostEqual(0.015, when)
        yield 0.015

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    a = asyncio.Task(asyncio.sleep(0.01, loop=loop), loop=loop)
    b = asyncio.Task(asyncio.sleep(0.015, loop=loop), loop=loop)

    @asyncio.coroutine
    def foo():
        # Deliberately no loop= here: must resolve via the global loop.
        done, pending = yield from asyncio.wait([b, a])
        self.assertEqual(done, set([a, b]))
        self.assertEqual(pending, set())
        return 42

    # Install the test loop globally, and always restore afterwards.
    asyncio.set_event_loop(loop)
    try:
        res = loop.run_until_complete(
            asyncio.Task(foo(), loop=loop))
    finally:
        asyncio.set_event_loop(None)

    self.assertEqual(res, 42)
def exec_test():
    """GUI test body: verify the identities tab issues the expected wot/
    blockchain HTTP requests when searching for "doe".

    Closure over ``mock`` (HTTP mock), ``identities_tab``, ``self`` and
    ``close_dialog`` from the enclosing test.
    """
    # Let the initial page load fire its requests.
    yield from asyncio.sleep(2)
    urls = [mock.get_request(i).url for i in range(0, 7)]
    self.assertTrue(
        "/wot/certifiers-of/7Aqw6Efa9EzE7gtsc8SveLLrM7gm6NEGoywSv4FJx6pZ" in urls,
        msg="Not found in {0}".format(urls),
    )
    self.assertTrue(
        "/wot/lookup/7Aqw6Efa9EzE7gtsc8SveLLrM7gm6NEGoywSv4FJx6pZ" in urls,
        msg="Not found in {0}".format(urls)
    )
    self.assertTrue(
        "/wot/certified-by/7Aqw6Efa9EzE7gtsc8SveLLrM7gm6NEGoywSv4FJx6pZ" in urls,
        msg="Not found in {0}".format(urls),
    )
    # requests 1 to 3 are for getting certifiers-of and certified-by
    # on john, + a lookup
    QTest.keyClicks(identities_tab.edit_textsearch, "doe")
    QTest.mouseClick(identities_tab.button_search, Qt.LeftButton)
    yield from asyncio.sleep(2)
    req = 7
    self.assertEqual(mock.get_request(req).method, "GET")
    self.assertEqual(
        mock.get_request(req).url,
        "/blockchain/memberships/FADxcH5LmXGmGFgdixSes6nWnC4Vb4pRUBYT81zQRhjn"
    )
    req += 1
    self.assertEqual(identities_tab.table_identities.model().rowCount(), 1)
    yield from asyncio.sleep(2)
    self.lp.call_soon(close_dialog)
async def _sendUpdateRequest(self, when, version, upgrade_id, failTimeout):
    """Send the upgrade message to the node control tool, retrying on
    failure; on exhausted retries mark the upgrade failed, otherwise arm
    the upgrade-timeout watchdog.

    Args:
        when: scheduled time of the upgrade (for failure reporting).
        version: target version being sent.
        upgrade_id: identifier of this upgrade attempt.
        failTimeout: minutes to wait before declaring the upgrade timed out.
    """
    retryLimit = self.retry_limit
    while retryLimit:
        try:
            msg = UpgradeMessage(version=version).toJson()
            logger.info("Sending message to control tool: {}".format(msg))
            await self._open_connection_and_send(msg)
            break
        except Exception as ex:
            logger.warning("Failed to communicate to control tool: {}"
                           .format(ex))
            # Bug fix: asyncio.sleep() must be awaited; without "await" the
            # retry back-off never happened and retries fired back-to-back.
            await asyncio.sleep(self.retry_timeout)
            retryLimit -= 1
    if not retryLimit:
        self._upgrade_failed(version=version,
                             scheduled_on=when,
                             upgrade_id=upgrade_id,
                             reason="problems in communication with "
                                    "node control service")
        self._unscheduleUpgrade()
        self._upgradeFailedCallback()
    else:
        logger.info("Waiting {} minutes for upgrade to be performed"
                    .format(failTimeout))
        timesUp = partial(self._declareTimeoutExceeded, when, version, upgrade_id)
        self._schedule(timesUp, self.get_timeout(failTimeout))
def test_connect_spawn_emits_always(self):
    """A SPAWN_WITH_LOOP-connected coroutine receives every fire(), but only
    after the event loop has had a chance to run the spawned tasks."""
    signal = AdHocSignal()
    mock = CoroutineMock()

    @asyncio.coroutine
    def coro(*args, **kwargs):
        yield from mock(*args, **kwargs)

    signal.connect(coro, AdHocSignal.SPAWN_WITH_LOOP(None))

    signal.fire("a", 1, b="c")
    signal.fire("x")

    # Nothing has run yet: the spawned tasks are still scheduled.
    self.assertSequenceEqual(mock.mock_calls, [])

    # Two zero-sleeps give both spawned tasks a loop iteration each.
    run_coroutine(asyncio.sleep(0))
    run_coroutine(asyncio.sleep(0))

    self.assertSequenceEqual(
        mock.mock_calls,
        [
            unittest.mock.call("a", 1, b="c"),
            unittest.mock.call("x"),
        ]
    )
def test_async_parallel_updates_with_one(hass):
    """Test parallel updates with 1 (sequential)."""
    updates = []
    test_lock = asyncio.Lock(loop=hass.loop)
    # Semaphore of 1 forces the three entity updates to run one at a time.
    test_semaphore = asyncio.Semaphore(1, loop=hass.loop)

    # Hold the lock so each async_update blocks until the test releases it.
    yield from test_lock.acquire()

    class AsyncEntity(entity.Entity):

        def __init__(self, entity_id, count):
            """Initialize Async test entity."""
            self.entity_id = entity_id
            self.hass = hass
            self._count = count
            self.parallel_updates = test_semaphore

        @asyncio.coroutine
        def async_update(self):
            """Test update."""
            updates.append(self._count)
            # Park here until the test releases test_lock.
            yield from test_lock.acquire()

    ent_1 = AsyncEntity("sensor.test_1", 1)
    ent_2 = AsyncEntity("sensor.test_2", 2)
    ent_3 = AsyncEntity("sensor.test_3", 3)

    ent_1.async_schedule_update_ha_state(True)
    ent_2.async_schedule_update_ha_state(True)
    ent_3.async_schedule_update_ha_state(True)

    # Spin the loop until exactly one update has started.
    while True:
        if len(updates) == 1:
            break
        yield from asyncio.sleep(0, loop=hass.loop)

    assert len(updates) == 1
    assert updates == [1]

    # Releasing the lock lets the parked update finish, freeing the
    # semaphore for the next entity.
    test_lock.release()

    while True:
        if len(updates) == 2:
            break
        yield from asyncio.sleep(0, loop=hass.loop)

    assert len(updates) == 2
    assert updates == [1, 2]

    test_lock.release()

    while True:
        if len(updates) == 3:
            break
        yield from asyncio.sleep(0, loop=hass.loop)

    assert len(updates) == 3
    assert updates == [1, 2, 3]

    test_lock.release()
def parse_content(self, url):
    """Fetch ``url``, strip non-content markup, store the page text and
    queue any discovered links for crawling.

    Old-style coroutine; drive it with ``yield from``. Silently gives up
    (after printing 'Error') when the fetch fails.
    """
    # Politeness delay before hitting the server.
    yield from asyncio.sleep(3.0)
    logging.info('Extracting content for: %s', url)
    #extract page content
    try:
        # NOTE(review): urlopen() is a blocking call inside a coroutine --
        # it will stall the event loop for the duration of the fetch.
        response = urllib.request.urlopen(url)
        content = response.read()
    except URLError:
        print('Error')
        return
    yield from asyncio.sleep(5.0)
    logging.info('Start to parse content for: %s', url)
    soup = BeautifulSoup(content, 'html.parser')
    #parse and store content of pages
    for s in soup(['style', 'script', '[document]', 'had', 'title']):
        s.extract()
    logging.info('Storing Content in for: %s', url)
    # Fire-and-forget: persist the page text in the background.
    asyncio.Task(Page(url, soup.getText()).save())
    logging.info('Updated queue with new links: %s', url)
    asyncio.Task(self._extract_links(soup))
    logging.info('Finish to parse content for: %s', url)
def test_consume(self):
    """Publish one message through a fanout exchange and verify the
    consumer callback receives it with a well-formed envelope."""
    # declare
    yield from self.channel.queue_declare("q", exclusive=True, no_wait=False)
    yield from self.channel.exchange_declare("e", "fanout")
    yield from self.channel.queue_bind("q", "e", routing_key='')

    # get a different channel
    channel = yield from self.create_channel()

    # publish
    yield from channel.publish("coucou", "e", routing_key='',)

    # assert there is a message to consume
    queues = self.list_queues()
    self.assertIn("q", queues)
    self.assertEqual(1, queues["q"]['messages'])

    # Give the broker time to route the message before consuming.
    yield from asyncio.sleep(2, loop=self.loop)
    # start consume
    yield from channel.basic_consume(self.callback, queue_name="q")
    # required ?
    yield from asyncio.sleep(2, loop=self.loop)
    self.assertTrue(self.consume_future.done())
    # get one
    body, envelope, properties = yield from self.get_callback_result()
    self.assertIsNotNone(envelope.consumer_tag)
    self.assertIsNotNone(envelope.delivery_tag)
    self.assertEqual(b"coucou", body)
    self.assertIsInstance(properties, Properties)
def test_connections_max_idle_ms(self):
    """An active connection survives past max_idle_ms; a truly idle one is
    closed once the idle window elapses."""
    host, port = self.kafka_host, self.kafka_port
    conn = yield from create_conn(
        host, port, loop=self.loop, max_idle_ms=200)
    self.assertEqual(conn.connected(), True)
    yield from asyncio.sleep(0.1, loop=self.loop)
    # Do some work
    request = MetadataRequest([])
    yield from conn.send(request)
    yield from asyncio.sleep(0.15, loop=self.loop)
    # Check if we're stil connected after 250ms, as we were not idle
    self.assertEqual(conn.connected(), True)
    # It shouldn't break if we have a long running call either
    readexactly = conn._reader.readexactly
    with mock.patch.object(conn._reader, 'readexactly') as mocked:
        @asyncio.coroutine
        def long_read(n):
            # Simulate a slow read longer than the idle window.
            yield from asyncio.sleep(0.2, loop=self.loop)
            return (yield from readexactly(n))
        mocked.side_effect = long_read
        yield from conn.send(MetadataRequest([]))
    self.assertEqual(conn.connected(), True)
    # Now stay genuinely idle past max_idle_ms: connection must drop.
    yield from asyncio.sleep(0.2, loop=self.loop)
    self.assertEqual(conn.connected(), False)
def test_061_auto_attach_on_reconnect(self):
    """A persistently-assigned USB device must re-attach to the frontend
    after the backend gadget is removed and recreated."""
    self.frontend.start()
    usb_list = self.backend.devices['usb']
    ass = qubes.devices.DeviceAssignment(self.backend, self.usbdev_ident,
        persistent=True)
    try:
        self.loop.run_until_complete(
            self.frontend.devices['usb'].attach(ass))
    except qubesusbproxy.core3ext.USBProxyNotInstalled as e:
        self.skipTest(str(e))

    remove_usb_gadget(self.backend)
    # FIXME: usb-export script may update qubesdb with 1sec delay
    timeout = 5
    # Poll (1 s steps) until the device disappears from the backend list.
    while self.usbdev_name in (str(dev) for dev in usb_list):
        self.loop.run_until_complete(asyncio.sleep(1))
        timeout -= 1
        self.assertGreater(timeout, 0, 'timeout on device remove')

    recreate_usb_gadget(self.backend)
    timeout = 5
    # Poll until the recreated device shows up again.
    while self.usbdev_name not in (str(dev) for dev in usb_list):
        self.loop.run_until_complete(asyncio.sleep(1))
        timeout -= 1
        self.assertGreater(timeout, 0, 'timeout on device create')

    # Give auto-attach a moment to complete before checking inside the VM.
    self.loop.run_until_complete(asyncio.sleep(1))
    self.assertEqual(self.frontend.run('lsusb -d 1234:1234', wait=True), 0,
        "Device reconnection failed")
async def right_bumper_callback(self, data):
    """Forward a right-bumper press to the connected websocket client.

    Args:
        data: sensor report; ``data[1]`` is the switch level (active low,
        so 0 means pressed).
    """
    if self.socket:
        # switch is active low
        if data[1] == 0:
            msg = json.dumps({"info": "r_bump"})
            self.socket.sendMessage(msg.encode('utf8'))
            # Bug fix: asyncio.sleep() does nothing unless awaited; the
            # original bare call never actually yielded to the loop.
            await asyncio.sleep(.001)
def _lazy_connect(self):
    """ Open a connection to the SSH server if necessary.

    Retries forever on TimeoutError (60 s back-off), then opens a session,
    auto-enables if needed, and disables terminal paging. Old-style
    coroutine; drive it with ``yield from``.
    """
    if self._conn is not None:
        return
    while self._conn is None:
        try:
            self._conn, _ = yield from asyncssh.create_connection(lambda: self, self._host, **self._login)
        except TimeoutError:
            self.log.debug('Timeout! Sleeping for a bit.')
            # Bug fix: without "yield from" asyncio.sleep(60) only created
            # a coroutine object -- the reconnect loop spun with no back-off.
            yield from asyncio.sleep(60)

    self._stdin, self._stdout, _ = yield from self._conn.open_session(encoding=None)

    # If auto-enable is enabled, the prompt will end with #; otherwise it will end with >
    result = yield from self.collect_until_prompt(b'^.+[#>] $')
    # TODO: the result=None case is not handled!
    if result and result[0].endswith('> '):
        yield from self._enable()
    self._send_cmd('terminal pager 0')
    yield from self.collect_until_prompt(PRIV_PROMPT)
def run(self):
    """Poll the Hacker News top-30 forever, announcing unseen stories with
    score >= 100 and persisting them so they are announced only once.

    Paces itself so one full pass over the list takes about five minutes.
    Old-style coroutine; drive it with ``yield from``.
    """
    nr_top_stories = 30
    # Spread a 5-minute budget evenly over the stories in one pass.
    sleep_per_story = 5 * 60 / nr_top_stories
    while True:
        url = self.URL_BASE + 'topstories.json'
        top_stories = yield from self.async_get_json(url)
        # Go through the full list every 5 mins
        slept = 0
        for item_nr in top_stories[0:nr_top_stories]:
            item_nr = str(item_nr)
            # If we have already announced the story, do not even
            # bother fetching it.
            count = yield from to_aio(HackerNewsStory.objects.filter(id=item_nr).count())
            if count:
                continue
            url = self.URL_BASE + 'item/%s.json' % item_nr
            item = yield from self.async_get_json(url)
            if item['score'] < 100:
                # Below threshold: skip, but still consume this story's
                # share of the 5-minute budget.
                yield from asyncio.sleep(sleep_per_story)
                slept += 1
                continue
            yield from self.announce_new_story(item)
            story = HackerNewsStory(id=str(item['id']),
                                    time=datetime.fromtimestamp(item['time']))
            yield from to_aio(story.save())
            yield from asyncio.sleep(sleep_per_story)
            slept += 1
        if slept < nr_top_stories:
            # Top up the pass to the full 5 minutes before polling again.
            yield from asyncio.sleep((nr_top_stories - slept) * sleep_per_story)
def wait_for_process_tasklet_running(process_name, max_retries=30):
    """Wait for the tasklet wrapped by process with the given name to reach
    RUNNNING state

    Arguments:
        process_name - instance name of the process wrapping the tasklet being waited on
        max_retries - Maximum allowed attempts to determine if tasklet is running
                    (one second elapses between attempts)

    Returns:
        instance_name of the running tasklet (or None if retries ran out)
    """
    tasklet_name = None
    process_component = None
    tasklet_component = None
    retry_attempts = 1
    # Poll once per second until the process's first child tasklet reports
    # RUNNING, or until max_retries attempts have been used.
    # Bug fix: identity comparisons below use "is None" instead of "== None".
    while tasklet_name is None and retry_attempts <= max_retries:
        if process_component is None:
            process_component = yield from get_component_info(process_name)
            if not process_component or not process_component.rwcomponent_children:
                process_component = None
                # NOTE(review): self._loop is referenced but this is a plain
                # function with no "self" parameter -- taking this branch
                # would raise NameError. Presumably this was lifted from a
                # method; confirm against the caller.
                yield from asyncio.sleep(1.0, loop=self._loop)
                retry_attempts += 1
                continue
        tasklet_name = process_component.rwcomponent_children[0]
        tasklet_component = yield from get_component_info(tasklet_name)
        if tasklet_component is None or tasklet_component.state not in ["RUNNING"]:
            # Child exists but is not (yet) RUNNING; clear and retry.
            tasklet_name = None
            yield from asyncio.sleep(1.0, loop=self._loop)
            retry_attempts += 1
            continue
    return tasklet_name
async def write(self, data):
    """
    This is an asyncio adapted version of pyserial write. It provides a
    non-blocking write and returns the number of bytes written upon
    completion

    :param data: Data to be written (a single character/byte value)
    :return: Number of bytes written
    """
    # the secret sauce - it is in your future
    future = asyncio.Future()
    try:
        result = self.my_serial.write(bytes([ord(data)]))
    except serial.SerialException:
        if self.log_output:
            logging.exception('Write exception')
        else:
            print('Write exception')
        # The serial link is unusable: cancel everything, tear down the
        # loop and the port, and exit the process.
        loop = asyncio.get_event_loop()
        for t in asyncio.Task.all_tasks(loop):
            t.cancel()
        loop.run_until_complete(asyncio.sleep(.1))
        loop.stop()
        loop.close()
        self.my_serial.close()
        sys.exit(0)
    future.set_result(result)
    while True:
        if not future.done():
            # spin our asyncio wheels until future completes
            # Bug fix: the sleep must be awaited -- an unawaited
            # asyncio.sleep() never yields control back to the loop.
            await asyncio.sleep(self.sleep_tune)
        else:
            return future.result()
def queue_put():
    """Replay the scripted ``sequence`` into the queue ``qu``.

    Dict entries are real updates: each is put on the queue and followed by
    a one-second pause. Any other entry is interpreted as a pause length in
    seconds. Old-style coroutine; drive it with ``yield from``.
    """
    for item in sequence:
        if type(item) is not dict:
            # A bare number in the script is just a delay.
            yield from asyncio.sleep(item)
        else:
            yield from qu.put(item)
            yield from asyncio.sleep(1)
def get(self):
    """Poll describe_clusters until the Redshift cluster leaves 'creating'
    state, then return its endpoint.

    Old-style coroutine; drive it with ``yield from``.
    """
    client = self.client
    cluster_identifier = self.cluster_name
    not_found_count = 0
    while True:
        try:
            response = yield from client.describe_clusters(
                ClusterIdentifier=cluster_identifier,
            )
        except botocore_exceptions.ClientError as e:
            # NOTE(review): "not_found_count < 0" can never be true (the
            # counter starts at 0 and only grows), so the retry branch
            # below is dead code and ClusterNotFound is always re-raised.
            # Looks like a positive retry cap was intended -- confirm.
            not_found_and_retry = (
                e.response['Error']['Code'] == 'ClusterNotFound' and
                not_found_count < 0
            )
            if not_found_and_retry:
                not_found_count += 1
                print('cluster not found yet')
                print(e.response)
                yield from asyncio.sleep(15)
            else:
                raise e
        else:
            cluster = response['Clusters'][0]
            if cluster['ClusterStatus'] == 'creating':
                print('Cluster is still creating')
                print(response)
                yield from asyncio.sleep(5)
            else:
                return cluster['Endpoint']
def handle_one_request(self):
    """Process a single proxied HTTP request/response cycle.

    Reads local headers (while watching the remote side), forwards the
    request, then handles the response -- guaranteeing the request-send
    future is resolved or cancelled before resetting for the next request.
    Old-style coroutine; drive it with ``yield from``.
    """
    self.debug("handle_one_request")
    # Watch the remote connection only while we wait for local headers.
    monitor = ensure_future(self.monitor_remote_connection())
    yield from self.read_local_headers()
    monitor.cancel()
    # One loop turn so the cancellation actually propagates.
    yield from asyncio.sleep(0)
    self.debug("Handling request (%s, %s)",
               self.request_verb, self.request_uri,)
    yield from self.process_request()
    yield from self.ensure_remote_connection()
    self.begin_send_request()
    try:
        yield from self.handle_one_response()
    except:
        self._send_request_future.cancel()
        raise
    else:
        if not self._send_request_future.done():
            # Last chance to clean up
            yield from asyncio.sleep(0)
            if not self._send_request_future.done():
                self._send_request_future.cancel()
                yield from asyncio.sleep(0)
                # A response arrived before the request finished sending --
                # protocol state is broken.
                raise RuntimeError("Got response before request is sent")
        self._send_request_future.result()
    self.reset_request()
async def accel_axis_callback(self, data):
    """Format an accelerometer report and push it to the websocket client.

    Args:
        data: six-element report -- data[0..2] are raw axis values,
        data[3..5] are the g readings for x, y, z.
    """
    # g readings, rounded to two decimals and stringified for JSON.
    datax = str(float("{0:.2f}".format(data[3])))
    datay = str(float("{0:.2f}".format(data[4])))
    dataz = str(float("{0:.2f}".format(data[5])))
    x = data[0]
    y = data[1]
    z = data[2]
    # Tilt angles (degrees) derived from the raw axis pairs.
    angle_xz = 180 * math.atan2(x, z) / math.pi
    angle_xz = str(float("{0:.2f}".format(angle_xz)))
    angle_xy = 180 * math.atan2(x, y) / math.pi
    angle_xy = str(float("{0:.2f}".format(angle_xy)))
    angle_yz = 180 * math.atan2(y, z) / math.pi
    angle_yz = str(float("{0:.2f}".format(angle_yz)))
    x = str(data[0])
    y = str(data[1])
    z = str(data[2])
    msg = json.dumps({"info": "axis", "xg": datax, "yg": datay,
                      "zg": dataz, "raw_x": x, "raw_y": y, "raw_z": z,
                      "angle_x": angle_xz, "angle_y": angle_xy,
                      "angle_z": angle_yz})
    if self.socket:
        self.socket.sendMessage(msg.encode('utf8'))
        # Bug fix: asyncio.sleep() is a no-op unless awaited; the original
        # bare call never yielded to the event loop.
        await asyncio.sleep(.001)
async def game_loop(game: Game, speed: float=0.033):
    """Advance the game one step per tick, forever.

    Each tick waits for both the game step and a pacing sleep of ``speed``
    seconds, so ticks never run faster than the pacing interval.
    """
    while True:
        # NOTE(review): passing bare coroutines to wait() is deprecated in
        # modern asyncio (3.8+ warns, 3.11 raises) -- they should be wrapped
        # in tasks. Confirm the target Python version before upgrading.
        await wait([
            game.step(),
            sleep(speed),
        ])
def update_log_files_timer(self):
    """Update log files every UPDATE_LOG_FILES_TIMEOUT secods.

    Runs forever; old-style coroutine, drive it with ``yield from``.
    """
    while True:
        logger.debug("Timer uploading log files.")
        yield from self.upload_log_files()
        yield from asyncio.sleep(UPDATE_LOG_FILES_TIMEOUT)
def test_user_stream(self):
    """Let the user-stream tracker run for 20 s, then dump what it collected
    for manual inspection (no assertions)."""
    self.ev_loop.run_until_complete(asyncio.sleep(20.0))
    print(self.user_stream_tracker.user_stream)
def stop():
    """Shut down every data event stream, then stop the event loop."""
    for dataevent in dataevents.values():
        dataevent.shutdown()
    # NOTE(review): asyncio.wait(...) returns a coroutine that is never
    # awaited or run here, so this line is a no-op and the intended 1 s
    # grace period before stopping the loop never happens. The right fix
    # depends on whether the loop is running when stop() is called
    # (run_until_complete vs. loop.call_later(1, loop.stop)) -- confirm.
    asyncio.wait(asyncio.sleep(1))
    loop.stop()
def hello_world():
    """Print "Hello ...", pause one second, then print "... World".

    Old-style (pre-async/await) coroutine; drive it with ``yield from``
    or schedule it on an event loop.
    """
    print("Hello ...")
    yield from sleep(1)
    print("... World")
def test_worker():
    """Smoke test: construct a BatchWorker and make sure stop() completes."""
    # Give the loop a brief spin before creating the worker.
    loop.run_until_complete(asyncio.sleep(0.1))
    worker = BatchWorker(print)
    #loop.run_until_complete(worker.put('abc'))
    #loop.run_until_complete(worker.put('def'))
    loop.run_until_complete(worker.stop())
def hello():
    """Print a greeting, yield to the event loop for one second, then print
    a follow-up.

    Old-style coroutine; drive it with ``yield from`` or schedule it on a
    loop.
    """
    # Typo fix in the user-visible message: 'wrold' -> 'world'.
    print('Hello, world!')
    # asyncio.sleep() resolves to None, so the old unused "r = ..." binding
    # was dropped.
    yield from asyncio.sleep(1)
    print('Hello, again!')
def _poll(self): device = yield from self.i2c_platform.configure_i2c(self.number) # check id self.platform.log.info("Checking ID of device at: %s", self.number) device_id = yield from device.i2c_read8(0x0D) if device_id != 0x1A: raise AssertionError( "Device ID does not match MMA8451. Detected: {}".format( device_id)) # reset self.platform.log.info("Resetting device at: %s", self.number) device.i2c_write8(0x2B, 0x40) yield from asyncio.sleep(.3, loop=self.platform.machine.clock.loop) result = -1 for _ in range(10): result = yield from device.i2c_read8(0x2B) if result == 0: break self.platform.log.warning("Failed to reset: %s at %s", result, self.number) yield from asyncio.sleep(.5, loop=self.platform.machine.clock.loop) else: raise AssertionError( "Failed to reset MMA8451 accelerometer. Result: {}".format( result)) # set resolution to 2g device.i2c_write8(0x2B, 0x02) # set ready device.i2c_write8(0x2D, 0x01) device.i2c_write8(0x2E, 0x01) # turn on orientation device.i2c_write8(0x11, 0x40) # low noise mode, 12,5Hz and activate device.i2c_write8(0x2A, 0x2D) # wait for activate yield from asyncio.sleep(.3, loop=self.platform.machine.clock.loop) self.platform.log.info("Init done for device at: %s", self.number) while True: data = yield from device.i2c_read_block(0x01, 6) x = ((data[0] << 8) | data[1]) >> 2 y = ((data[2] << 8) | data[3]) >> 2 z = ((data[4] << 8) | data[5]) >> 2 max_val = 2**(14 - 1) - 1 signed_max = 2**14 x -= signed_max if x > max_val else 0 y -= signed_max if y > max_val else 0 z -= signed_max if z > max_val else 0 range_divisor = 4096 / 9.80665 x = round((float(x)) / range_divisor, 3) y = round((float(y)) / range_divisor, 3) z = round((float(z)) / range_divisor, 3) self.callback.update_acceleration(x, y, z) yield from asyncio.sleep(.1, loop=self.platform.machine.clock.loop)
def client_connected(self, listener_name, reader: ReaderAdapter, writer: WriterAdapter):
    """Handle the full lifecycle of one MQTT client connection.

    Accepts the CONNECT, resolves/creates the session, authenticates, then
    runs the main event loop dispatching disconnects, (un)subscriptions and
    published messages until the client goes away. Old-style coroutine;
    drive it with ``yield from``.
    """
    # Wait for connection available on listener
    server = self._servers.get(listener_name, None)
    if not server:
        raise BrokerException("Invalid listener name '%s'" % listener_name)
    yield from server.acquire_connection()

    remote_address, remote_port = writer.get_peer_info()
    self.logger.info("Connection from %s:%d on listener '%s'" % (remote_address, remote_port, listener_name))

    # Wait for first packet and expect a CONNECT
    try:
        handler, client_session = yield from BrokerProtocolHandler.init_from_connect(reader, writer, self.plugins_manager, loop=self._loop)
    except HBMQTTException as exc:
        self.logger.warning("[MQTT-3.1.0-1] %s: Can't read first packet an CONNECT: %s" %
                            (format_client_message(address=remote_address, port=remote_port), exc))
        #yield from writer.close()
        self.logger.debug("Connection closed")
        return
    except MQTTException as me:
        self.logger.error('Invalid connection from %s : %s' %
                          (format_client_message(address=remote_address, port=remote_port), me))
        yield from writer.close()
        self.logger.debug("Connection closed")
        return

    if client_session.clean_session:
        # Delete existing session and create a new one
        if client_session.client_id is not None and client_session.client_id != "":
            self.delete_session(client_session.client_id)
        else:
            client_session.client_id = gen_client_id()
        client_session.parent = 0
    else:
        # Get session from cache
        if client_session.client_id in self._sessions:
            self.logger.debug("Found old session %s" % repr(self._sessions[client_session.client_id]))
            (client_session, h) = self._sessions[client_session.client_id]
            client_session.parent = 1
        else:
            client_session.parent = 0
    if client_session.keep_alive > 0:
        # Pad the keep-alive with the configured grace period.
        client_session.keep_alive += self.config['timeout-disconnect-delay']
    self.logger.debug("Keep-alive timeout=%d" % client_session.keep_alive)

    handler.attach(client_session, reader, writer)
    self._sessions[client_session.client_id] = (client_session, handler)

    authenticated = yield from self.authenticate(client_session, self.listeners_config[listener_name])
    if not authenticated:
        yield from writer.close()
        server.release_connection()  # Delete client from connections list
        return

    # Retry the state-machine 'connect' transition until it is legal
    # (e.g. an older instance of this client is still being torn down).
    while True:
        try:
            client_session.transitions.connect()
            break
        except (MachineError, ValueError):
            # Backwards compat: MachineError is raised by transitions < 0.5.0.
            self.logger.warning("Client %s is reconnecting too quickly, make it wait" % client_session.client_id)
            # Wait a bit may be client is reconnecting too fast
            yield from asyncio.sleep(1, loop=self._loop)
    yield from handler.mqtt_connack_authorize(authenticated)

    yield from self.plugins_manager.fire_event(EVENT_BROKER_CLIENT_CONNECTED, client_id=client_session.client_id)

    self.logger.debug("%s Start messages handling" % client_session.client_id)
    yield from handler.start()
    self.logger.debug("Retained messages queue size: %d" % client_session.retained_messages.qsize())
    yield from self.publish_session_retained_messages(client_session)

    # Init and start loop for handling client messages (publish, subscribe/unsubscribe, disconnect)
    disconnect_waiter = ensure_future(handler.wait_disconnect(), loop=self._loop)
    subscribe_waiter = ensure_future(handler.get_next_pending_subscription(), loop=self._loop)
    unsubscribe_waiter = ensure_future(handler.get_next_pending_unsubscription(), loop=self._loop)
    wait_deliver = ensure_future(handler.mqtt_deliver_next_message(), loop=self._loop)
    connected = True
    while connected:
        try:
            # Race all four waiters; handle every one that completed.
            done, pending = yield from asyncio.wait(
                [disconnect_waiter, subscribe_waiter, unsubscribe_waiter, wait_deliver],
                return_when=asyncio.FIRST_COMPLETED, loop=self._loop)
            if disconnect_waiter in done:
                result = disconnect_waiter.result()
                self.logger.debug("%s Result from wait_diconnect: %s" % (client_session.client_id, result))
                if result is None:
                    self.logger.debug("Will flag: %s" % client_session.will_flag)
                    # Connection closed anormally, send will message
                    if client_session.will_flag:
                        self.logger.debug("Client %s disconnected abnormally, sending will message" %
                                          format_client_message(client_session))
                        yield from self._broadcast_message(
                            client_session,
                            client_session.will_topic,
                            client_session.will_message,
                            client_session.will_qos)
                        if client_session.will_retain:
                            self.retain_message(client_session,
                                                client_session.will_topic,
                                                client_session.will_message,
                                                client_session.will_qos)
                self.logger.debug("%s Disconnecting session" % client_session.client_id)
                yield from self._stop_handler(handler)
                client_session.transitions.disconnect()
                yield from self.plugins_manager.fire_event(EVENT_BROKER_CLIENT_DISCONNECTED, client_id=client_session.client_id)
                connected = False
            if unsubscribe_waiter in done:
                self.logger.debug("%s handling unsubscription" % client_session.client_id)
                unsubscription = unsubscribe_waiter.result()
                for topic in unsubscription['topics']:
                    self._del_subscription(topic, client_session)
                    yield from self.plugins_manager.fire_event(
                        EVENT_BROKER_CLIENT_UNSUBSCRIBED,
                        client_id=client_session.client_id,
                        topic=topic)
                yield from handler.mqtt_acknowledge_unsubscription(unsubscription['packet_id'])
                # Re-arm the waiter for the next unsubscription.
                unsubscribe_waiter = asyncio.Task(handler.get_next_pending_unsubscription(), loop=self._loop)
            if subscribe_waiter in done:
                self.logger.debug("%s handling subscription" % client_session.client_id)
                subscriptions = subscribe_waiter.result()
                return_codes = []
                for subscription in subscriptions['topics']:
                    result = yield from self.add_subscription(subscription, client_session)
                    return_codes.append(result)
                yield from handler.mqtt_acknowledge_subscription(subscriptions['packet_id'], return_codes)
                for index, subscription in enumerate(subscriptions['topics']):
                    # 0x80 is the MQTT "subscription refused" return code.
                    if return_codes[index] != 0x80:
                        yield from self.plugins_manager.fire_event(
                            EVENT_BROKER_CLIENT_SUBSCRIBED,
                            client_id=client_session.client_id,
                            topic=subscription[0],
                            qos=subscription[1])
                        yield from self.publish_retained_messages_for_subscription(subscription, client_session)
                subscribe_waiter = asyncio.Task(handler.get_next_pending_subscription(), loop=self._loop)
                self.logger.debug(repr(self._subscriptions))
            if wait_deliver in done:
                if self.logger.isEnabledFor(logging.DEBUG):
                    self.logger.debug("%s handling message delivery" % client_session.client_id)
                app_message = wait_deliver.result()
                if not app_message.topic:
                    self.logger.warning("[MQTT-4.7.3-1] - %s invalid TOPIC sent in PUBLISH message, closing connection" % client_session.client_id)
                    break
                if "#" in app_message.topic or "+" in app_message.topic:
                    self.logger.warning("[MQTT-3.3.2-2] - %s invalid TOPIC sent in PUBLISH message, closing connection" % client_session.client_id)
                    break
                yield from self.plugins_manager.fire_event(EVENT_BROKER_MESSAGE_RECEIVED,
                                                           client_id=client_session.client_id,
                                                           message=app_message)
                yield from self._broadcast_message(client_session, app_message.topic, app_message.data)
                if app_message.publish_packet.retain_flag:
                    self.retain_message(client_session, app_message.topic, app_message.data, app_message.qos)
                wait_deliver = asyncio.Task(handler.mqtt_deliver_next_message(), loop=self._loop)
        except asyncio.CancelledError:
            self.logger.debug("Client loop cancelled")
            break
    # Tear down all outstanding waiters before releasing the connection.
    disconnect_waiter.cancel()
    subscribe_waiter.cancel()
    unsubscribe_waiter.cancel()
    wait_deliver.cancel()

    self.logger.debug("%s Client disconnected" % client_session.client_id)
    server.release_connection()
async def manifest(cmd):
    """Command handler: let a ghost (corpse) manifest back into the city.

    Validates the player's state (must be a non-busted corpse in the sewers
    with sufficiently negative slime), then moves them to the location where
    they died, guarding against concurrent movement commands.
    """
    user_data = EwUser(member=cmd.message.author)
    response = ""
    # Busted ghosts are confined to the sewers until they !haunt someone.
    if user_data.life_state == ewcfg.life_state_corpse and user_data.busted:
        if user_data.poi == ewcfg.poi_id_thesewers:
            return await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, "You're busted, bitch. You can't leave the sewers until your restore your power by !haunting one of the living."))
        else:
            # sometimes busted ghosts get stuck outside the sewers
            user_data.poi = ewcfg.poi_id_thesewers
            user_data.persist()
            await ewrolemgr.updateRoles(cmd.client, cmd.message.author)
            return
    if user_data.life_state != ewcfg.life_state_corpse:
        response = "You don't even know what that MEANS."
        return await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))
    if user_data.poi != ewcfg.poi_id_thesewers:
        response = "You've already manifested in the city."
        return await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))
    # NOTE(review): ghosts presumably carry negative slime, so "too weak"
    # means slimes ABOVE the (negative) threshold — confirm the sign of
    # ewcfg.slimes_tomanifest.
    if user_data.slimes > ewcfg.slimes_tomanifest:
        response = "You are too weak to manifest. You need to gather more negative slime."
        return await ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response))
    # Manifest at the place of death; response echoes the invoked command verb.
    poi = ewcfg.id_to_poi.get(user_data.poi_death)
    response = "{}ing in {}.".format(cmd.tokens[0][1:].capitalize(), poi.str_name)
    # schedule tasks for concurrent execution
    message_task = asyncio.ensure_future(ewutils.send_message(cmd.client, cmd.message.channel, ewutils.formatMessage(cmd.message.author, response)))
    wait_task = asyncio.ensure_future(asyncio.sleep(5))
    # Take control of the move for this player.
    ewmap.move_counter += 1
    move_current = ewutils.moves_active[cmd.message.author.id] = ewmap.move_counter
    await message_task
    await wait_task
    # check if the user entered another movement command while waiting for the current one to be completed
    if move_current != ewutils.moves_active[cmd.message.author.id]:
        return
    # Re-read user state: it may have changed during the 5-second wait.
    user_data = EwUser(member=cmd.message.author)
    user_data.poi = poi.id_poi
    user_data.persist()
    await ewrolemgr.updateRoles(cmd.client, cmd.message.author)
def analyse_moves():
    """Step an engine through the game's boards and annotate blunders.

    Generator-based coroutine (legacy ``yield from`` style). For each board
    from the starting ply onward it shows the position, lets the analyzer(s)
    think for ``move_time`` seconds, then compares consecutive evaluations;
    when the score swings past ``threshold`` centipawns against the mover, a
    "Better is" variation (and optionally a "Threatening" spy-engine line)
    is added to the game model. Closure: reads self/gmwidg/gamemodel/
    analyzer/threat_PV/etc. from the enclosing scope.
    """
    should_black = conf.get("shouldBlack", True)
    should_white = conf.get("shouldWhite", True)
    from_current = conf.get("fromCurrent", True)
    start_ply = gmwidg.board.view.shown if from_current else 0
    move_time = int(conf.get("max_analysis_spin", 3))
    threshold = int(conf.get("variation_threshold_spin", 50))
    for board in gamemodel.boards[start_ply:]:
        if self.stop_event.is_set():
            break
        gmwidg.board.view.setShownBoard(board)
        analyzer.setBoard(board)
        if threat_PV:
            inv_analyzer.setBoard(board)
        # Give the engine(s) the configured thinking time (+ small slack).
        yield from asyncio.sleep(move_time + 0.1)
        ply = board.ply
        color = (ply - 1) % 2  # side that made the move leading to this board
        if ply - 1 in gamemodel.scores and ply in gamemodel.scores and (
                (color == BLACK and should_black) or
                (color == WHITE and should_white)):
            oldmoves, oldscore, olddepth = gamemodel.scores[ply - 1]
            # Normalize scores to White's perspective before differencing.
            oldscore = oldscore * -1 if color == BLACK else oldscore
            score_str = prettyPrintScore(oldscore, olddepth)
            moves, score, depth = gamemodel.scores[ply]
            score = score * -1 if color == WHITE else score
            diff = score - oldscore
            # Only annotate when the eval swung against the mover AND the
            # engine's preferred move differs from the move actually played.
            if ((diff > threshold and color == BLACK) or
                    (diff < -1 * threshold and color == WHITE)) and (
                    gamemodel.moves[ply - 1] != parseAny(
                        gamemodel.boards[ply - 1], oldmoves[0])):
                if threat_PV:
                    try:
                        if ply - 1 in gamemodel.spy_scores:
                            oldmoves0, oldscore0, olddepth0 = gamemodel.spy_scores[ply - 1]
                            score_str0 = prettyPrintScore(oldscore0, olddepth0)
                            # "--" prefixes a null move so the threat line is
                            # shown from the opponent's point of view.
                            pv0 = listToMoves(gamemodel.boards[ply - 1], ["--"] + oldmoves0, validate=True)
                            if len(pv0) > 2:
                                gamemodel.add_variation(gamemodel.boards[ply - 1], pv0, comment="Threatening", score=score_str0, emit=False)
                    except ParsingError as e:
                        # ParsingErrors may happen when parsing "old" lines from
                        # analyzing engines, which haven't yet noticed their new tasks
                        log.debug("__parseLine: Ignored (%s) from analyzer: ParsingError%s" % (' '.join(oldmoves), e))
                try:
                    pv = listToMoves(gamemodel.boards[ply - 1], oldmoves, validate=True)
                    gamemodel.add_variation(gamemodel.boards[ply - 1], pv, comment="Better is", score=score_str, emit=False)
                except ParsingError as e:
                    # ParsingErrors may happen when parsing "old" lines from
                    # analyzing engines, which haven't yet noticed their new tasks
                    log.debug("__parseLine: Ignored (%s) from analyzer: ParsingError%s" % (' '.join(oldmoves), e))
    # Analysis finished (or was stopped): restore the UI and notify listeners.
    self.widgets["analyze_game"].hide()
    self.widgets["analyze_ok_button"].set_sensitive(True)
    conf.set("analyzer_check", old_check_value)
    if threat_PV:
        conf.set("inv_analyzer_check", old_inv_check_value)
    message.dismiss()
    gamemodel.emit("analysis_finished")
async def on_startup(app): nonlocal task loop = asyncio.get_event_loop() task = loop.create_task(asyncio.sleep(1000))
def test_create_datagram_endpoint_sock_unix_domain(self):
    """End-to-end check of create_datagram_endpoint() over AF_UNIX datagram
    sockets: a second endpoint sends b'STOP' to the first, which records the
    sender address and completes its future."""

    class Proto(asyncio.DatagramProtocol):
        # Future resolved once b'STOP' has been fully received (or on error).
        done = None

        def __init__(self, loop):
            self.state = 'INITIAL'
            self.addrs = set()   # sender addresses seen by datagram_received
            self.done = asyncio.Future(loop=loop)
            self.data = b''

        def connection_made(self, transport):
            self.transport = transport
            assert self.state == 'INITIAL', self.state
            self.state = 'INITIALIZED'

        def datagram_received(self, data, addr):
            assert self.state == 'INITIALIZED', self.state
            self.addrs.add(addr)
            self.data += data
            # Complete only when the whole sentinel has arrived.
            if self.data == b'STOP' and not self.done.done():
                self.done.set_result(True)

        def error_received(self, exc):
            assert self.state == 'INITIALIZED', self.state
            if not self.done.done():
                self.done.set_exception(exc or RuntimeError())

        def connection_lost(self, exc):
            assert self.state == 'INITIALIZED', self.state
            self.state = 'CLOSED'
            if self.done and not self.done.done():
                self.done.set_result(None)

    # Bind the receiving endpoint to a fresh unix-domain path.
    tmp_file = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
    sock = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
    sock.bind(tmp_file)
    with sock:
        pr = Proto(loop=self.loop)
        f = self.loop.create_datagram_endpoint(lambda: pr, sock=sock)
        tr, pr_prime = self.loop.run_until_complete(f)
        # The loop must use the protocol instance our factory returned.
        self.assertIs(pr, pr_prime)
        # Second endpoint: the sender side.
        tmp_file2 = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
        sock2 = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
        sock2.bind(tmp_file2)
        with sock2:
            f2 = self.loop.create_datagram_endpoint(asyncio.DatagramProtocol, sock=sock2)
            tr2, pr2 = self.loop.run_until_complete(f2)
            tr2.sendto(b'STOP', tmp_file)
            self.loop.run_until_complete(pr.done)
        tr.close()
        tr2.close()
        # Let transports close
        self.loop.run_until_complete(asyncio.sleep(0.2))
    # The receiver must have seen the sender's unix path as the peer address.
    self.assertIn(tmp_file2, pr.addrs)
def run_loop_briefly(self, *, delay=0.01):
    """Drive the test's event loop for roughly *delay* seconds."""
    pause = asyncio.sleep(delay)
    self.loop.run_until_complete(pause)
# NOTE(review): the two statements below are the tail of a save_message()
# function whose `def` line falls outside this chunk (db_url/port/message
# are its locals) — layout reconstructed, confirm against the full file.
m = Message(db_url, port, message)
m.save_message()


def save_chat(chat_name: str):
    """Persist a chat name if it is not already stored.

    :param chat_name: name of the chat to record.
    :type chat_name: str
    """
    db_url = config('DATABASE_URL')
    port = config('DATABASE_PORT', cast=int)
    c = Chat(db_url, port, chat_name)
    chat_obj = c.find_chat()
    # find_chat() returning False means the chat is unknown; store it.
    if chat_obj is False:
        c.update_chat()


if __name__ == '__main__':
    # Poll WhatsApp every 3 seconds and persist any unread messages.
    driver = WhatsAPIDriver(loadstyles=True)
    connect = connect_bot()
    while connect is True:
        sleep(3)
        unread = unread_msg(driver)
        if unread is not None:
            message = get_data_message(unread)
            if message is not None:
                save_message(message)
                continue
            continue
async def newsProducer(myQueue):
    """Endlessly publish a random int in [1, 5] onto *myQueue*, one per second.

    Fix: converted from a legacy ``yield from`` generator coroutine — a plain
    generator cannot ``yield from`` native coroutines such as
    ``asyncio.sleep``/``Queue.put`` on modern Python (TypeError), and
    generator-based coroutines were removed in Python 3.11.
    """
    while True:
        await myQueue.put(random.randint(1, 5))
        await asyncio.sleep(1)
def __await__(self):
    # Awaiting this object just pauses for 0.1 s: delegate to the awaitable
    # returned by sleep(0.1). Assumes `sleep` is asyncio.sleep imported
    # unqualified — TODO confirm the import.
    yield from (sleep(0.1).__await__())
async def task1(self):
    """Broadcast a 'my_message' event roughly once per second, forever.

    Fix: the original called ``asyncio.sleep(1)`` without ``await``, which
    only creates (and discards) the coroutine object — the loop never
    actually paused, so this spun as fast as the event loop allowed.
    """
    while True:
        await asyncio.sleep(1)  # was missing `await`: un-awaited sleep is a no-op
        print({'data': 'hello repeat'})
        await self.emit('my_message', {'data': 'hello repeat'})
def test_orders_saving_and_restoration(self):
    """Place a deep limit order, verify MarketsRecorder persists it, restore
    the saved state into a fresh connector, then cancel and confirm the
    persisted state empties out."""
    config_path = "test_config"
    strategy_name = "test_strategy"
    sql = SQLConnectionManager(SQLConnectionType.TRADE_FILLS, db_path=self.db_path)
    order_id = None
    recorder = MarketsRecorder(sql, [self.connector], config_path, strategy_name)
    recorder.start()
    try:
        # Start from a clean in-flight-order slate.
        self.connector._in_flight_orders.clear()
        self.assertEqual(0, len(self.connector.tracking_states))
        # Try to put limit buy order for 0.02 ETH worth of ZRX, and watch for order creation event.
        # (Price is 20% below the bid so the order stays unfilled.)
        current_bid_price: Decimal = self.connector.get_price(self.trading_pair, True)
        price: Decimal = current_bid_price * Decimal("0.8")
        price = self.connector.quantize_order_price(self.trading_pair, price)
        amount: Decimal = Decimal("0.0001")
        amount = self.connector.quantize_order_amount(self.trading_pair, amount)
        cl_order_id = self._place_order(True, amount, OrderType.LIMIT_MAKER, price, 1, fixture.UNFILLED_ORDER)
        order_created_event = self.ev_loop.run_until_complete(self.event_logger.wait_for(BuyOrderCreatedEvent))
        self.assertEqual(cl_order_id, order_created_event.order_id)
        # Verify tracking states
        self.assertEqual(1, len(self.connector.tracking_states))
        self.assertEqual(cl_order_id, list(self.connector.tracking_states.keys())[0])
        # Verify orders from recorder
        recorded_orders: List[Order] = recorder.get_orders_for_config_and_market(config_path, self.connector)
        self.assertEqual(1, len(recorded_orders))
        self.assertEqual(cl_order_id, recorded_orders[0].id)
        # Verify saved market states
        saved_market_states: MarketState = recorder.get_market_states(config_path, self.connector)
        self.assertIsNotNone(saved_market_states)
        self.assertIsInstance(saved_market_states.saved_state, dict)
        self.assertGreater(len(saved_market_states.saved_state), 0)
        # Close out the current market and start another market.
        # NOTE(review): stop() gets self._clock while the rest of the test
        # uses self.clock — confirm both refer to the same clock.
        self.connector.stop(self._clock)
        self.ev_loop.run_until_complete(asyncio.sleep(5))
        self.clock.remove_iterator(self.connector)
        for event_tag in self.events:
            self.connector.remove_listener(event_tag, self.event_logger)
        new_connector = WazirxExchange(API_KEY, API_SECRET, [self.trading_pair], True)
        for event_tag in self.events:
            new_connector.add_listener(event_tag, self.event_logger)
        recorder.stop()
        recorder = MarketsRecorder(sql, [new_connector], config_path, strategy_name)
        recorder.start()
        saved_market_states = recorder.get_market_states(config_path, new_connector)
        self.clock.add_iterator(new_connector)
        # The fresh connector tracks nothing until state is restored.
        self.assertEqual(0, len(new_connector.limit_orders))
        self.assertEqual(0, len(new_connector.tracking_states))
        new_connector.restore_tracking_states(saved_market_states.saved_state)
        self.assertEqual(1, len(new_connector.limit_orders))
        self.assertEqual(1, len(new_connector.tracking_states))
        # Cancel the order and verify that the change is saved.
        self._cancel_order(cl_order_id)
        self.ev_loop.run_until_complete(self.event_logger.wait_for(OrderCancelledEvent))
        order_id = None
        self.assertEqual(0, len(new_connector.limit_orders))
        self.assertEqual(0, len(new_connector.tracking_states))
        saved_market_states = recorder.get_market_states(config_path, new_connector)
        self.assertEqual(0, len(saved_market_states.saved_state))
    finally:
        # Best-effort cleanup: cancel any order left open, stop the recorder,
        # and remove the scratch database file.
        if order_id is not None:
            self.connector.cancel(self.trading_pair, cl_order_id)
            self.run_parallel(self.event_logger.wait_for(OrderCancelledEvent))
        recorder.stop()
        os.unlink(self.db_path)
def _run_inner(self, obtain):
    """Resource-directory client state machine (legacy ``yield from`` coroutine).

    Outer loop: discover an RD (unless a previous discovery can be reused),
    obtain link data via *obtain*, and register. Inner loop: keep the
    registration alive by renewing shortly before its lifetime expires;
    a failed renewal falls back to a fresh registration.
    """
    errors = 0
    errors_max = 5
    failed_initialization = set()  # RD URIs that failed initial registration
    try_reuse_discovery = False
    while True:
        if try_reuse_discovery:
            try_reuse_discovery = False
        else:
            self._set_state('discovering')
            for i in range(4):
                if i:
                    self.log.info("Waiting to retry RD discovery")
                    yield from asyncio.sleep(2 * 3**(i - 1))  # arbitrary fall-off
                yield from self._fill_directory_resource(blacklist=failed_initialization)
                # NOTE(review): this break is unconditional, so the loop runs
                # at most once and the for/else "giving up" branch is
                # unreachable — a try/except around the fill call may have
                # been lost; confirm against upstream.
                break
            else:
                self.log.error("Giving up RD discovery")
                break
        # NOTE(review): link_data is never used after assignment — the await
        # is presumably for its side effects; confirm.
        link_data = yield from obtain
        self._set_state("registering")
        try:
            yield from self._register()
        except self._RetryableError as e:
            errors += 1
            if errors < errors_max:
                self.log.warning("Initial registration failed (%s), blacklisting RD URI and retrying discovery", e)
                failed_initialization.add(self._directory_resource)
                self._directory_resource = None
                continue
            else:
                self.log.error("Giving up after too many failed initial registrations")
                break
        # registration is active, keep it that way.
        # things look good enough to forget about past bad experiences.
        # could move this to the end of the following loop if worries come
        # up of having picked a bad RD that supports registration but not
        # registration updates
        errors = 0
        failed_initialization = set()
        try_reuse_discovery = True
        while True:
            self._set_state("registered")
            # renew 60 seconds before timeout, unless that's before the 75% mark (then wait for that)
            yield from asyncio.sleep(self._lt - 60 if self._lt > 240 else self._lt * 3 // 4)
            self._set_state("renewing")
            try:
                yield from self._renew_registration()
            except self._RetryableError as e:
                self.log.warning("Registration update failed (%s), retrying with new registration", e)
                break
async def wait():
    """Pause for five seconds, then print a completion message.

    Fix: the original never awaited ``asyncio.sleep(5)``, so the coroutine
    object was created and discarded — the function returned immediately
    (emitting a "coroutine was never awaited" warning) instead of waiting.
    """
    await asyncio.sleep(5)
    print("等我 5 秒钟")
def test_filled_orders_recorded(self):
    """Fill a buy and a sell order and verify both trades are persisted by
    MarketsRecorder under the test config."""
    config_path: str = "test_config"
    strategy_name: str = "test_strategy"
    sql = SQLConnectionManager(SQLConnectionType.TRADE_FILLS, db_path=self.db_path)
    order_id = None
    recorder = MarketsRecorder(sql, [self.connector], config_path, strategy_name)
    recorder.start()
    try:
        # Try to buy some token from the exchange, and watch for completion event.
        # (Price 5% above best ask so the limit order fills immediately.)
        price = self.connector.get_price(self.trading_pair, True) * Decimal("1.05")
        price = self.connector.quantize_order_price(self.trading_pair, price)
        amount = self.connector.quantize_order_amount(self.trading_pair, Decimal("15"))
        order_id = self._place_order(True, amount, OrderType.LIMIT, price, 1, None, fixture.WS_TRADE)
        self.ev_loop.run_until_complete(self.event_logger.wait_for(BuyOrderCompletedEvent))
        self.ev_loop.run_until_complete(asyncio.sleep(1))
        # Reset the logs
        self.event_logger.clear()
        # Try to sell back the same amount to the exchange, and watch for completion event.
        price = self.connector.get_price(self.trading_pair, True) * Decimal("0.95")
        price = self.connector.quantize_order_price(self.trading_pair, price)
        amount = self.connector.quantize_order_amount(self.trading_pair, Decimal("15"))
        order_id = self._place_order(False, amount, OrderType.LIMIT, price, 2, None, fixture.WS_TRADE)
        self.ev_loop.run_until_complete(self.event_logger.wait_for(SellOrderCompletedEvent))
        # Query the persisted trade logs
        trade_fills: List[TradeFill] = recorder.get_trades_for_config(config_path)
        self.assertGreaterEqual(len(trade_fills), 2)
        buy_fills: List[TradeFill] = [t for t in trade_fills if t.trade_type == "BUY"]
        sell_fills: List[TradeFill] = [t for t in trade_fills if t.trade_type == "SELL"]
        self.assertGreaterEqual(len(buy_fills), 1)
        self.assertGreaterEqual(len(sell_fills), 1)
        # Both orders completed: nothing left to cancel in the finally block.
        order_id = None
    finally:
        if order_id is not None:
            self.connector.cancel(self.trading_pair, order_id)
            self.run_parallel(self.event_logger.wait_for(OrderCancelledEvent))
        recorder.stop()
        os.unlink(self.db_path)
async def test_run_when_enter(self): # normal function, simple test var = False @run_when_enter(self, 'awaken') def enter_test(): nonlocal var var = True enter_test() self.assertIs(var, True) var = False # await asyncio.create_task(AsyncGear.set_obj_period(self, 'awaken')) await asyncio.create_task(Gear(self).set_period('awaken')) # self.assertEqual('awaken', AsyncGear.get_obj_present_period(self)) self.assertEqual('awaken', Gear(self).get_present_period()) self.assertIs(var, True) var = False # await asyncio.create_task(AsyncGear.set_obj_period(self, 'sleep')) await asyncio.create_task(Gear(self).set_period('sleep')) # test 3 situations for coroutine functions var1 = False @run_when_enter(self, 'awaken', 'abandon') async def enter_test(): await asyncio.create_task(asyncio.sleep(0.1)) nonlocal var1 if var1: var1 = False else: var1 = True await asyncio.create_task(enter_test()) self.assertIs(var1, True) var1 = False ## abandon # await asyncio.create_task(AsyncGear.set_obj_period(self, 'awaken')) await asyncio.create_task(Gear(self).set_period('awaken')) # await asyncio.create_task(AsyncGear.set_obj_period(self, 'sleep')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(asyncio.sleep(0.15)) self.assertIs(var1, True) var1 = False # await asyncio.create_task(AsyncGear.set_obj_period(self, 'awaken')) await asyncio.create_task(Gear(self).set_period('awaken')) # await asyncio.create_task(AsyncGear.set_obj_period(self, 'sleep')) await asyncio.create_task(Gear(self).set_period('sleep')) # await asyncio.create_task(AsyncGear.set_obj_period(self, )) await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(asyncio.sleep(0.15)) self.assertIs(var1, True) var1 = False ## non_block var2 = False @run_when_enter(self, 'awaken', 'non_block') async def enter_test(): await asyncio.create_task(asyncio.sleep(0.1)) nonlocal var2 if var2: var2 = False else: var2 = True 
await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(asyncio.sleep(0.15)) self.assertIs(var2, True) var2 = False await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(asyncio.sleep(0.15)) self.assertIs(var2, False) var2 = False await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(asyncio.sleep(0.15)) self.assertIs(var2, True) var2 = False ## queue var3 = False fs = [] @run_when_enter(self, 'awaken', 'queue') async def enter_test(): f = asyncio.get_running_loop().create_future() fs.append(f) await asyncio.create_task(asyncio.sleep(0.1)) nonlocal var3 if var3: var3 = False else: var3 = True f.set_result(None) fs.remove(f) time = asyncio.get_running_loop().time() await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(asyncio.sleep(0.15)) [await asyncio.ensure_future(f) for f in fs] self.assertEqual(0.1 * 2, round(asyncio.get_running_loop().time() - time, 1)) self.assertEqual(var3, False) time = asyncio.get_running_loop().time() var3 = False await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(Gear(self).set_period('awaken')) await 
asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(Gear(self).set_period('awaken')) await asyncio.create_task(Gear(self).set_period('sleep')) await asyncio.create_task(asyncio.sleep(0.25)) [await asyncio.ensure_future(f) for f in fs] self.assertEqual(round(0.1 * 3, 1), round(asyncio.get_running_loop().time() - time, 1)) self.assertEqual(var3, True) time = asyncio.get_running_loop().time() var3 = False
def test_buy_and_sell(self):
    """Fill an aggressive limit buy, check the completion/fill events and the
    quote-balance update, then sell the same amount back and check the
    corresponding sell-side events and base balance."""
    price = self.connector.get_price(self.trading_pair, True) * Decimal("1.05")
    price = self.connector.quantize_order_price(self.trading_pair, price)
    amount = self.connector.quantize_order_amount(self.trading_pair, Decimal("15"))
    quote_bal = self.connector.get_available_balance(self.quote_token)
    base_bal = self.connector.get_available_balance(self.base_token)
    order_id = self._place_order(True, amount, OrderType.LIMIT, price, 1, None, fixture.WS_TRADE)
    order_completed_event = self.ev_loop.run_until_complete(self.event_logger.wait_for(BuyOrderCompletedEvent))
    self.ev_loop.run_until_complete(asyncio.sleep(2))
    trade_events = [t for t in self.event_logger.event_log if isinstance(t, OrderFilledEvent)]
    base_amount_traded = sum(t.amount for t in trade_events)
    quote_amount_traded = sum(t.amount * t.price for t in trade_events)
    # NOTE(review): assertTrue on a list comprehension passes for ANY
    # non-empty list — presumably all(...) was intended; confirm.
    self.assertTrue([evt.order_type == OrderType.LIMIT for evt in trade_events])
    self.assertEqual(order_id, order_completed_event.order_id)
    self.assertEqual(amount, order_completed_event.base_asset_amount)
    self.assertEqual("BTC", order_completed_event.base_asset)
    self.assertEqual("INR", order_completed_event.quote_asset)
    self.assertAlmostEqual(base_amount_traded, order_completed_event.base_asset_amount)
    self.assertAlmostEqual(quote_amount_traded, order_completed_event.quote_asset_amount)
    self.assertTrue(any([isinstance(event, BuyOrderCreatedEvent) and event.order_id == order_id for event in self.event_logger.event_log]))
    # check available quote balance gets updated, we need to wait a bit for the balance message to arrive
    expected_quote_bal = quote_bal - quote_amount_traded
    self._mock_ws_bal_update(self.quote_token, expected_quote_bal)
    self.ev_loop.run_until_complete(asyncio.sleep(1))
    self.assertAlmostEqual(expected_quote_bal, self.connector.get_available_balance(self.quote_token))
    # Reset the logs
    self.event_logger.clear()
    # Try to sell back the same amount to the exchange, and watch for completion event.
    price = self.connector.get_price(self.trading_pair, True) * Decimal("0.95")
    price = self.connector.quantize_order_price(self.trading_pair, price)
    amount = self.connector.quantize_order_amount(self.trading_pair, Decimal("15"))
    order_id = self._place_order(False, amount, OrderType.LIMIT, price, 2, None, fixture.WS_TRADE)
    order_completed_event = self.ev_loop.run_until_complete(self.event_logger.wait_for(SellOrderCompletedEvent))
    trade_events = [t for t in self.event_logger.event_log if isinstance(t, OrderFilledEvent)]
    base_amount_traded = sum(t.amount for t in trade_events)
    quote_amount_traded = sum(t.amount * t.price for t in trade_events)
    self.assertTrue([evt.order_type == OrderType.LIMIT for evt in trade_events])
    self.assertEqual(order_id, order_completed_event.order_id)
    self.assertEqual(amount, order_completed_event.base_asset_amount)
    self.assertEqual("BTC", order_completed_event.base_asset)
    self.assertEqual("INR", order_completed_event.quote_asset)
    self.assertAlmostEqual(base_amount_traded, order_completed_event.base_asset_amount)
    self.assertAlmostEqual(quote_amount_traded, order_completed_event.quote_asset_amount)
    self.assertGreater(order_completed_event.fee_amount, Decimal(0))
    self.assertTrue(any([isinstance(event, SellOrderCreatedEvent) and event.order_id == order_id for event in self.event_logger.event_log]))
    # check available base balance gets updated, we need to wait a bit for the balance message to arrive
    expected_base_bal = base_bal
    self._mock_ws_bal_update(self.base_token, expected_base_bal)
    self.ev_loop.run_until_complete(asyncio.sleep(1))
    self.assertAlmostEqual(expected_base_bal, self.connector.get_available_balance(self.base_token), 5)
def mycoro():
    # Legacy generator-based coroutine closing over `loop` from the
    # enclosing scope: confirm the loop is running, yield once, then
    # stop the loop so run_forever() returns.
    assert loop.is_running()
    yield from asyncio.sleep(0)
    loop.stop()

# NOTE(review): layout reconstructed from a collapsed line — this assertion
# appears to run in the enclosing scope after the loop has stopped; confirm.
assert not loop.is_running()
def delete():
    # Closure scheduled as a fire-and-forget coroutine: wait
    # `delete_after` seconds, then remove `msg` (both captured from the
    # enclosing scope).
    # NOTE(review): the loop= keyword to asyncio.sleep was removed in
    # Python 3.10 — this only runs on older Pythons.
    yield from asyncio.sleep(delete_after, loop=self.loop)
    yield from self.delete_message(msg)
async def my_sleep_func():
    """Print a marker, then sleep a random 0–5 seconds.

    Fix: converted from a legacy ``yield from`` generator coroutine — a
    plain generator cannot ``yield from`` the native coroutine
    ``asyncio.sleep`` on modern Python (TypeError), and generator-based
    coroutines were removed in Python 3.11.
    """
    print('hoge')
    await asyncio.sleep(random.randint(0, 5))
async def outside_test():
    # Closure: after a short delay, bump `var1` in the enclosing test's
    # scope (hence the nonlocal declaration).
    await asyncio.create_task(asyncio.sleep(0.1))
    nonlocal var1
    var1 += 1
async def slow_function():
    """Simulate slow work: yield to the event loop for 3 seconds, then return 42.

    Fix: converted from a legacy ``yield from`` generator coroutine — a
    plain generator cannot ``yield from`` the native coroutine
    ``asyncio.sleep`` on modern Python, and generator-based coroutines
    were removed in Python 3.11.
    """
    await asyncio.sleep(3)  # awaiting keeps the event loop free while "working"
    return 42
def test_loop_callback_exceptions_bubble_up(loop):
    """Verify that test exceptions raised in event loop callbacks bubble up."""
    def boom():
        raise ExceptionTester("Test Message")

    loop.call_soon(boom)
    loop.run_until_complete(asyncio.sleep(.1))