def receive_log():
    try:
        transport, protocol = yield from aioamqp.connect('localhost', 5672)
    except aioamqp.AmqpClosedConnection:
        print("closed connections")
        return

    channel = yield from protocol.channel()
    exchange_name = 'direct_logs'
    # TODO let rabbitmq choose the queue name
    queue_name = 'queue-%s' % random.randint(0, 10000)

    yield from channel.exchange(exchange_name, 'direct')

    yield from asyncio.wait_for(channel.queue(queue_name, durable=False, auto_delete=True), timeout=10)

    severities = sys.argv[1:]
    if not severities:
        print("Usage: %s [info] [warning] [error]" % (sys.argv[0],))
        sys.exit(1)

    for severity in severities:
        yield from asyncio.wait_for(channel.queue_bind(exchange_name='direct_logs',
                                                       queue_name=queue_name,
                                                       routing_key=severity), timeout=10)

    print(' [*] Waiting for logs. To exit press CTRL+C')
    yield from asyncio.wait_for(channel.basic_consume(callback, queue_name=queue_name), timeout=10)
    yield from asyncio.Event().wait()
def http_request_demo():
    listener = BlenderListener(event_type="SPACE", catch=True)
    title("HTTP Requests")
    prompt("Retrieving blendernation's newsfeed ...")
    request_coro = aiohttp.request('get', 'http://feeds.feedburner.com/Blendernation')
    try:
        request = yield from wait_for(request_coro, 3)
    except asyncio.TimeoutError:
        prompt("Sorry, server couldn't be reached.")
        yield from sleep(3)
        return
    feed_title_text = bpy.data.objects['feed_title']
    feed_text = bpy.data.objects['feed_text']
    feed_title_text.hide = False
    feed_text.hide = False
    prompt("Reading Feed...")
    text = yield from wait_for(request.text(), 10)
    feed = feedparser.parse(text)
    feed_title_text.data.body = feed['feed']['title']
    feed_content = ""
    for entry in feed['entries'][:10]:
        feed_content += entry['title'] + "\n"
    feed_text.data.body = feed_content
    prompt("Press Space to continue...")
    yield from listener.wait()
    listener.remove()
    feed_title_text.hide = True
    feed_text.hide = True
def test_watch(loop, self):
    """ INTEGRATION: Receive a watch event from other process """
    client = aio_etcd.Client(port=6001, allow_reconnect=True, loop=loop)
    set_result = yield from client.set('/test-key', 'test-value')

    queue = asyncio.Queue(loop=loop)

    @asyncio.coroutine
    def change_value(key, newValue):
        c = aio_etcd.Client(port=6001, loop=loop)
        yield from c.set(key, newValue)

    @asyncio.coroutine
    def watch_value(key, queue):
        c = aio_etcd.Client(port=6001, loop=loop)
        w = yield from c.watch(key)
        yield from queue.put(w.value)

    watcher = asyncio.async(watch_value('/test-key', queue), loop=loop)
    yield from asyncio.sleep(0.1, loop=loop)
    changer = asyncio.async(change_value('/test-key', 'new-test-value'), loop=loop)

    value = yield from asyncio.wait_for(queue.get(), timeout=2, loop=loop)
    yield from asyncio.wait_for(watcher, timeout=5, loop=loop)
    yield from asyncio.wait_for(changer, timeout=5, loop=loop)
    assert value == 'new-test-value'
def build_list_string(self, connection, path):
    fields = []
    is_dir = yield from asyncio.wait_for(
        connection.path_io.is_dir(path),
        connection.path_timeout,
        loop=connection.loop,
    )
    dir_flag = "d" if is_dir else "-"
    stats = yield from asyncio.wait_for(
        connection.path_io.stat(path),
        connection.path_timeout,
        loop=connection.loop,
    )
    default = list("xwr") * 3
    for i in range(9):
        if (stats.st_mode >> i) & 1 == 0:
            default[i] = "-"
    fields.append(dir_flag + str.join("", reversed(default)))
    fields.append(str(stats.st_nlink))
    fields.append("none")
    fields.append("none")
    fields.append(str(stats.st_size))
    t = datetime.datetime.fromtimestamp(stats.st_ctime)
    fields.append(t.strftime("%b %d %Y"))
    fields.append(path.name)
    s = str.join(" ", fields)
    return s
def receive_log():
    try:
        protocol = yield from aioamqp.connect('localhost', 5672)
    except aioamqp.AmqpClosedConnection:
        print("closed connections")
        return

    channel = yield from protocol.channel()
    exchange_name = 'topic_logs'
    # TODO let rabbitmq choose the queue name
    queue_name = 'queue-%s' % random.randint(0, 10000)

    yield from channel.exchange(exchange_name, 'topic')

    yield from asyncio.wait_for(channel.queue(queue_name, durable=False, auto_delete=True), timeout=10)

    binding_keys = sys.argv[1:]
    if not binding_keys:
        print("Usage: %s [binding_key]..." % (sys.argv[0],))
        sys.exit(1)

    for binding_key in binding_keys:
        yield from asyncio.wait_for(channel.queue_bind(exchange_name='topic_logs',
                                                       queue_name=queue_name,
                                                       routing_key=binding_key), timeout=10)

    print(' [*] Waiting for logs. To exit press CTRL+C')
    yield from asyncio.wait_for(channel.basic_consume(queue_name), timeout=10)

    while True:
        consumer_tag, delivery_tag, message = yield from channel.consume()
        print("consumer {} received {} ({})".format(consumer_tag, message, delivery_tag))
def _get(url):
    retry = 8
    while retry:
        try:
            with (yield from self.sem):
                r = yield from asyncio.wait_for(self.session.request('get', url), 10)
                t = yield from asyncio.wait_for(r.text(), 10)
            break
        except Exception as e:
            print('Exception:', e, url)
            t = empty_html
            retry -= 1
            yield from asyncio.sleep(2)
            print("retry: %s" % (8 - retry), url)
    th = self.threshold
    rr = cb(t, *args, **kwargs)
    res = SpliterResponse()
    res.itemcount, res.pagecount = rr['itemcount'], rr['pagecount']
    if res.pagecount is None:
        res.status = None
        res.pagecount = 0
    elif res.pagecount < th:
        res.status = True
    else:
        res.status = False
    return res
def request(self, method, url, **kwargs):
    params = {}
    for key in ['noQuotoPath']:
        try:
            params[key] = kwargs.pop(key)
        except KeyError:
            pass
    kwargs['connector'] = self.connector
    kwargs['request_class'] = _ClientRequest.factory(**params)
    if 'headers' not in kwargs:
        kwargs['headers'] = self.headers
    else:
        hdr = self.headers.copy()
        for k, v in kwargs['headers'].items():
            hdr[k] = v
        kwargs['headers'] = hdr
    try:
        response = yield from asyncio.wait_for(aiohttp.request(method, url, **kwargs), self.timeout)
        content = yield from asyncio.wait_for(response.read(), self.timeout)
    except asyncio.TimeoutError:
        raise asyncio.TimeoutError('%s %s timeout: %s' % (method, url, self.timeout))
    return self.Response(response, content)
async def test_run_stop(patch_sleep_resolution, source_station, destination_station):
    instance = scanner.UZScanner(mock.Mock(), 0)
    instance.scan = AIOMock()
    instance.session = mock.Mock()
    run_task = instance.run()
    asyncio.ensure_future(run_task)

    success_cb_id = 'id123'
    firstname = 'firstname'
    lastname = 'lastname'
    date = datetime(2016, 1, 1)
    train_num = '741K'
    ct_letter = 'C1'
    scan_id = instance.add_item(
        success_cb_id, firstname, lastname, date,
        source_station, destination_station, train_num, ct_letter)
    await asyncio.sleep(0)
    instance.scan.assert_called_once_with(scan_id, mock.ANY)
    assert instance.status(scan_id) == (0, None)
    assert instance.abort(scan_id)
    with pytest.raises(scanner.UknkownScanID):
        instance.status(scan_id)
    with pytest.raises(scanner.UknkownScanID):
        instance.abort(scan_id)

    instance.stop()
    # await added: without it wait_for() builds a coroutine that never runs
    await asyncio.wait_for(run_task, 1)
    instance.cleanup()
    instance.session.close.assert_called_once_with()
def handle_client(client_reader, client_writer):
    req_line = yield from asyncio.wait_for(client_reader.readline(), timeout=10.0)
    # print('Req line "{}"'.format(req_line))
    while True:
        header = yield from asyncio.wait_for(client_reader.readline(), timeout=10.0)
        if header == b'\r\n':
            break
        # print('Header "{}"'.format(header))
        key, val = map(str.strip, header.rstrip().decode().lower().split(':', 1))
    method, path, version = req_line.decode().split(' ')
    # print('method = {!r}; path = {!r}; version = {!r}'.format(method, path, version))
    if path.startswith('/send_req'):
        path, args = path.split('?')
        args = loads(unquote(args))
        request_handler = RequestHandler()
        yield from request_handler.run(client_writer, args)
        # self.reader, self.writer, self.transport, self._request_handler, args)
    elif path.startswith('/get_login'):
        path, args = path.split('?')
        args = loads(unquote(args))
        request_handler = RequestHandler()
        yield from request_handler.get_login(client_writer, args)
    elif path.startswith('/dev'):
        path = path[4:]
        send_js_dev(client_writer, path)
    else:
        path = path[1:]
        send_js_prod(client_writer, path)
def do(self):
    try:
        if self.epoch_tag:
            headers = {'etag': self.epoch_tag}
        else:
            headers = None
        r = yield from asyncio.wait_for(
            self.http_session.get(config.SETTINGS_URL, headers=headers),
            300)
        D("waiter status %d" % r.status)
        if r.status == 200:
            rawresp = yield from asyncio.wait_for(r.text(), 600)
            resp = utils.json_load_round_float(rawresp)
            self.epoch_tag = resp['epoch_tag']
            D("waiter got epoch tag %s" % self.epoch_tag)
            epoch = self.epoch_tag.split('-')[0]
            if self.server.params.receive(resp['params'], epoch):
                self.server.reload_signal(True)
        elif r.status == 304:
            pass
        else:
            # longer timeout to avoid spinning
            text = yield from asyncio.wait_for(r.text(), 600)
            D("Bad server response. %d %s" % (r.status, text))
            yield from asyncio.sleep(30)
    except aiohttp.errors.ClientError as e:
        self.limitlog.log("Error with configwaiter: %s" % str(e))
    except asyncio.TimeoutError as e:
        self.limitlog.log("configwaiter http timed out: %s" % str(e))
    except Exception as e:
        EX("Error watching config: %s" % str(e))
def receive_log():
    try:
        protocol = yield from aioamqp.connect('localhost', 5672)
    except aioamqp.AmqpClosedConnection:
        print("closed connections")
        return

    channel = yield from protocol.channel()
    exchange_name = 'logs'
    # TODO let rabbitmq choose the queue name
    queue_name = 'queue-%s' % random.randint(0, 10000)

    yield from channel.exchange(exchange_name, 'fanout')

    yield from asyncio.wait_for(channel.queue(queue_name, durable=False, auto_delete=True), timeout=10)

    yield from asyncio.wait_for(channel.queue_bind(exchange_name=exchange_name,
                                                   queue_name=queue_name,
                                                   routing_key=''), timeout=10)

    print(' [*] Waiting for logs. To exit press CTRL+C')
    yield from asyncio.wait_for(channel.basic_consume(queue_name), timeout=10)

    while True:
        consumer_tag, delivery_tag, message = yield from channel.consume()
        print("consumer {} received {} ({})".format(consumer_tag, message, delivery_tag))

    # NOTE: unreachable while the consume loop above runs forever
    yield from asyncio.sleep(10)
    yield from asyncio.wait_for(protocol.client_close(), timeout=10)
def list_worker(self, connection, rest):
    data_reader, data_writer = connection.data_connection
    del connection.data_connection
    with contextlib.closing(data_writer) as data_writer:
        paths = yield from asyncio.wait_for(
            connection.path_io.list(real_path),
            connection.path_timeout,
            loop=connection.loop,
        )
        for path in paths:
            s = yield from self.build_list_string(connection, path)
            message = s + common.end_of_line
            data_writer.write(str.encode(message, "utf-8"))
            yield from asyncio.wait_for(
                data_writer.drain(),
                connection.socket_timeout,
                loop=connection.loop,
            )
    connection.response("226", "list data transfer done")
    return True
def __startBlocking(self, event):
    if self.protover == 1:
        self.emit("readyForMoves")
        return_value = "ready"
    if self.protover == 2:
        try:
            return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND)
            if return_value == "not ready":
                return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND)
            # Gaviota sends done=0 after "xboard" and after "protover 2" too
            if return_value == "not ready":
                return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND)
            self.emit("readyForOptions")
            self.emit("readyForMoves")
        except asyncio.TimeoutError:
            log.warning("Got timeout error", extra={"task": self.defname})
            raise PlayerIsDead
        except:
            log.warning("Unknown error", extra={"task": self.defname})
            raise PlayerIsDead
        else:
            if return_value == "die":
                raise PlayerIsDead
            assert return_value == "ready" or return_value == "del"
    if event is not None:
        event.set()
def close_connection(self):
    # 7.1.1. Close the WebSocket Connection
    if self.state == 'CLOSED':
        return

    # Defensive assertion for protocol compliance.
    if self.state != 'CLOSING':                             # pragma: no cover
        raise InvalidState("Cannot close a WebSocket connection "
                           "in the {} state".format(self.state))

    if self.is_client:
        try:
            yield from asyncio.wait_for(self.connection_closed,
                                        timeout=self.timeout)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            pass

        if self.state == 'CLOSED':
            return

    assert self.writer.can_write_eof(), "WebSocket runs over TCP/IP!"
    self.writer.write_eof()
    self.writer.close()

    try:
        yield from asyncio.wait_for(self.connection_closed,
                                    timeout=self.timeout)
    except (asyncio.CancelledError, asyncio.TimeoutError):
        pass
def close_connection(self, force=False):
    # 7.1.1. Close the WebSocket Connection
    if self.state == CLOSED:
        return

    # Defensive assertion for protocol compliance.
    if self.state != CLOSING and not force:                 # pragma: no cover
        raise InvalidState("Cannot close a WebSocket connection "
                           "in the {} state".format(self.state_name))

    if self.is_client and not force:
        try:
            yield from asyncio.wait_for(
                self.connection_closed, self.timeout, loop=self.loop)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            pass

        if self.state == CLOSED:
            return

    # Attempt to terminate the TCP connection properly.
    # If the socket is already closed, this may crash.
    try:
        if self.writer.can_write_eof():
            self.writer.write_eof()
    except Exception:                                       # pragma: no cover
        pass

    self.writer.close()

    try:
        yield from asyncio.wait_for(
            self.connection_closed, self.timeout, loop=self.loop)
    except (asyncio.CancelledError, asyncio.TimeoutError):
        pass
def build_mlsx_string(self, connection, path):
    stats = {}
    if (yield from asyncio.wait_for(connection.path_io.is_file(path),
                                    connection.path_timeout,
                                    loop=connection.loop)):
        stats["Type"] = "file"
    elif (yield from asyncio.wait_for(connection.path_io.is_dir(path),
                                      connection.path_timeout,
                                      loop=connection.loop)):
        stats["Type"] = "dir"
    else:
        raise errors.PathIsNotFileOrDir(path)

    raw = yield from asyncio.wait_for(
        connection.path_io.stat(path),
        connection.path_timeout,
        loop=connection.loop,
    )
    for attr, fact in Server.path_facts:
        stats[fact] = getattr(raw, attr)

    s = ""
    for fact, value in stats.items():
        s += str.format("{}={};", fact, value)
    s += " " + path.name
    return s
def first_connection(self):
    self.send_notice("*** Looking up your hostname...")
    self.send_notice("*** Checking Ident")
    loop = asyncio.get_event_loop()
    hosttask = loop.run_in_executor(None, socket.gethostbyaddr, self.addr)
    try:
        hostname = yield from asyncio.wait_for(hosttask, timeout=5)
        self.send_notice("*** Found your hostname")
    except:
        hostname = self.addr
        self.send_notice("*** Couldn't look up your hostname")
    self.addrhost = hostname[0]
    lport = self.transport.get_extra_info('sockname')[1]
    rport = self.peer[1]
    request = "{}, {}".format(rport, lport)
    reply = yield from asyncio.Task(self.IdentHandler.handle_ident(request, self.addr, 113))
    if reply is None:
        self.send_notice("*** No Ident response")
        self.ident = "~{}".format(self.ident)
    else:
        print("Ident Reply was {}".format(reply))
        self.send_notice("*** Got Ident response")
        self.ident = "{}".format(self.ident)
    self.prefix = "{}!{}@{}".format(self.nick, self.ident, self.addrhost)
    self.ConnHan.connections[self.uuid]['user'] = self.prefix
    yield from asyncio.wait_for(self.send_motd(), timeout=5)
    self.MessageGenerator.code_message(leaf=self.nick, id='MODE', name=self.nick,
                                       message="+{}".format(self.modes))
def _longpoll_request(self):
    """Open a long-polling request and receive push data.

    This method uses keep-alive to make re-opening the request faster, but
    the remote server will set the "Connection: close" header once an
    hour.

    Raises hangups.NetworkError or UnknownSIDError.
    """
    params = {
        'VER': 8,
        'gsessionid': self._gsessionid_param,
        'RID': 'rpc',
        't': 1,  # trial
        'SID': self._sid_param,
        'CI': 0,
        'ctype': 'hangouts',  # client type
        'TYPE': 'xmlhttp',
    }
    headers = get_authorization_headers(self._cookies['SAPISID'])
    logger.info('Opening new long-polling request')
    try:
        res = yield from asyncio.wait_for(aiohttp.request(
            'get', CHANNEL_URL_PREFIX.format('channel/bind'),
            params=params, cookies=self._cookies, headers=headers,
            connector=self._connector
        ), CONNECT_TIMEOUT)
    except asyncio.TimeoutError:
        raise exceptions.NetworkError('Request timed out')
    except aiohttp.errors.ClientError as e:
        raise exceptions.NetworkError('Request connection error: {}'
                                      .format(e))
    if res.status == 400 and res.reason == 'Unknown SID':
        raise UnknownSIDError('SID became invalid')
    elif res.status != 200:
        raise exceptions.NetworkError(
            'Request returned unexpected status: {}: {}'
            .format(res.status, res.reason)
        )
    while True:
        try:
            chunk = yield from asyncio.wait_for(
                res.content.read(MAX_READ_BYTES), PUSH_TIMEOUT
            )
        except asyncio.TimeoutError:
            raise exceptions.NetworkError('Request timed out')
        except aiohttp.errors.ClientError as e:
            raise exceptions.NetworkError('Request connection error: {}'
                                          .format(e))
        except asyncio.CancelledError:
            # Prevent ResourceWarning when channel is disconnected.
            res.close()
            raise
        if chunk:
            yield from self._on_push_data(chunk)
        else:
            # Close the response to allow the connection to be reused for
            # the next request.
            res.close()
            break
def test_simple_read_write(self):
    loop = asyncio.get_event_loop()
    with PipeQueue(self.path) as pq:
        loop.run_until_complete(
            asyncio.wait_for(pq.put("S"), 2))
        rv = loop.run_until_complete(
            asyncio.wait_for(pq.get(), 2))
        self.assertEqual("S", rv)
def test_expunge_messages(self):
    self.imapserver.receive(Mail.create(['user']))
    self.imapserver.receive(Mail.create(['user']))
    imap_client = yield from self.login_user('user', 'pass', select=True)

    yield from asyncio.wait_for(self.loop.run_in_executor(None, imap_client.expunge), 1)

    self.assertEquals(('OK', [b'0']), (yield from asyncio.wait_for(
        self.loop.run_in_executor(None, functools.partial(imap_client.select)), 1)))
def test_tuple_read_write(self):
    loop = asyncio.get_event_loop()
    payload = (12, "string")
    with PipeQueue(self.path) as pq:
        loop.run_until_complete(
            asyncio.wait_for(pq.put(payload), 2))
        rv = loop.run_until_complete(
            asyncio.wait_for(pq.get(), 2))
        self.assertEqual(payload, rv)
def _longpoll_request(self):
    """Open a long-polling request and receive push data.

    It's important to use keep-alive so a connection is maintained to the
    specific server that holds the session (likely because of load
    balancing). Without keep-alive, long polling requests will frequently
    fail with 400 "Unknown SID".

    Raises hangups.NetworkError or UnknownSIDError.
    """
    params = {
        'VER': 8,
        'clid': self._clid_param,
        'prop': self._prop_param,
        'ec': self._ec_param,
        'gsessionid': self._gsessionid_param,
        'RID': 'rpc',
        't': 1,  # trial
        'SID': self._sid_param,
        'CI': 0,
    }
    URL = 'https://talkgadget.google.com/u/0/talkgadget/_/channel/bind'
    logger.info('Opening new long-polling request')
    try:
        res = yield from asyncio.wait_for(aiohttp.request(
            'get', URL, params=params, cookies=self._cookies,
            connector=self._connector
        ), CONNECT_TIMEOUT)
    except asyncio.TimeoutError:
        raise exceptions.NetworkError('Request timed out')
    except aiohttp.errors.ConnectionError as e:
        raise exceptions.NetworkError('Request connection error: {}'
                                      .format(e))
    if res.status == 400 and res.reason == 'Unknown SID':
        raise UnknownSIDError('SID became invalid')
    elif res.status != 200:
        raise exceptions.NetworkError(
            'Request returned unexpected status: {}: {}'
            .format(res.status, res.reason)
        )
    while True:
        try:
            chunk = yield from asyncio.wait_for(
                res.content.read(MAX_READ_BYTES), PUSH_TIMEOUT
            )
        except asyncio.TimeoutError:
            raise exceptions.NetworkError('Request timed out')
        except aiohttp.errors.ConnectionError as e:
            raise exceptions.NetworkError('Request connection error: {}'
                                          .format(e))
        if chunk:
            yield from self._on_push_data(chunk)
        else:
            # Close the response to allow the connection to be reused for
            # the next request.
            res.close()
            break
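# The pattern in the two _longpoll_request variants above, wrapping each
# content.read() in asyncio.wait_for, generalizes to any streaming read where
# silence beyond a threshold should be treated as an error. A minimal
# self-contained sketch of that idea (the names and the 4096-byte chunk size
# are illustrative, not taken from the snippets above):
import asyncio


async def read_with_idle_timeout(reader, idle_timeout):
    """Yield chunks from an asyncio StreamReader, failing if the peer goes silent."""
    while True:
        try:
            chunk = await asyncio.wait_for(reader.read(4096), idle_timeout)
        except asyncio.TimeoutError:
            raise ConnectionError("peer sent nothing for %s seconds" % idle_timeout)
        if not chunk:
            return  # clean EOF
        yield chunk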
def frame_parser(self, reader, writer):
    # This takes care of the framing.
    last_request_id = 0
    while True:
        # Read the frame header, parse it, read the data.
        # NOTE: The readline() and readexactly() calls will hang
        # if the client doesn't send enough data but doesn't
        # disconnect either.  We add a timeout to each.  (But the
        # timeout should really be implemented by StreamReader.)
        framing_b = yield from asyncio.wait_for(
            reader.readline(),
            timeout=args.timeout, loop=self.loop)
        if random.random()*100 < args.fail_percent:
            logging.warn('Inserting random failure')
            yield from asyncio.sleep(args.fail_sleep*random.random(),
                                     loop=self.loop)
            writer.write(b'error random failure\r\n')
            break
        logging.debug('framing_b = %r', framing_b)
        if not framing_b:
            break  # Clean close.
        try:
            frame_keyword, request_id_b, byte_count_b = framing_b.split()
        except ValueError:
            writer.write(b'error unparseable frame\r\n')
            break
        if frame_keyword != b'request':
            writer.write(b'error frame does not start with request\r\n')
            break
        try:
            request_id, byte_count = int(request_id_b), int(byte_count_b)
        except ValueError:
            writer.write(b'error unparsable frame parameters\r\n')
            break
        if request_id != last_request_id + 1 or byte_count < 2:
            writer.write(b'error invalid frame parameters\r\n')
            break
        last_request_id = request_id
        request_b = yield from asyncio.wait_for(
            reader.readexactly(byte_count),
            timeout=args.timeout, loop=self.loop)
        try:
            request = json.loads(request_b.decode('utf8'))
        except ValueError:
            writer.write(b'error unparsable json\r\n')
            break
        response = self.handle_request(request)  # Not a coroutine.
        if response is None:
            writer.write(b'error unhandlable request\r\n')
            break
        response_b = json.dumps(response).encode('utf8') + b'\r\n'
        byte_count = len(response_b)
        framing_s = 'response {} {}\r\n'.format(request_id, byte_count)
        writer.write(framing_s.encode('ascii'))
        yield from asyncio.sleep(args.resp_sleep*random.random(),
                                 loop=self.loop)
        writer.write(response_b)
def stop(self, key):
    self.tasks[key].cancel()
    # NOTE: wait_for() is never awaited here, so this line does not actually
    # wait for the task; the commented-out run_until_complete below shows the
    # blocking alternative.
    asyncio.wait_for(self.tasks[key], 100, loop=self.loop)
    # try:
    #     self.loop.run_until_complete(self.tasks[key])
    # except CancelledError:
    #     pass
    del self.tasks[key]
    return self
def retrieve(self, *command_args, conn_type="I", use_lines=False,
             callback=None, block_size=8192):
    """
    :py:func:`asyncio.coroutine`

    Retrieve data from passive connection with some command

    :param command_args: arguments for :py:meth:`aioftp.Client.command`

    :param conn_type: connection type ("I", "A", "E", "L")
    :type conn_type: :py:class:`str`

    :param use_lines: use lines or block size for read
    :type use_lines: :py:class:`bool`

    :param callback: callback function with one argument — received
        :py:class:`bytes` from server.
    :type callback: :py:func:`callable`

    :param block_size: block size for transaction
    :type block_size: :py:class:`int`

    :raises asyncio.TimeoutError: if there was no data for `timeout` period
    """
    reader, writer = yield from self.get_passive_connection(conn_type)
    yield from self.command(*command_args)
    with contextlib.closing(writer) as writer:
        while True:
            if use_lines:
                block = yield from asyncio.wait_for(
                    reader.readline(),
                    self.timeout,
                    loop=self.loop,
                )
            else:
                block = yield from asyncio.wait_for(
                    reader.read(block_size),
                    self.timeout,
                    loop=self.loop,
                )
            if not block:
                break
            if callback:
                callback(block)
    yield from self.command(None, "2xx", "1xx")
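# A hedged sketch of driving a low-level helper like retrieve() above. The
# "RETR ..." command string and the "1xx" expected-code argument are
# assumptions inferred from how self.command() is called inside the method,
# not a documented aioftp API; the callback simply accumulates the received
# blocks.
@asyncio.coroutine
def download(client, name):
    chunks = []
    yield from client.retrieve("RETR " + name, "1xx", callback=chunks.append)
    return b"".join(chunks)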
def setWebhook(self, url=None, certificate=None):
    p = {'url': url}
    if certificate:
        files = {'certificate': certificate}
        r = yield from asyncio.wait_for(
                aiohttp.post(self._methodurl('setWebhook'),
                             params=self._rectify(p),
                             data=files),
                self._http_timeout)
    else:
        r = yield from asyncio.wait_for(
                aiohttp.post(self._methodurl('setWebhook'),
                             params=self._rectify(p)),
                self._http_timeout)
    return (yield from self._parse(r))
def test_server_login(self):
    pending_imap = self.loop.run_in_executor(
        None, functools.partial(imaplib.IMAP4, host='127.0.0.1', port=12345))
    imap_client = yield from asyncio.wait_for(pending_imap, 1)

    pending_login = self.loop.run_in_executor(
        None, functools.partial(imap_client.login, 'user', 'pass'))
    result, data = yield from asyncio.wait_for(pending_login, 1)

    self.assertEqual('OK', result)
    self.assertEqual([b'LOGIN completed'], data)
    self.assertEquals(imapserver.AUTH, self.imapserver.get_connection('user').state)
def wait(self):
    self.running = False
    if self.reverse:
        self.tunnel.close()
        try:
            yield from asyncio.wait_for(self.tunnel.wait_closed(), 2.0, loop=self.loop)
        except asyncio.TimeoutError:
            pass
    else:
        yield from asyncio.wait_for(self.tunnel, 2.0, loop=self.loop)
def test_callback_is_called_when_connection_is_lost(self):
    queue = asyncio.Queue()
    imap_client = aioimaplib.IMAP4(
        port=12345, loop=self.loop, timeout=3,
        conn_lost_cb=(lambda m: queue.put_nowait('called with %s' % m)))
    yield from asyncio.wait_for(imap_client.wait_hello_from_server(), 2)
    yield from imap_client.login('login', 'password')

    yield from self._shutdown_server()

    self.assertEqual('called with None', (yield from asyncio.wait_for(queue.get(), timeout=2)))
def test_npchat(username, messages, host, port, do_output, alive, user_list):
    # Wait a random amount of time before logging in. This is to prevent
    # overflowing the `listen` backlog
    yield from asyncio.sleep(random.uniform(0, 2))

    print("Connecting {username} on port {port}".format(
        username=username, port=port))

    # Connect to chat server
    reader, writer = yield from asyncio.open_connection(host, port)

    if do_output:
        reader_task = asyncio.Task(test_npchat_reader(username, reader))
    else:
        reader_task = asyncio.Task(test_npchat_devnull(reader))

    # Login
    writer.write("ME IS {username}\n"
                 .format(username=username)
                 .encode('ascii'))

    # Wait 1 second before sending messages
    yield from asyncio.sleep(1)

    # Run for an alive time, or forever
    if alive:
        end = time() + random.uniform(*alive)
    else:
        end = float('inf')

    while time() < end:
        # Wait 1-5 seconds
        yield from asyncio.sleep(random.uniform(1, 5))

        # 0-3 BROADCAST, 4 send to random user, 5 WHO HERE
        action = random.randrange(6)

        # BROADCAST
        if 0 <= action < 4:
            writer.write('BROADCAST {name}\n'
                         .format(name=username).encode('ascii'))
            writer.writelines(common.make_body(random.choice(messages)))
        elif action == 4:
            writer.write('SEND {name} {recipient}\n'
                         .format(name=username,
                                 recipient=random.choice(user_list))
                         .encode('ascii'))
            writer.writelines(common.make_body(random.choice(messages)))
        elif action == 5:
            writer.write('WHO HERE {name}\n'
                         .format(name=username).encode('ascii'))

    # Wait 1 final second, then logout
    yield from asyncio.sleep(1)
    writer.write('LOGOUT {name}\n'.format(name=username).encode('ascii'))
    # yield from added: a bare wait_for() call builds a coroutine that never runs
    yield from asyncio.wait_for(reader_task, None)
def test_listen_for_order_book_snapshots(self, mock_get_trading_pairs, mock_get_snapshot):
    """
    Example order book message added to the queue:
    LiquidOrderBookMessage(
        type = <OrderBookMessageType.SNAPSHOT: 1>,
        content = {
            'buy_price_levels': [
                ['181.95138', '0.69772000'],
                ...
            ],
            'sell_price_levels': [
                ['182.11620', '0.32400000'],
                ...
            ],
            'trading_pair': 'BTC-USDC'
        },
        timestamp = 1573041256.2376761)
    """
    loop = asyncio.get_event_loop()

    # Instantiate empty async queue and make sure the initial size is 0
    q = asyncio.Queue()
    self.assertEqual(q.qsize(), 0)

    # Mock Future() object return value as the request response
    f1 = asyncio.Future()
    f1.set_result(
        {
            **FixtureLiquid.SNAPSHOT_2,
            'trading_pair': 'ETH-USD',
            'product_id': 27
        }
    )
    f2 = asyncio.Future()
    f2.set_result(
        {
            **FixtureLiquid.SNAPSHOT_1,
            'trading_pair': 'LCX-BTC',
            'product_id': 538
        }
    )
    mock_get_snapshot.side_effect = [f1, f2]

    # Mock get trading pairs
    mocked_trading_pairs = ['ETH-USD', 'LCX-BTC']
    f = asyncio.Future()
    f.set_result(mocked_trading_pairs)
    mock_get_trading_pairs.return_value = f

    # Listening for tracking pairs within the set timeout timeframe
    timeout = 6
    print('{class_name} test {test_name} is going to run for {timeout} seconds, starting now'.format(
        class_name=self.__class__.__name__,
        test_name=inspect.stack()[0][3],
        timeout=timeout))

    try:
        loop.run_until_complete(
            # Force exit from event loop after set timeout seconds
            asyncio.wait_for(
                LiquidAPIOrderBookDataSource().listen_for_order_book_snapshots(ev_loop=loop, output=q),
                timeout=timeout
            )
        )
    except concurrent.futures.TimeoutError as e:
        print(e)

    # Make sure that the number of items in the queue after certain seconds make sense
    # For instance, when the asyncio sleep time is set to 5 seconds in the method
    # If we configure timeout to be the same length, only 1 item has enough time to be received
    self.assertGreaterEqual(q.qsize(), 1)

    # Validate received response has correct data types
    first_item = q.get_nowait()
    self.assertIsInstance(first_item, LiquidOrderBookMessage)
    self.assertIsInstance(first_item.type, OrderBookMessageType)

    # Validate order book message type
    self.assertEqual(first_item.type, OrderBookMessageType.SNAPSHOT)

    # Validate snapshot received matches with the original snapshot received from API
    self.assertEqual(first_item.content['bids'], FixtureLiquid.SNAPSHOT_2['buy_price_levels'])
    self.assertEqual(first_item.content['asks'], FixtureLiquid.SNAPSHOT_2['sell_price_levels'])

    # Validate the rest of the content
    self.assertEqual(first_item.content['trading_pair'], mocked_trading_pairs[0])
    self.assertEqual(first_item.content['product_id'], 27)
async def waitLoaded(self, timeout=15):
    # await added: returning the bare wait_for() coroutine from an async def
    # would hand callers an un-awaited coroutine instead of the result
    return await asyncio.wait_for(self.evLoaded.wait(), timeout)
def async_run_with_timeout(self, coroutine: Awaitable, timeout: int = 1):
    ret = asyncio.get_event_loop().run_until_complete(asyncio.wait_for(coroutine, timeout))
    return ret
def test_create_connection_memory_leak(self):
    HELLO_MSG = b'1' * self.PAYLOAD_SIZE

    server_context = test_utils.simple_server_sslcontext()
    client_context = test_utils.simple_client_sslcontext()

    def serve(sock):
        sock.settimeout(self.TIMEOUT)

        sock.start_tls(server_context, server_side=True)

        sock.sendall(b'O')
        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        sock.shutdown(socket.SHUT_RDWR)
        sock.close()

    class ClientProto(asyncio.Protocol):
        def __init__(self, on_data, on_eof):
            self.on_data = on_data
            self.on_eof = on_eof
            self.con_made_cnt = 0

        def connection_made(proto, tr):
            # XXX: We assume user stores the transport in protocol
            proto.tr = tr
            proto.con_made_cnt += 1
            # Ensure connection_made gets called only once.
            self.assertEqual(proto.con_made_cnt, 1)

        def data_received(self, data):
            self.on_data.set_result(data)

        def eof_received(self):
            self.on_eof.set_result(True)

    async def client(addr):
        await asyncio.sleep(0.5)

        on_data = self.loop.create_future()
        on_eof = self.loop.create_future()

        tr, proto = await self.loop.create_connection(
            lambda: ClientProto(on_data, on_eof), *addr,
            ssl=client_context)

        self.assertEqual(await on_data, b'O')
        tr.write(HELLO_MSG)

        await on_eof
        tr.close()

    with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
        self.loop.run_until_complete(
            asyncio.wait_for(client(srv.addr),
                             timeout=support.SHORT_TIMEOUT))

    # No garbage is left for SSL client from loop.create_connection, even
    # if user stores the SSLTransport in corresponding protocol instance
    client_context = weakref.ref(client_context)
    support.gc_collect()
    self.assertIsNone(client_context())
def queue_get():
    return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
def connection_lost(self, exc: Optional[Exception]) -> None:
    self.on_connection_lost.set_result(True)
    self.loop.create_task(
        asyncio.wait_for(self.service.on_disconnect(), timeout=0.5))
def player_input():
    return asyncio.wait_for(
        asyncio.ensure_future(
            client.wait_for_message(author=message.author)),
        None)
def wrapper(test_case, *args, **kw):
    return test_case.loop.run_until_complete(
        asyncio.wait_for(test_case_fun(test_case, *args, **kw),
                         timeout, loop=test_case.loop))
async def _on_broadcast(self, _action, **kwargs):
    func = getattr(self.service, _action)
    future = asyncio.wait_for(func(kwargs['service'], kwargs['client'], kwargs['payload']),
                              timeout=0.2)
    await self.create_future(future)
def f(self, *args, **kwargs):
    if isinstance(self, HTTPServiceClient):
        return (yield from make_request(func, self, args, kwargs, method))
    elif isinstance(self, HTTPService):
        Stats.http_stats['total_requests'] += 1
        if required_params is not None:
            req = args[0]
            if req.method in ["POST", "DELETE", "PUT", "PATCH"]:
                query_params = yield from req.post()
                if not query_params:
                    query_params = yield from req.json()
            elif req.method == "GET":
                query_params = req.GET
            params = required_params
            if not isinstance(required_params, list):
                params = [required_params]
            missing_params = list(filter(lambda x: x not in query_params, params))
            if len(missing_params) > 0:
                res_d = {'error': 'Required params {} not found'.format(','.join(missing_params))}
                Stats.http_stats['total_responses'] += 1
                Aggregator.update_stats(endpoint=func.__name__, status=400, success=False,
                                        server_type='http', time_taken=0, process_time_taken=0)
                return Response(status=400, content_type='application/json',
                                body=json.dumps(res_d).encode())

        t1 = time.time()
        tp1 = time.process_time()

        # Support for multi request body encodings
        req = args[0]
        try:
            yield from req.json()
        except:
            pass
        else:
            req.post = req.json

        wrapped_func = func
        success = True
        _logger = logging.getLogger()
        api_timeout = _http_timeout

        if valid_timeout(timeout):
            api_timeout = timeout

        if not iscoroutine(func):
            wrapped_func = coroutine(func)

        tracking_id = SharedContext.get(X_REQUEST_ID)

        try:
            result = yield from wait_for(shield(wrapped_func(self, *args, **kwargs)), api_timeout)
        except TimeoutError as e:
            Stats.http_stats['timedout'] += 1
            status = 'timeout'
            success = False
            _logger.exception("HTTP request had a timeout for method %s", func.__name__)
            timeout_log = {
                'time_taken': api_timeout,
                'type': 'http',
                'hostname': socket.gethostbyname(socket.gethostname()),
                'service_name': self._service_name,
                'endpoint': func.__name__,
                'api_execution_threshold_exceed': True,
                'api_timeout': True,
                X_REQUEST_ID: tracking_id
            }
            logging.getLogger('stats').info(timeout_log)
            raise e
        except VykedServiceException as e:
            Stats.http_stats['total_responses'] += 1
            status = 'handled_exception'
            _logger.info('Handled exception %s for method %s ', e.__class__.__name__, func.__name__)
            raise e
        except Exception as e:
            status = 'unhandled_exception'
            success = False
            if suppressed_errors:
                for _error in suppressed_errors:
                    if isinstance(e, _error):
                        status = 'handled_exception'
                        raise e
            Stats.http_stats['total_errors'] += 1
            _logger.exception('Unhandled exception %s for method %s ', e.__class__.__name__, func.__name__)
            _stats_logger = logging.getLogger('stats')
            d = {"exception_type": e.__class__.__name__,
                 "method_name": func.__name__,
                 "message": str(e),
                 "service_name": self._service_name,
                 "hostname": socket.gethostbyname(socket.gethostname()),
                 X_REQUEST_ID: tracking_id}
            _stats_logger.info(dict(d))
            _exception_logger = logging.getLogger('exceptions')
            d["message"] = traceback.format_exc()
            _exception_logger.info(dict(d))
            raise e
        else:
            t2 = time.time()
            tp2 = time.process_time()
            hostname = socket.gethostname()
            service_name = '_'.join(setproctitle.getproctitle().split('_')[:-1])
            status = result.status
            logd = {
                'status': result.status,
                'time_taken': int((t2 - t1) * 1000),
                'process_time_taken': int((tp2 - tp1) * 1000),
                'type': 'http',
                'hostname': hostname,
                'service_name': service_name,
                'endpoint': func.__name__,
                'api_execution_threshold_exceed': False,
                X_REQUEST_ID: tracking_id
            }
            method_execution_time = (t2 - t1)
            if method_execution_time > CONFIG.SLOW_API_THRESHOLD:
                logd['api_execution_threshold_exceed'] = True
                logging.getLogger('stats').info(logd)
            else:
                logging.getLogger('stats').debug(logd)
            Stats.http_stats['total_responses'] += 1
            return result
        finally:
            t2 = time.time()
            tp2 = time.process_time()
            Aggregator.update_stats(endpoint=func.__name__, status=status, success=success,
                                    server_type='http', time_taken=int((t2 - t1) * 1000),
                                    process_time_taken=int((tp2 - tp1) * 1000))
periodic_commit_task: asyncio.Future = None
try:
    log.info("Starting server")
    loop.run_until_complete(server.start())
    log.info("Starting clients and plugins")
    loop.run_until_complete(asyncio.gather(*[client.start() for client in clients], loop=loop))
    log.info("Startup actions complete, running forever")
    periodic_commit_task = asyncio.ensure_future(periodic_commit(), loop=loop)
    loop.run_forever()
except KeyboardInterrupt:
    log.info("Interrupt received, stopping HTTP clients/servers and saving database")
    if periodic_commit_task is not None:
        periodic_commit_task.cancel()
    log.debug("Stopping clients")
    loop.run_until_complete(asyncio.gather(*[client.stop() for client in Client.cache.values()],
                                           loop=loop))
    db_session.commit()
    if stop_log_listener is not None:
        log.debug("Closing websockets")
        loop.run_until_complete(stop_log_listener())
    log.debug("Stopping server")
    try:
        loop.run_until_complete(asyncio.wait_for(server.stop(), 5, loop=loop))
    except asyncio.TimeoutError:
        log.warning("Stopping server timed out")
    log.debug("Closing event loop")
    loop.close()
    log.debug("Everything stopped, shutting down")
    sys.exit(0)
def validate_password(self, username, password):
    return asyncio.wait_for(self.replay(username, password), timeout=4.0)
def recv(self):
    try:
        raw = unasyncio(asyncio.wait_for(self.conn.recv(), timeout=0.1))
    except asyncio.exceptions.TimeoutError:
        return None
    return json.loads(raw)
def _request_helper(self, options_func, cancellation_event):
    """
    Query string should be provided as a manually serialized and encoded string.

    :param options_func:
    :param cancellation_event:
    :return:
    """
    if cancellation_event is not None:
        assert isinstance(cancellation_event, Event)

    options = options_func()
    assert isinstance(options, RequestOptions)

    create_response = options.create_response
    create_status = options.create_status
    create_exception = options.create_exception

    params_to_merge_in = {}

    if options.operation_type == PNOperationType.PNPublishOperation:
        params_to_merge_in['seqn'] = yield from self._publish_sequence_manager.get_next_sequence()

    options.merge_params_in(params_to_merge_in)

    url = utils.build_url(self.config.scheme(), self.base_origin,
                          options.path, options.query_string)
    log_url = utils.build_url(self.config.scheme(), self.base_origin,
                              options.path, options.query_string)
    logger.debug("%s %s %s" % (options.method_string, log_url, options.data))

    if AIOHTTP_V in (1, 2):
        from yarl import URL
        url = URL(url, encoded=True)

    try:
        response = yield from asyncio.wait_for(
            self._session.request(options.method_string, url,
                                  headers=self.headers,
                                  data=options.data if options.data is not None else None),
            options.request_timeout)
    except (asyncio.TimeoutError, asyncio.CancelledError):
        raise
    except Exception as e:
        logger.error("session.request exception: %s" % str(e))
        raise

    body = yield from response.text()

    if cancellation_event is not None and cancellation_event.is_set():
        return

    response_info = None
    status_category = PNStatusCategory.PNUnknownCategory

    if response is not None:
        request_url = six.moves.urllib.parse.urlparse(str(response.url))
        query = six.moves.urllib.parse.parse_qs(request_url.query)
        uuid = None
        auth_key = None

        if 'uuid' in query and len(query['uuid']) > 0:
            uuid = query['uuid'][0]

        if 'auth_key' in query and len(query['auth_key']) > 0:
            auth_key = query['auth_key'][0]

        response_info = ResponseInfo(
            status_code=response.status,
            tls_enabled='https' == request_url.scheme,
            origin=request_url.hostname,
            uuid=uuid,
            auth_key=auth_key,
            client_request=None,
            client_response=response
        )

    if body is not None and len(body) > 0:
        try:
            data = json.loads(body)
        except ValueError:
            if response.status == 599 and len(body) > 0:
                data = body
            else:
                raise
        except TypeError:
            try:
                data = json.loads(body.decode("utf-8"))
            except ValueError:
                raise create_exception(category=status_category,
                                       response=response,
                                       response_info=response_info,
                                       exception=PubNubException(
                                           pn_error=PNERR_JSON_DECODING_FAILED,
                                           errormsg='json decode error',
                                       ))
    else:
        data = "N/A"

    logger.debug(data)

    if response.status != 200:
        if response.status >= 500:
            err = PNERR_SERVER_ERROR
        else:
            err = PNERR_CLIENT_ERROR

        if response.status == 403:
            status_category = PNStatusCategory.PNAccessDeniedCategory

        if response.status == 400:
            status_category = PNStatusCategory.PNBadRequestCategory

        raise create_exception(category=status_category,
                               response=data,
                               response_info=response_info,
                               exception=PubNubException(
                                   errormsg=data,
                                   pn_error=err,
                                   status_code=response.status
                               ))
    else:
        return AsyncioEnvelope(
            result=create_response(data),
            status=create_status(PNStatusCategory.PNAcknowledgmentCategory,
                                 data,
                                 response_info,
                                 None)
        )
def coro():
    persp = perspective_manager.get_perspective("games")
    gmwidg = persp.cur_gmwidg()
    gamemodel = gmwidg.gamemodel

    old_check_value = conf.get("analyzer_check")
    conf.set("analyzer_check", True)
    if HINT not in gamemodel.spectators:
        try:
            yield from asyncio.wait_for(gamemodel.start_analyzer(HINT), 5.0)
        except asyncio.TimeoutError:
            log.error("Got timeout error while starting hint analyzer")
            return
        except Exception:
            log.error("Unknown error while starting hint analyzer")
            return
    analyzer = gamemodel.spectators[HINT]
    gmwidg.menuitems["hint_mode"].active = True

    threat_PV = conf.get("ThreatPV")
    if threat_PV:
        old_inv_check_value = conf.get("inv_analyzer_check")
        conf.set("inv_analyzer_check", True)
        if SPY not in gamemodel.spectators:
            try:
                yield from asyncio.wait_for(gamemodel.start_analyzer(SPY), 5.0)
            except asyncio.TimeoutError:
                log.error("Got timeout error while starting spy analyzer")
                return
            except Exception:
                log.error("Unknown error while starting spy analyzer")
                return
        inv_analyzer = gamemodel.spectators[SPY]
        gmwidg.menuitems["spy_mode"].active = True

    title = _("Game analyzing in progress...")
    text = _("Do you want to abort it?")
    content = InfoBar.get_message_content(title, text, Gtk.STOCK_DIALOG_QUESTION)

    def response_cb(infobar, response, message):
        conf.set("analyzer_check", old_check_value)
        if threat_PV:
            conf.set("inv_analyzer_check", old_inv_check_value)
        message.dismiss()
        abort()

    message = InfoBarMessage(Gtk.MessageType.QUESTION, content, response_cb)
    message.add_button(InfoBarMessageButton(_("Abort"), Gtk.ResponseType.CANCEL))
    gmwidg.replaceMessages(message)

    @asyncio.coroutine
    def analyse_moves():
        should_black = conf.get("shouldBlack")
        should_white = conf.get("shouldWhite")
        from_current = conf.get("fromCurrent")
        start_ply = gmwidg.board.view.shown if from_current else 0
        move_time = int(conf.get("max_analysis_spin"))
        threshold = int(conf.get("variation_threshold_spin"))

        for board in gamemodel.boards[start_ply:]:
            if self.stop_event.is_set():
                break

            gmwidg.board.view.setShownBoard(board)
            analyzer.setBoard(board)
            if threat_PV:
                inv_analyzer.setBoard(board)
            yield from asyncio.sleep(move_time + 0.1)

            ply = board.ply - gamemodel.lowply
            color = (ply - 1) % 2
            if ply - 1 in gamemodel.scores and ply in gamemodel.scores and (
                    (color == BLACK and should_black) or
                    (color == WHITE and should_white)):
                oldmoves, oldscore, olddepth = gamemodel.scores[ply - 1]
                oldscore = oldscore * -1 if color == BLACK else oldscore
                score_str = prettyPrintScore(oldscore, olddepth)
                moves, score, depth = gamemodel.scores[ply]
                score = score * -1 if color == WHITE else score
                diff = score - oldscore
                if ((diff > threshold and color == BLACK) or
                        (diff < -1 * threshold and color == WHITE)) and (
                        gamemodel.moves[ply - 1] != parseAny(
                            gamemodel.boards[ply - 1], oldmoves[0])):
                    if threat_PV:
                        try:
                            if ply - 1 in gamemodel.spy_scores:
                                oldmoves0, oldscore0, olddepth0 = gamemodel.spy_scores[ply - 1]
                                score_str0 = prettyPrintScore(oldscore0, olddepth0)
                                pv0 = listToMoves(gamemodel.boards[ply - 1],
                                                  ["--"] + oldmoves0, validate=True)
                                if len(pv0) > 2:
                                    gamemodel.add_variation(gamemodel.boards[ply - 1], pv0,
                                                            comment="Threatening",
                                                            score=score_str0, emit=False)
                        except ParsingError as e:
                            # ParsingErrors may happen when parsing "old" lines from
                            # analyzing engines, which haven't yet noticed their new tasks
                            log.debug("__parseLine: Ignored (%s) from analyzer: ParsingError%s" %
                                      (' '.join(oldmoves), e))
                    try:
                        pv = listToMoves(gamemodel.boards[ply - 1], oldmoves, validate=True)
                        gamemodel.add_variation(gamemodel.boards[ply - 1], pv,
                                                comment="Better is",
                                                score=score_str, emit=False)
                    except ParsingError as e:
                        # ParsingErrors may happen when parsing "old" lines from
                        # analyzing engines, which haven't yet noticed their new tasks
                        log.debug("__parseLine: Ignored (%s) from analyzer: ParsingError%s" %
                                  (' '.join(oldmoves), e))

        self.widgets["analyze_game"].hide()
        self.widgets["analyze_ok_button"].set_sensitive(True)
        conf.set("analyzer_check", old_check_value)
        if threat_PV:
            conf.set("inv_analyzer_check", old_inv_check_value)
        message.dismiss()

        gamemodel.emit("analysis_finished")

    create_task(analyse_moves())
    hide_window(None)

    return True
def _run_loop_waiting_for(future: Union[Awaitable, asyncio.Future, Coroutine], timeout: float):
    return asyncio.get_event_loop().run_until_complete(
        asyncio.wait_for(asyncio.shield(future), timeout=timeout))
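# Why the asyncio.shield() wrapper above matters: on timeout, wait_for cancels
# whatever awaitable it was given, but with a shield only the shield is
# cancelled and the inner task keeps running. A minimal self-contained
# demonstration of that behavior (all names here are illustrative):
import asyncio


async def _slow():
    await asyncio.sleep(0.2)
    return "done"


async def _demo():
    task = asyncio.ensure_future(_slow())
    try:
        await asyncio.wait_for(asyncio.shield(task), timeout=0.05)
    except asyncio.TimeoutError:
        pass  # the shield was cancelled, not the task
    assert not task.cancelled()
    return await task  # still completes with "done"


print(asyncio.get_event_loop().run_until_complete(_demo()))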
def stop_client(self):
    try:
        self.loop.run_until_complete(
            asyncio.wait_for(self.client.worker_task, timeout=1))
    except asyncio.TimeoutError:                            # pragma: no cover
        self.fail("Client failed to stop")
def wait(self, timeout):
    try:
        yield from asyncio.wait_for(self.kernel.wait(), timeout)
        return False
    except asyncio.TimeoutError:
        return True
def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
    ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
    return ret
def wait_for(self, event, *, check=None, timeout=None):
    """|coro|

    Waits for a WebSocket event to be dispatched.

    This could be used to wait for a user to reply to a message,
    or to react to a message, or to edit a message in a self-contained
    way.

    The ``timeout`` parameter is passed onto :func:`asyncio.wait_for`. By default,
    it does not timeout. Note that this does propagate the
    :exc:`asyncio.TimeoutError` for you in case of timeout and is provided for
    ease of use.

    In case the event returns multiple arguments, a :class:`tuple` containing those
    arguments is returned instead. Please check the
    :ref:`documentation <discord-api-events>` for a list of events and their
    parameters.

    This function returns the **first event that meets the requirements**.

    Examples
    ---------

    Waiting for a user reply: ::

        @client.event
        async def on_message(message):
            if message.content.startswith('$greet'):
                channel = message.channel
                await channel.send('Say hello!')

                def check(m):
                    return m.content == 'hello' and m.channel == channel

                msg = await client.wait_for('message', check=check)
                await channel.send('Hello {.author}!'.format(msg))

    Waiting for a thumbs up reaction from the message author: ::

        @client.event
        async def on_message(message):
            if message.content.startswith('$thumb'):
                channel = message.channel
                await channel.send('Send me that \N{THUMBS UP SIGN} reaction, mate')

                def check(reaction, user):
                    return user == message.author and str(reaction.emoji) == '\N{THUMBS UP SIGN}'

                try:
                    reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check)
                except asyncio.TimeoutError:
                    await channel.send('\N{THUMBS DOWN SIGN}')
                else:
                    await channel.send('\N{THUMBS UP SIGN}')

    Parameters
    ------------
    event: :class:`str`
        The event name, similar to the :ref:`event reference <discord-api-events>`,
        but without the ``on_`` prefix, to wait for.
    check: Optional[predicate]
        A predicate to check what to wait for. The arguments must meet the
        parameters of the event being waited for.
    timeout: Optional[:class:`float`]
        The number of seconds to wait before timing out and raising
        :exc:`asyncio.TimeoutError`.

    Raises
    -------
    asyncio.TimeoutError
        If a timeout is provided and it was reached.

    Returns
    --------
    Any
        Returns no arguments, a single argument, or a :class:`tuple` of multiple
        arguments that mirrors the parameters passed in the
        :ref:`event reference <discord-api-events>`.
    """

    future = self.loop.create_future()
    if check is None:
        def _check(*args):
            return True
        check = _check

    ev = event.lower()
    try:
        listeners = self._listeners[ev]
    except KeyError:
        listeners = []
        self._listeners[ev] = listeners

    listeners.append((future, check))
    return asyncio.wait_for(future, timeout, loop=self.loop)
async def _on_service_disconnect(self, _action, **kwargs):
    func = getattr(self.service, _action)
    future = asyncio.wait_for(func(kwargs['service']), timeout=0.2)
    await self.create_future(future)
def test_start_tls_client_reg_proto_1(self):
    HELLO_MSG = b'1' * self.PAYLOAD_SIZE

    server_context = test_utils.simple_server_sslcontext()
    client_context = test_utils.simple_client_sslcontext()

    def serve(sock):
        sock.settimeout(self.TIMEOUT)

        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        sock.start_tls(server_context, server_side=True)

        sock.sendall(b'O')
        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        sock.shutdown(socket.SHUT_RDWR)
        sock.close()

    class ClientProto(asyncio.Protocol):
        def __init__(self, on_data, on_eof):
            self.on_data = on_data
            self.on_eof = on_eof
            self.con_made_cnt = 0

        def connection_made(proto, tr):
            proto.con_made_cnt += 1
            # Ensure connection_made gets called only once.
            self.assertEqual(proto.con_made_cnt, 1)

        def data_received(self, data):
            self.on_data.set_result(data)

        def eof_received(self):
            self.on_eof.set_result(True)

    async def client(addr):
        await asyncio.sleep(0.5)

        on_data = self.loop.create_future()
        on_eof = self.loop.create_future()

        tr, proto = await self.loop.create_connection(
            lambda: ClientProto(on_data, on_eof), *addr)

        tr.write(HELLO_MSG)
        new_tr = await self.loop.start_tls(tr, proto, client_context)

        self.assertEqual(await on_data, b'O')
        new_tr.write(HELLO_MSG)

        await on_eof
        new_tr.close()

    with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
        self.loop.run_until_complete(
            asyncio.wait_for(client(srv.addr),
                             timeout=support.SHORT_TIMEOUT))

    # No garbage is left if SSL is closed uncleanly
    client_context = weakref.ref(client_context)
    support.gc_collect()
    self.assertIsNone(client_context())
def wait_until_full(self):
    return asyncio.wait_for(self._full.wait(), timeout=15)
def main():
    server = DpowServer()

    async def startup(app):
        logger.info("Server starting")
        if config.debug:
            logger.warn("Debug mode is on")
        try:
            await server.setup()
            asyncio.ensure_future(server.loop(), loop=loop)
        except Exception as e:
            logger.critical(e)
            await server.close()
            sys.exit(1)

    async def cleanup(app):
        logger.info("Server shutting down")
        await server.close()

    # use websockets or callback from the node
    app_blocks = None
    if config.enable_precache and not config.websocket_uri:
        app_blocks = web.Application(middlewares=[web.normalize_path_middleware()])
        app_blocks.router.add_post('/block/', server.block_arrival_cb_handler)
        handler_blocks = app_blocks.make_handler()
        coroutine_blocks = loop.create_server(handler_blocks, "0.0.0.0", 5040)
        server_blocks = loop.run_until_complete(coroutine_blocks)

    # endpoint for a permanent connection to services via websockets
    app_ws = web.Application(middlewares=[web.normalize_path_middleware()])
    app_ws.router.add_get('/service_ws/', server.service_ws_handler)
    handler_ws = app_ws.make_handler()
    coroutine_ws = loop.create_server(handler_ws, "0.0.0.0", 5035)
    server_ws = loop.run_until_complete(coroutine_ws)

    # endpoint for checking if server is up and if blocks are being received
    app_upcheck = web.Application(middlewares=[web.normalize_path_middleware()])

    def upcheck_handler(request):
        return web.Response(text="up")

    app_upcheck.router.add_get('/upcheck/', upcheck_handler)
    app_upcheck.router.add_get('/upcheck/blocks/', server.upcheck_blocks_handler)
    handler_upcheck = app_upcheck.make_handler()
    coroutine_upcheck = loop.create_server(handler_upcheck, "0.0.0.0", 5031)
    server_upcheck = loop.run_until_complete(coroutine_upcheck)

    # endpoint for service requests
    app_services = web.Application(middlewares=[web.normalize_path_middleware()])
    app_services.on_startup.append(startup)
    app_services.on_cleanup.append(cleanup)
    app_services.router.add_post('/service/', server.service_post_handler)

    try:
        if config.web_path:
            # aiohttp does not allow setting group write permissions on the created
            # socket by default, so a custom socket is created
            sock = get_socket(config.web_path)
            web.run_app(app_services, host="0.0.0.0", port=5030, sock=sock)
        else:
            web.run_app(app_services, host="0.0.0.0", port=5030)
    except KeyboardInterrupt:
        loop.stop()
    finally:
        if not loop.is_closed():
            if app_blocks:
                server_blocks.close()
                loop.run_until_complete(handler_blocks.shutdown(5.0))
            server_ws.close()
            loop.run_until_complete(handler_ws.shutdown(5.0))
            server_upcheck.close()
            loop.run_until_complete(handler_upcheck.shutdown(5.0))
            remaining_tasks = asyncio.Task.all_tasks()
            loop.run_until_complete(asyncio.wait_for(asyncio.gather(*remaining_tasks), timeout=10))
            loop.close()
def test_start_tls_client_buf_proto_1(self):
    HELLO_MSG = b'1' * self.PAYLOAD_SIZE

    server_context = test_utils.simple_server_sslcontext()
    client_context = test_utils.simple_client_sslcontext()
    client_con_made_calls = 0

    def serve(sock):
        sock.settimeout(self.TIMEOUT)

        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        sock.start_tls(server_context, server_side=True)

        sock.sendall(b'O')
        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        sock.sendall(b'2')
        data = sock.recv_all(len(HELLO_MSG))
        self.assertEqual(len(data), len(HELLO_MSG))

        sock.shutdown(socket.SHUT_RDWR)
        sock.close()

    class ClientProtoFirst(asyncio.BufferedProtocol):
        def __init__(self, on_data):
            self.on_data = on_data
            self.buf = bytearray(1)

        def connection_made(self, tr):
            nonlocal client_con_made_calls
            client_con_made_calls += 1

        def get_buffer(self, sizehint):
            return self.buf

        def buffer_updated(slf, nsize):
            self.assertEqual(nsize, 1)
            slf.on_data.set_result(bytes(slf.buf[:nsize]))

    class ClientProtoSecond(asyncio.Protocol):
        def __init__(self, on_data, on_eof):
            self.on_data = on_data
            self.on_eof = on_eof
            self.con_made_cnt = 0

        def connection_made(self, tr):
            nonlocal client_con_made_calls
            client_con_made_calls += 1

        def data_received(self, data):
            self.on_data.set_result(data)

        def eof_received(self):
            self.on_eof.set_result(True)

    async def client(addr):
        await asyncio.sleep(0.5)

        on_data1 = self.loop.create_future()
        on_data2 = self.loop.create_future()
        on_eof = self.loop.create_future()

        tr, proto = await self.loop.create_connection(
            lambda: ClientProtoFirst(on_data1), *addr)

        tr.write(HELLO_MSG)
        new_tr = await self.loop.start_tls(tr, proto, client_context)

        self.assertEqual(await on_data1, b'O')
        new_tr.write(HELLO_MSG)

        new_tr.set_protocol(ClientProtoSecond(on_data2, on_eof))
        self.assertEqual(await on_data2, b'2')
        new_tr.write(HELLO_MSG)

        await on_eof
        new_tr.close()

        # connection_made() should be called only once -- when
        # we establish connection for the first time. Start TLS
        # doesn't call connection_made() on application protocols.
        self.assertEqual(client_con_made_calls, 1)

    with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
        self.loop.run_until_complete(
            asyncio.wait_for(client(srv.addr), timeout=self.TIMEOUT))
def wrapper(cls, *args, **kwargs):
    coro = f(cls, *args, **kwargs)
    timeout = getattr(cls, name)
    return asyncio.wait_for(coro, timeout, loop=cls.loop)
def test_subscribe_account_trades_structure(self):
    """
    Check if the response matches the expected dict structure.
    :return:
    """
    expect_subscription: dict = {'id': str, 'result': [str]}
    expect: dict = {
        "channel": str,
        "result": [{
            'base_precision': int,
            'quote_precision': int,
            'fee_precision': int,
            'order_id': str,
            'market': str,
            'side': str,
            'quantity': str,
            'price': str,
            'fee_amount': str,
            'fee_denom': str,
            'address': str,
            'block_height': str,
            'block_created_at': str,
            'id': int
        }]
    }

    # connect to websocket
    client = DemexWebsocket(uri=MAINNET_WS_URI)
    # little workaround to save the response
    self.response: List[dict] = []

    async def on_connect():
        # use the AMM wallet so the tokens it holds are deterministic
        await client.subscribe_account_trades('balance', WALLET_SWTH_ETH1_AMM)

    async def on_message(message: dict):
        # save response into self
        self.response.append(message)

    try:
        loop = asyncio.get_event_loop()
        loop.run_until_complete(
            asyncio.wait_for(
                client.connect(on_connect_callback=on_connect,
                               on_receive_message_callback=on_message),
                WEBSOCKET_TIMEOUT_SUBSCRIPTION))
    except asyncio.TimeoutError:
        loop = asyncio.get_event_loop()
        loop.run_until_complete(client.disconnect())

    if not self.response:
        raise RuntimeError("Did not receive a response.")

    if len(self.response) < 2:
        self.skipTest("Did not receive orders within time, test can not finish.")

    channel_subscription: dict = self.response[0]
    self.assertDictStructure(expect_subscription, channel_subscription)

    for message in self.response[1:]:
        # if this fails, check whether the AMM wallet owns other tokens than expected
        self.assertDictStructure(expect, message)
def _test_repeated_pulses_with_rule(self, config, pulse_ms, pause_min, pause_max):
    latency = []
    rule_latency = []
    pulse_duration = []
    rule_pulse_duration = []
    timeouts = 0
    config["flipper"].enable()
    for _ in range(100):
        # measure coil -> input latency
        pulse_start = time.time()
        config["coil1"].pulse(pulse_ms=pulse_ms)
        try:
            self.mpf.clock.loop.run_until_complete(asyncio.wait_for(
                self.mpf.switch_controller.wait_for_switch(
                    config["switch1"], state=1, only_on_change=False),
                timeout=.5))
            switch_active = time.time()
            self.mpf.clock.loop.run_until_complete(asyncio.wait_for(
                self.mpf.switch_controller.wait_for_switch(
                    config["switch2"], state=1, only_on_change=False),
                timeout=.5))
            switch2_active = time.time()
            self.mpf.clock.loop.run_until_complete(asyncio.wait_for(
                self.mpf.switch_controller.wait_for_switch(
                    config["switch1"], state=0, only_on_change=False),
                timeout=.5))
            switch_inactive = time.time()
            self.mpf.clock.loop.run_until_complete(asyncio.wait_for(
                self.mpf.switch_controller.wait_for_switch(
                    config["switch2"], state=0, only_on_change=False),
                timeout=.5))
            switch2_inactive = time.time()
        except asyncio.TimeoutError:
            print("WARNING: Ran into timeout while waiting. Check your setup!")
            timeouts += 1
            continue

        self.mpf.clock.loop.run_until_complete(
            asyncio.sleep(random.uniform(pause_min * 0.001, pause_max * 0.001)))

        latency.append((switch_active - pulse_start) * 1000)
        rule_latency.append((switch2_active - switch_active) * 1000)
        pulse_duration.append((switch_inactive - switch_active) * 1000)
        rule_pulse_duration.append((switch2_inactive - switch2_active) * 1000)

    print("----------------------------------------------------------------------------------------")
    print("Pulse duration: {}ms Pause: {}ms to {}ms".format(pulse_ms, pause_min, pause_max))
    print("Latency mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f} stdev: {:.2f} variance: {:.2f}".format(
        statistics.mean(latency), statistics.median(latency), min(latency), max(latency),
        statistics.stdev(latency), statistics.variance(latency)))
    print("Rule Latency mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f} stdev: {:.2f} variance: {:.2f}".format(
        statistics.mean(rule_latency), statistics.median(rule_latency), min(rule_latency),
        max(rule_latency), statistics.stdev(rule_latency), statistics.variance(rule_latency)))
    print("Pulse duration measured mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f} stdev: {:.2f} "
          "variance: {:.2f}".format(
              statistics.mean(pulse_duration), statistics.median(pulse_duration),
              min(pulse_duration), max(pulse_duration),
              statistics.stdev(pulse_duration), statistics.variance(pulse_duration)))
    print("Rule Pulse duration measured mean: {:.2f} median: {:.2f} min: {:.2f} max: {:.2f} stdev: {:.2f} "
          "variance: {:.2f}".format(
              statistics.mean(rule_pulse_duration), statistics.median(rule_pulse_duration),
              min(rule_pulse_duration), max(rule_pulse_duration),
              statistics.stdev(rule_pulse_duration), statistics.variance(rule_pulse_duration)))
    if timeouts:
        print("Warning: Experienced {} timeouts during benchmark. Check your setup!".format(timeouts))
    print("----------------------------------------------------------------------------------------")
    print()
    config["flipper"].disable()
def run_application(component: Union[Component, Dict[str, Any]], *, event_loop_policy: str = None,
                    max_threads: int = None, logging: Union[Dict[str, Any], int, None] = INFO,
                    start_timeout: Union[int, float, None] = 10):
    """
    Configure logging and start the given root component in the default asyncio event loop.

    Assuming the root component was started successfully, the event loop will continue running
    until the process is terminated.

    Initializes the logging system first based on the value of ``logging``:

    * If the value is a dictionary, it is passed to :func:`logging.config.dictConfig` as argument.
    * If the value is an integer, it is passed to :func:`logging.basicConfig` as the logging level.
    * If the value is ``None``, logging setup is skipped entirely.

    By default, the logging system is initialized using :func:`~logging.basicConfig` using the
    ``INFO`` logging level.

    The default executor in the event loop is replaced with a new
    :class:`~concurrent.futures.ThreadPoolExecutor` where the maximum number of threads is set to
    the value of ``max_threads`` or, if omitted, the default value of
    :class:`~concurrent.futures.ThreadPoolExecutor`.

    :param component: the root component (either a component instance or a configuration
        dictionary where the special ``type`` key is either a component class or a
        ``module:varname`` reference to one)
    :param event_loop_policy: entry point name (from the ``asphalt.core.event_loop_policies``
        namespace) of an alternate event loop policy (or a module:varname reference to one)
    :param max_threads: the maximum number of worker threads in the default thread pool executor
        (the default value depends on the event loop implementation)
    :param logging: a logging configuration dictionary, :ref:`logging level <python:levels>` or
        ``None``
    :param start_timeout: seconds to wait for the root component (and its subcomponents) to start
        up before giving up (``None`` = wait forever)

    """
    assert check_argument_types()

    # Configure the logging system
    if isinstance(logging, dict):
        dictConfig(logging)
    elif isinstance(logging, int):
        basicConfig(level=logging)

    # Inform the user whether -O or PYTHONOPTIMIZE was set when Python was launched
    logger = getLogger(__name__)
    logger.info('Running in %s mode', 'development' if __debug__ else 'production')

    # Switch to an alternate event loop policy if one was provided
    if event_loop_policy:
        create_policy = policies.resolve(event_loop_policy)
        policy = create_policy()
        asyncio.set_event_loop_policy(policy)
        logger.info('Switched event loop policy to %s', qualified_name(policy))

    # Assign a new default executor with the given max worker thread limit if one was provided
    event_loop = asyncio.get_event_loop()
    if max_threads is not None:
        event_loop.set_default_executor(ThreadPoolExecutor(max_threads))
        logger.info('Installed a new thread pool executor with max_workers=%d', max_threads)

    # Instantiate the root component if a dict was given
    if isinstance(component, dict):
        component = component_types.create_object(**component)

    logger.info('Starting application')
    context = Context()
    exception = None  # type: BaseException
    exit_code = 0

    # Start the root component
    try:
        coro = asyncio.wait_for(component.start(context), start_timeout, loop=event_loop)
        event_loop.run_until_complete(coro)
    except asyncio.TimeoutError as e:
        exception = e
        logger.error('Timeout waiting for the root component to start')
        exit_code = 1
    except Exception as e:
        exception = e
        logger.exception('Error during application startup')
        exit_code = 1
    else:
        logger.info('Application started')

        # Enable the component tree to be garbage collected
        del component

        # Add a signal handler to gracefully deal with SIGTERM
        try:
            event_loop.add_signal_handler(signal.SIGTERM, sigterm_handler, logger, event_loop)
        except NotImplementedError:
            pass  # Windows does not support signals very well

        # Finally, run the event loop until the process is terminated or Ctrl+C is pressed
        try:
            event_loop.run_forever()
        except KeyboardInterrupt:
            pass
        except SystemExit as e:
            exit_code = e.code

    # Close the root context
    logger.info('Stopping application')
    event_loop.run_until_complete(context.close(exception))

    # Shut down leftover async generators (requires Python 3.6+)
    try:
        event_loop.run_until_complete(event_loop.shutdown_asyncgens())
    except (AttributeError, NotImplementedError):
        pass

    # Finally, close the event loop itself
    event_loop.close()
    logger.info('Application stopped')

    # Shut down the logging system
    shutdown()

    if exit_code:
        sys.exit(exit_code)
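# A hedged usage sketch for run_application() above. MyRootComponent is a
# hypothetical component and the exact import path is an assumption; the
# keyword arguments mirror the signature documented in the docstring.
from asphalt.core import Component, run_application


class MyRootComponent(Component):
    async def start(self, ctx):
        print("root component started with context", ctx)


run_application(MyRootComponent(), max_threads=4, start_timeout=5)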
def blocking_task(self, loop, executor, was_invoked):
    logging.debug('start blocking task()')
    fut = loop.run_in_executor(executor, self.blocking_func, was_invoked)
    yield from asyncio.wait_for(fut, timeout=5.0)
    logging.debug('end blocking task()')