def test_streamreader_constructor(self): self.addCleanup(asyncio.set_event_loop, None) asyncio.set_event_loop(self.loop) reader = asyncio.StreamReader() self.assertIs(reader._loop, self.loop)
async def test():
    """Create and return a fresh StreamReader from within a running coroutine."""
    reader = asyncio.StreamReader()
    return reader
def test___repr__nondefault_limit(self): stream = asyncio.StreamReader(loop=self.loop, limit=123) self.assertEqual("<StreamReader limit=123>", repr(stream))
def test_readline_empty_eof(self): stream = asyncio.StreamReader(loop=self.loop) stream.feed_eof() line = self.loop.run_until_complete(stream.readline()) self.assertEqual(b'', line)
def test_readexactly_limit(self): stream = asyncio.StreamReader(limit=3, loop=self.loop) stream.feed_data(b'chunk') data = self.loop.run_until_complete(stream.readexactly(5)) self.assertEqual(b'chunk', data) self.assertEqual(b'', stream._buffer)
def on_stream_start(self, i):
    """Allocate the per-stream reader and push channel for stream *i*."""
    reader = asyncio.StreamReader(loop=self.loop)
    self._data[i] = reader
    push_channel = Channel(loop=self.loop)
    self._push[i] = push_channel
def test_feed_nonempty_data(self): stream = asyncio.StreamReader(loop=self.loop) stream.feed_data(self.DATA) self.assertEqual(self.DATA, stream._buffer)
async def test_23_get_from_stream_header_too_long(self):
    """get_from_stream_async() must reject a header line that is too long."""
    data = '{0[0]} {0[1]}\n'.format(self.size).encode() + self.rgba  # 2x2
    reader = asyncio.StreamReader()
    # Feed a bogus oversized header instead of a valid "<w> <h>\n" line.
    reader.feed_data(b'x' * 20 + b'\n')
    # Fix: assertRaisesRegexp is a long-deprecated alias that was removed
    # in Python 3.12; use assertRaisesRegex. The unused `image =` binding
    # is also dropped.
    with self.assertRaisesRegex(ValueError, 'Header too long'):
        await qubesimgconverter.Image.get_from_stream_async(reader)
async def get_directly_linked_peers_without_handshake(peer1_class=LESPeer, peer1_headerdb=None,
                                                      peer2_class=LESPeer, peer2_headerdb=None):
    """See get_directly_linked_peers().

    Neither the P2P handshake nor the sub-protocol handshake will be performed here.
    """
    cancel_token = CancelToken("get_directly_linked_peers_without_handshake")
    # Default each side to a fresh mainnet header DB when none was supplied.
    if peer1_headerdb is None:
        peer1_headerdb = get_fresh_mainnet_headerdb()
    if peer2_headerdb is None:
        peer2_headerdb = get_fresh_mainnet_headerdb()
    peer1_private_key = ecies.generate_privkey()
    peer2_private_key = ecies.generate_privkey()
    # Each remote node carries the *other* peer's public key; the address is a
    # placeholder since no real socket is involved.
    peer1_remote = kademlia.Node(
        peer2_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))
    peer2_remote = kademlia.Node(
        peer1_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))
    initiator = auth.HandshakeInitiator(peer1_remote, peer1_private_key, cancel_token)
    peer2_reader = asyncio.StreamReader()
    peer1_reader = asyncio.StreamReader()
    # Link the peer1's writer to the peer2's reader, and the peer2's writer to the
    # peer1's reader: a "write" on one side simply feeds the opposite StreamReader.
    peer2_writer = type(
        "mock-streamwriter",
        (object, ),
        {"write": peer1_reader.feed_data,
         "close": lambda: None})
    peer1_writer = type(
        "mock-streamwriter",
        (object, ),
        {"write": peer2_reader.feed_data,
         "close": lambda: None})
    peer1, peer2 = None, None
    handshake_finished = asyncio.Event()

    async def do_handshake():
        # Runs the initiator side concurrently; publishes the finished peer1
        # via the nonlocal binding and signals completion through the Event.
        nonlocal peer1
        aes_secret, mac_secret, egress_mac, ingress_mac = await auth._handshake(
            initiator, peer1_reader, peer1_writer, cancel_token)
        peer1 = peer1_class(
            remote=peer1_remote, privkey=peer1_private_key, reader=peer1_reader,
            writer=peer1_writer, aes_secret=aes_secret, mac_secret=mac_secret,
            egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=peer1_headerdb,
            network_id=1)
        handshake_finished.set()

    asyncio.ensure_future(do_handshake())
    # Responder side runs inline: read the auth message the initiator just
    # "sent" through the in-memory link, then answer with an auth-ack.
    responder = auth.HandshakeResponder(peer2_remote, peer2_private_key, cancel_token)
    auth_cipher = await peer2_reader.read(constants.ENCRYPTED_AUTH_MSG_LEN)
    initiator_ephemeral_pubkey, initiator_nonce, _ = decode_authentication(
        auth_cipher, peer2_private_key)
    responder_nonce = keccak(os.urandom(constants.HASH_LEN))
    auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)
    peer2_writer.write(auth_ack_ciphertext)
    # Wait for the initiator coroutine to finish deriving its secrets.
    await handshake_finished.wait()
    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce, responder_nonce, initiator_ephemeral_pubkey, auth_cipher,
        auth_ack_ciphertext)
    # Sanity check: each side's egress MAC must equal the other's ingress MAC.
    assert egress_mac.digest() == peer1.ingress_mac.digest()
    assert ingress_mac.digest() == peer1.egress_mac.digest()
    peer2 = peer2_class(
        remote=peer2_remote, privkey=peer2_private_key, reader=peer2_reader,
        writer=peer2_writer, aes_secret=aes_secret, mac_secret=mac_secret,
        egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=peer2_headerdb,
        network_id=1)
    return peer1, peer2
def sslwrap(reader, writer, sslcontext, server_side=False, server_hostname=None, verbose=None):
    """Wrap an existing (reader, writer) stream pair in TLS.

    If *sslcontext* is None the pair is returned unchanged. Otherwise an
    asyncio SSLProtocol is driven manually: decrypted application data is fed
    into a fresh StreamReader, and a Writer facade encrypts application
    writes through the TLS engine.

    Returns a (reader, writer)-like tuple.
    """
    if sslcontext is None:
        # No TLS requested: hand back the original pair untouched.
        return reader, writer
    ssl_reader = asyncio.StreamReader()

    class Protocol(asyncio.Protocol):
        # Receives plaintext from the TLS engine and forwards it to ssl_reader.
        def data_received(self, data):
            ssl_reader.feed_data(data)

        def eof_received(self):
            ssl_reader.feed_eof()

        def connection_lost(self, exc):
            ssl_reader.feed_eof()

    ssl = asyncio.sslproto.SSLProtocol(
        asyncio.get_event_loop(), Protocol(), sslcontext, None,
        server_side, server_hostname, False)

    class Transport(asyncio.Transport):
        # Outgoing side of the TLS engine: ciphertext produced by SSLProtocol
        # is written to the underlying writer.
        _paused = False

        def __init__(self, extra=None):
            # Bug fix: the original used a mutable default argument
            # (extra={}), which would be shared by every instance.
            self._extra = {} if extra is None else extra
            self.closed = False

        def write(self, data):
            if data and not self.closed:
                writer.write(data)

        def close(self):
            self.closed = True
            writer.close()

        def _force_close(self, exc):
            if not self.closed:
                (verbose or print)(f'{exc} from {writer.get_extra_info("peername")[0]}')
                self.close()

        def abort(self):
            self.close()

    ssl.connection_made(Transport())

    async def channel():
        # Pump ciphertext from the underlying reader into the TLS engine.
        try:
            while True:
                # NOTE(review): read_() is not a stdlib StreamReader method;
                # assumes the passed-in reader provides it — confirm.
                data = await reader.read_()
                if not data:
                    break
                ssl.data_received(data)
        except Exception:
            # Best-effort pump: any transport error simply ends the stream;
            # the finally clause always signals EOF to the engine.
            pass
        finally:
            ssl.eof_received()

    asyncio.ensure_future(channel())

    class Writer():
        # Facade matching the StreamWriter surface used by callers; writes go
        # through the TLS engine's application transport.
        def get_extra_info(self, key):
            return writer.get_extra_info(key)

        def write(self, data):
            ssl._app_transport.write(data)

        def drain(self):
            return writer.drain()

        def close(self):
            ssl._app_transport.close()

    return ssl_reader, Writer()
def __init__(self, *, host=None, port=None, secure=None, timeout=10, max_size=2**20,
             max_queue=2**5, read_limit=2**16, write_limit=2**16, loop=None,
             legacy_recv=False):
    """Initialize the WebSocket common-protocol state.

    Connection metadata, buffer limits, the underlying StreamReader and the
    lifecycle futures are all set up here; the opening handshake itself is
    performed by a subclass.
    """
    self.host = host
    self.port = port
    self.secure = secure
    self.timeout = timeout
    self.max_size = max_size
    self.max_queue = max_queue
    self.read_limit = read_limit
    self.write_limit = write_limit
    # Store a reference to loop to avoid relying on self._loop, a private
    # attribute of StreamReaderProtocol, inherited from _FlowControlMixin.
    if loop is None:
        loop = asyncio.get_event_loop()
    self.loop = loop
    self.legacy_recv = legacy_recv
    # Configure read buffer limits. The high-water limit is defined by
    # ``self.read_limit``. The ``limit`` argument controls the line length
    # limit and half the buffer limit of :class:`~asyncio.StreamReader`.
    # That's why it must be set to half of ``self.read_limit``.
    stream_reader = asyncio.StreamReader(limit=read_limit // 2, loop=loop)
    super().__init__(stream_reader, self.client_connected, loop)
    # Filled in by client_connected once the transport is up.
    self.reader = None
    self.writer = None
    self._drain_lock = asyncio.Lock(loop=loop)
    # HTTP handshake parameters, populated during the opening handshake.
    self.path = None
    self.request_headers = None
    self.raw_request_headers = None
    self.response_headers = None
    self.raw_response_headers = None
    # Negotiated WebSocket protocol parameters.
    self.extensions = []
    self.subprotocol = None
    # Code and reason must be set when the closing handshake completes.
    self.close_code = None
    self.close_reason = ''
    # Futures tracking steps in the connection's lifecycle.
    # Set to True when the opening handshake has completed properly.
    self.opening_handshake = asyncio.Future(loop=loop)
    # Set to True when the closing handshake has completed properly and to
    # False when the connection terminates abnormally.
    self.closing_handshake = asyncio.Future(loop=loop)
    # Set to None when the connection state becomes CLOSED.
    self.connection_closed = asyncio.Future(loop=loop)
    # Queue of received messages.
    self.messages = asyncio.queues.Queue(max_queue, loop=loop)
    # Mapping of ping IDs to waiters, in chronological order.
    self.pings = collections.OrderedDict()
    # Task managing the connection, initialized in self.client_connected.
    self.worker_task = None
    # In a subclass implementing the opening handshake, the state will be
    # CONNECTING at this point.
    if self.state == OPEN:
        self.opening_handshake.set_result(True)
async def read_pipe(pipe):
    """Attach *pipe* to a new StreamReader via the surrounding ``loop`` and return it."""
    stream = asyncio.StreamReader()
    protocol = asyncio.StreamReaderProtocol(stream)
    # ``loop`` comes from the enclosing scope.
    transport, _ = await loop.connect_read_pipe(lambda: protocol, pipe)
    return stream
async def open_connection(host, port, loop, create_connection):
    """Open a connection via *create_connection* and wrap it in stream objects.

    Returns a (StreamReader, StreamWriter) pair bound to *loop*.
    """
    stream_reader = asyncio.StreamReader(loop=loop)
    stream_protocol = asyncio.StreamReaderProtocol(stream_reader, loop=loop)
    transport, _ = await create_connection(lambda: stream_protocol, host, port)
    stream_writer = asyncio.StreamWriter(transport, stream_protocol, stream_reader, loop)
    return stream_reader, stream_writer
def setUp(self):
    """Create a fresh event loop (installed as current) and a StreamReader."""
    super().setUp()
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    self.loop = loop
    self.stream = asyncio.StreamReader()
def __init__(self, *, host=None, port=None, secure=None, ping_interval=20,
             ping_timeout=20, close_timeout=None, max_size=2**20, max_queue=2**5,
             read_limit=2**16, write_limit=2**16, loop=None, legacy_recv=False,
             timeout=10):
    """Initialize the WebSocket common-protocol state.

    Sets up connection metadata, keepalive settings, buffer limits, the
    underlying StreamReader, and the bookkeeping for messages, pings and the
    connection lifecycle. Subclasses perform the opening handshake and call
    connection_open() on success.
    """
    # Backwards-compatibility: close_timeout used to be called timeout.
    # If both are specified, timeout is ignored.
    if close_timeout is None:
        close_timeout = timeout
    self.host = host
    self.port = port
    self.secure = secure
    self.ping_interval = ping_interval
    self.ping_timeout = ping_timeout
    self.close_timeout = close_timeout
    self.max_size = max_size
    self.max_queue = max_queue
    self.read_limit = read_limit
    self.write_limit = write_limit
    # Store a reference to loop to avoid relying on self._loop, a private
    # attribute of StreamReaderProtocol, inherited from FlowControlMixin.
    if loop is None:
        loop = asyncio.get_event_loop()
    self.loop = loop
    self.legacy_recv = legacy_recv
    # Configure read buffer limits. The high-water limit is defined by
    # ``self.read_limit``. The ``limit`` argument controls the line length
    # limit and half the buffer limit of :class:`~asyncio.StreamReader`.
    # That's why it must be set to half of ``self.read_limit``.
    stream_reader = asyncio.StreamReader(limit=read_limit // 2, loop=loop)
    super().__init__(stream_reader, self.client_connected, loop)
    # Filled in by client_connected once the transport is up.
    self.reader = None
    self.writer = None
    self._drain_lock = asyncio.Lock(loop=loop)
    # This class implements the data transfer and closing handshake, which
    # are shared between the client-side and the server-side.
    # Subclasses implement the opening handshake and, on success, execute
    # :meth:`connection_open()` to change the state to OPEN.
    self.state = State.CONNECTING
    logger.debug("%s - state = CONNECTING", self.side)
    # HTTP protocol parameters.
    self.path = None
    self.request_headers = None
    self.response_headers = None
    # WebSocket protocol parameters.
    self.extensions = []
    self.subprotocol = None
    # The close code and reason are set when receiving a close frame or
    # losing the TCP connection.
    self.close_code = None
    self.close_reason = ""
    # Completed when the connection state becomes CLOSED. Translates the
    # :meth:`connection_lost()` callback to a :class:`~asyncio.Future`
    # that can be awaited. (Other :class:`~asyncio.Protocol` callbacks are
    # translated by ``self.stream_reader``).
    self.connection_lost_waiter = asyncio.Future(loop=loop)
    # Queue of received messages.
    self.messages = collections.deque()
    self._pop_message_waiter = None
    self._put_message_waiter = None
    # Mapping of ping IDs to waiters, in chronological order.
    self.pings = collections.OrderedDict()
    # Task running the data transfer.
    self.transfer_data_task = None
    # Exception that occurred during data transfer, if any.
    self.transfer_data_exc = None
    # Task sending keepalive pings.
    self.keepalive_ping_task = None
    # Task closing the TCP connection.
    self.close_connection_task = None
async def create_stdin_reader() -> StreamReader:
    """Attach an asyncio StreamReader to this process's standard input."""
    reader = asyncio.StreamReader()
    reader_protocol = asyncio.StreamReaderProtocol(reader)
    event_loop = asyncio.get_running_loop()
    await event_loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)
    return reader
def _get_empty_buffer(self, _: bool) -> asyncio.StreamReader:
    """Return a brand-new, empty StreamReader; the boolean flag is ignored."""
    empty_reader = asyncio.StreamReader()
    return empty_reader
async def connect(self):
    """Hook the read pipe up to a fresh asyncio StreamReader."""
    loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader(loop=loop)
    self._reader = reader
    self._read_transport, _ = await loop.connect_read_pipe(
        lambda: asyncio.StreamReaderProtocol(reader), self._read_pipe)
def test_feed_empty_data(self): stream = asyncio.StreamReader(loop=self.loop) stream.feed_data(b'') self.assertEqual(b'', stream._buffer)
async def prepare_pipeline(self):
    """Wire this process's stdin into an asyncio StreamReader."""
    loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader()
    self.loop = loop
    self.reader = reader
    self.protocol = asyncio.StreamReaderProtocol(reader)
    await loop.connect_read_pipe(lambda: self.protocol, sys.stdin)
def test_invalid_limit(self): with self.assertRaisesRegex(ValueError, 'imit'): asyncio.StreamReader(limit=0, loop=self.loop) with self.assertRaisesRegex(ValueError, 'imit'): asyncio.StreamReader(limit=-1, loop=self.loop)
def test_ctor_global_loop(self, m_events):
    """Without an explicit loop, the reader asks the (patched) events module."""
    reader = asyncio.StreamReader()
    expected_loop = m_events.get_event_loop.return_value
    self.assertIs(reader._loop, expected_loop)
def test_readuntil_separator(self): stream = asyncio.StreamReader(loop=self.loop) with self.assertRaisesRegex(ValueError, 'Separator should be'): self.loop.run_until_complete(stream.readuntil(separator=b''))
def test___repr__data(self): stream = asyncio.StreamReader(loop=self.loop) stream.feed_data(b'data') self.assertEqual("<StreamReader 4 bytes>", repr(stream))
def test_streamreader_constructor_without_loop(self):
    """With no current event loop, construction warns and then fails."""
    with self.assertWarns(DeprecationWarning) as cm, \
            self.assertRaisesRegex(RuntimeError, 'There is no current event loop'):
        asyncio.StreamReader()
    # The deprecation warning must point at this test file.
    self.assertEqual(cm.warnings[0].filename, __file__)
def test___repr__exception(self): stream = asyncio.StreamReader(loop=self.loop) exc = RuntimeError() stream.set_exception(exc) self.assertEqual("<StreamReader e=RuntimeError()>", repr(stream))
def test___repr__(self): stream = asyncio.StreamReader(loop=self.loop) self.assertEqual("<StreamReader>", repr(stream))
def test___repr__transport(self): stream = asyncio.StreamReader(loop=self.loop) stream._transport = mock.Mock() stream._transport.__repr__ = mock.Mock() stream._transport.__repr__.return_value = "<Transport>" self.assertEqual("<StreamReader t=<Transport>>", repr(stream))
def test___repr__eof(self): stream = asyncio.StreamReader(loop=self.loop) stream.feed_eof() self.assertEqual("<StreamReader eof>", repr(stream))
def test_exception(self): stream = asyncio.StreamReader(loop=self.loop) self.assertIsNone(stream.exception()) exc = ValueError() stream.set_exception(exc) self.assertIs(stream.exception(), exc)