async def relay(
        dreader: asyncio.StreamReader,
        dwriter: asyncio.StreamWriter,
        ureader: asyncio.StreamReader,
        uwriter: asyncio.StreamWriter,
) -> None:
    """Pass data/EOF from dreader to uwriter, and ureader to dwriter.

    Both writers are guaranteed to be closed by the time this function returns.
    """
    _logger.debug(
        'Relaying %r <=> %r', dwriter.get_extra_info('peername'),
        uwriter.get_extra_info('peername'))
    utask = asyncio.create_task(_relay_data_side(dreader, uwriter))
    dtask = asyncio.create_task(_relay_data_side(ureader, dwriter))
    async with contexts.aclosing_multiple_writers(dwriter, uwriter):
        try:
            await asyncio.gather(utask, dtask)
            _logger.debug(
                'Relay %r <=> %r ended normally',
                dwriter.get_extra_info('peername'),
                uwriter.get_extra_info('peername'))
        except:
            dtask.cancel()
            utask.cancel()
            raise
        finally:
            await asyncio.wait({dtask, utask})
            for t in (dtask, utask):
                if t.exception():
                    _logger.debug(
                        'Relay task %r caught exception %r', t, t.exception())
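A minimal usage sketch (not from the original project; UPSTREAM, _handle and the listen address are illustrative) showing how relay() can be wired into an asyncio.start_server callback:

import asyncio

UPSTREAM = ('upstream.example', 8080)  # hypothetical upstream address

async def _handle(dreader: asyncio.StreamReader,
                  dwriter: asyncio.StreamWriter) -> None:
    # Connect upstream, then let relay() shuttle bytes in both directions
    # and close both writers when either side finishes or fails.
    ureader, uwriter = await asyncio.open_connection(*UPSTREAM)
    await relay(dreader, dwriter, ureader, uwriter)

async def main() -> None:
    server = await asyncio.start_server(_handle, '127.0.0.1', 1080)
    async with server:
        await server.serve_forever()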
Example #2
    def test_put(self, loop):
        result = 39
        result_queue = Queue()
        called_write = Queue()
        called_write_eof = Queue()
        writer = StreamWriter(None, None, None, None)
        unix_socket = UnixSocket(None, loop)
        unix_socket.writer = writer

        def write(data):
            called_write.put(True)

            if data != b'\n':
                result_queue.put(unix_socket.decode(data))

        def write_eof():
            called_write_eof.put(True)

        writer.write = write
        writer.write_eof = write_eof

        async def run():
            unix_socket.ready.set()
            await unix_socket.put(result)

        loop.run_until_complete(run())
        check_queue_multi(called_write, [True] * 2)
        check_queue(result_queue, result)
async def handle_client_connection(
        adapter: adapters.ClientAdapter,
        transport: str,
        upstream_host: str,
        upstream_port: int,
        args: Dict[str, str],
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
) -> None:
    handler_logger.debug(
        'Accepted connection for transport %s from %r on %r',
        transport,
        writer.get_extra_info('peername'), writer.get_extra_info('sockname'))
    async with contexts.log_unhandled_exc(handler_logger), \
               contexts.aclosing_multiple_writers(writer) as writers:
        try:
            ureader, uwriter = await adapter.open_transport_connection(
                transport, upstream_host, upstream_port, args)
        except exceptions.PTConnectError as e:
            handler_logger.warning(
                'PT reported error while connecting to upstream '
                '(%r, %r): %r', upstream_host, upstream_port, e)
            writer.transport.abort()
            return
        writers.add(uwriter)
        logname = (f'{writer.get_extra_info("peername")!r} ==> '
                   f'({upstream_host!r}, {upstream_port})')
        handler_logger.info('[%s] %s', transport, logname)
        try:
            await relays.relay(reader, writer, ureader, uwriter)
        except OSError as e:
            handler_logger.warning(
                '[%s] %s caught %r', transport, logname, e)
async def negotiate_socks4_userid(
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        host: Union[str, ipaddress.IPv4Address, ipaddress.IPv6Address],
        port: int,
        args: Optional[Dict[str, str]],
) -> None:
    try:
        host = ipaddress.IPv4Address(host)
    except ValueError:
        raise ValueError('SOCKS4 only supports IPv4 addresses')
    if args:
        args_bytes = encode_args(args)
    else:
        args_bytes = b''
    writer.write(b''.join((
        b'\x04',  # ver
        enums.SOCKS4Command.CONNECT,
        port.to_bytes(2, 'big'),
        host.packed,
        args_bytes,
        b'\0',
    )))
    buf = await reader.readexactly(8)
    assert buf[0] == 0, 'Invalid SOCKS4 reply version'
    reply = enums.SOCKS4Reply(buf[1:2])
    if reply is not enums.SOCKS4Reply.GRANTED:
        raise exceptions.PTSOCKS4ConnectError(reply)
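A hedged usage sketch for the SOCKS4 negotiation above (the proxy address, target address and args dict are placeholders, not values from the original project):

async def connect_via_socks4(proxy_host: str, proxy_port: int):
    # Open a connection to the PT's SOCKS4 listener, negotiate a CONNECT to
    # the real target, and hand back the stream carrying the proxied traffic.
    reader, writer = await asyncio.open_connection(proxy_host, proxy_port)
    await negotiate_socks4_userid(
        reader, writer, '203.0.113.7', 443, {'secret': 'value'})
    return reader, writer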
Example #5
File: __init__.py Project: icgood/pymap
 async def __call__(self, reader: StreamReader,
                    writer: StreamWriter) -> None:
     conn = ManageSieveConnection(self._config, reader, writer)
     try:
         await conn.run(self._login)
     finally:
         writer.close()
Example #6
    async def send(self, writer: asyncio.StreamWriter):
        """
        Future to send a message over the WebSocket
        :param writer: StreamWriter used to send the message
        :return:
        """
        opcode = self.opcode
        data = self.data

        frame = bytearray()
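        # First header byte: FIN bit set (unfragmented frame) OR'd with the
        # 4-bit opcode; the second byte carries the 7-bit payload length or
        # the 126/127 extended-length markers (RFC 6455 framing). Frames here
        # are unmasked, i.e. server-to-client.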
        head = 0b00000000
        head |= 0b10000000
        head |= opcode
        frame.append(head)
        next_byte = 0b00000000
        if data:
            payload_length = len(data)
        else:
            payload_length = 0
        if 65535 >= payload_length >= 126:
            next_byte |= 126
            extended_bytes = struct.pack("!H", payload_length)
        elif payload_length > 65535:
            next_byte |= 127
            extended_bytes = struct.pack("!Q", payload_length)
        else:
            next_byte |= payload_length
            extended_bytes = None
        frame.append(next_byte)
        if extended_bytes:
            frame.extend(extended_bytes)
        if data:
            frame.extend(data)
        writer.write(frame)
        await writer.drain()
Example #7
File: auth.py Project: firefox0x/py-evm
async def _handshake(initiator: 'HandshakeInitiator', reader: asyncio.StreamReader,
                     writer: asyncio.StreamWriter
                     ) -> Tuple[bytes, bytes, PreImage, PreImage]:
    """See the handshake() function above.

    This code was factored out into this helper so that we can create Peers with directly
    connected readers/writers for our tests.
    """
    initiator_nonce = keccak(os.urandom(HASH_LEN))
    auth_msg = initiator.create_auth_message(initiator_nonce)
    auth_init = initiator.encrypt_auth_message(auth_msg)
    writer.write(auth_init)

    auth_ack = await reader.read(ENCRYPTED_AUTH_ACK_LEN)

    ephemeral_pubkey, responder_nonce = initiator.decode_auth_ack_message(auth_ack)
    aes_secret, mac_secret, egress_mac, ingress_mac = initiator.derive_secrets(
        initiator_nonce,
        responder_nonce,
        ephemeral_pubkey,
        auth_init,
        auth_ack
    )

    return aes_secret, mac_secret, egress_mac, ingress_mac
Example #8
File: server.py Project: chfoo/wpull
    def __call__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        '''Handle a request

        Coroutine.'''
        _logger.debug('New proxy connection.')
        session = self._new_session(reader, writer)
        self.event_dispatcher.notify(self.Event.begin_session, session)
        is_error = False

        try:
            yield from session()
        except Exception as error:
            if not isinstance(error, StopIteration):
                is_error = True
                if isinstance(error, (ConnectionAbortedError,
                                      ConnectionResetError)):
                    # Client using the proxy has closed the connection
                    _logger.debug('Proxy error', exc_info=True)
                else:
                    _logger.exception('Proxy error')
                writer.close()
            else:
                raise
        finally:
            self.event_dispatcher.notify(self.Event.end_session, session,
                                         error=is_error)

        writer.close()
        _logger.debug('Proxy connection closed.')
Example #9
File: __init__.py Project: icgood/pymap
 async def __call__(self, reader: StreamReader,
                    writer: StreamWriter) -> None:
     conn = IMAPConnection(self.commands, self._config, reader, writer)
     state = ConnectionState(self._login, self._config)
     try:
         await conn.run(state)
     finally:
         writer.close()
 async def _write_ext_msg(
         writer: asyncio.StreamWriter,
         command: bytes,
         body: bytes,
 ) -> None:
     assert len(command) == 2
     body_len = len(body).to_bytes(2, 'big')
     writer.write(command + body_len + body)
     await writer.drain()
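The framing above is two command bytes followed by a 2-byte big-endian length and the body; a sketch of the matching read helper (the project's actual _read_ext_msg, used further below, is a method and may differ; typing.Tuple is assumed to be imported):

 async def _read_ext_msg(
         reader: asyncio.StreamReader,
 ) -> Tuple[bytes, bytes]:
     # One ExtOrPort message: 2-byte command, 2-byte length, then the body.
     header = await reader.readexactly(4)
     body_len = int.from_bytes(header[2:4], 'big')
     body = await reader.readexactly(body_len)
     return header[:2], body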
Example #11
 async def _client_wrapper(self,
                           reader: StreamReader,
                           writer: StreamWriter) -> None:
     try:
         return await self._callback(
             reader=reader, writer=writer,
         )
     except asyncio.CancelledError:
         pass
     finally:
         writer.close()
async def _relay_data_side(
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
) -> None:
    """Pass data and EOF from reader to writer."""
    while True:
        buf = await reader.read(BUF_SIZE)
        if not buf:  # EOF
            break
        writer.write(buf)
        await writer.drain()
    writer.write_eof()
    await writer.drain()
    async def _ext_or_port_handler(
            self,
            reader: asyncio.StreamReader,
            writer: asyncio.StreamWriter,
    ) -> None:
        # This callback function should not close writer when exiting. After
        # all, the API consumer may decide to stash reader and writer somewhere
        # for use later and return from their supplied callback function early.
        async with contexts.log_unhandled_exc(self._logger):
            try:
                auth_result = await self._authenticator.authenticate(
                    reader, writer)
            except (OSError, asyncio.IncompleteReadError) as e:
                self._logger.warning(
                    'Error during ExtOrPort SafeCookie authentication: %r', e)
                return
            if not auth_result:
                self._logger.warning(
                    'ExtOrPort SafeCookie authentication failed')
                return
            transport = host = port = None
            while True:
                command, body = await self._read_ext_msg(reader)
                if command == enums.ExtOrPortCommand.DONE:
                    break
                elif command == enums.ExtOrPortCommand.USERADDR:
                    host, port = str_utils.parse_hostport(body.decode('ascii'))
                    host = ipaddress.ip_address(host)
                elif command == enums.ExtOrPortCommand.TRANSPORT:
                    transport = body.decode('ascii')
                    str_utils.validate_transport_name(transport)
                else:
                    self._logger.info(
                        'Received unknown ExtOrPort command %r, body %r',
                        command, body)
            connection_info = ExtOrPortClientConnection(transport, host, port)
            if self._preconnect_cb is not None:
                accept = await self._preconnect_cb(connection_info)
            else:
                accept = True
            if not accept:
                await self._write_ext_msg(
                    writer, enums.ExtOrPortReply.DENY, b'')
                writer.write_eof()
                return
            await self._write_ext_msg(writer, enums.ExtOrPortReply.OKAY, b'')

            await self._cb(reader, writer, connection_info)
Example #14
def QueueSender(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, queue_name: str):
    """
    A coroutine for pulling items from the Queue to the streams.
    """
    client = writer.get_extra_info("peername")
    sclient = ':'.join(str(_) for _ in client)
    while True:
        try:
            data = yield from reader.read(65536)
        except ConnectionResetError:
            rlogger.info("Client {} closed connection".format(sclient))
            return
        if not data:
            slogger.info("Client {} closed connection".format(sclient))
            return
        # Unpack data
        try:
            sub_data = msgpack.unpackb(data, encoding='utf-8')
        except (msgpack.UnpackException, ValueError) as e:
            slogger.error("Recieved non-msgpack pull from {}".format(sclient))
            continue
        action = sub_data.get("action", -1)
        if action != 1:
            slogger.error("Received non-pull action on pull channel from client {} (action: {})"
                          .format(sclient, action))
            continue
        queue = queues[queue_name][1]
        assert isinstance(queue, asyncio.Queue)
        data = yield from queue.get()
        slogger.debug("Packing data {} for queue {}".format(data[1], queue_name))
        response = {"status": 0, "data": data[1], "msgnum": data[0]}
        msgpack.pack(response, writer)
    async def authenticate(
            self,
            reader: asyncio.StreamReader,
            writer: asyncio.StreamWriter,
    ) -> bool:
        """(async) Authenticate a connecting client.

        Returns:
            True if authentication is successful and False otherwise. The
            caller is responsible for closing the connection in case of
            failure.
        """
        writer.write(enums.ExtOrPortAuthTypes.SAFE_COOKIE
                     + enums.ExtOrPortAuthTypes.END_AUTH_TYPES)
        client_auth_type = await reader.readexactly(1)
        if client_auth_type != enums.ExtOrPortAuthTypes.SAFE_COOKIE:
            return False
        client_nonce = await reader.readexactly(self.nonce_len)
        server_nonce = secrets.token_bytes(self.nonce_len)
        server_hash = self.hash(b''.join((
            self.server_hash_header, client_nonce, server_nonce)))
        writer.write(server_hash + server_nonce)
        client_hash = await reader.readexactly(self.hash_len)
        result = hmac.compare_digest(client_hash, self.hash(b''.join((
            self.client_hash_header, client_nonce, server_nonce))))
        writer.write(int(result).to_bytes(1, 'big'))
        return result
Example #16
File: commloop.py Project: PJB3005/MoMMI
    async def handle_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        data: bytes = await reader.read(2)  # Read ID.
        if data != b"\x30\x05":
            writer.write(ERROR_ID)
            return

        logger.info(f"Got ID packets: {data}.")

        auth: bytes = await reader.read(DIGEST_SIZE)
        logger.info(f"Got digest: {auth}.")

        length: int = struct.unpack("!I", await reader.read(4))[0]
        data = b""
        while len(data) < length:
            newdata = await reader.read(length - len(data))
            if len(newdata) == 0:
                break
            data += newdata
        logger.info(f"Got message ength: {length}, data: {data}.")
        try:
            logger.info(f"Decoded: {data.decode('UTF-8')}")
            message: Dict[str, Any] = json.loads(data.decode("UTF-8"))
            logger.info(f"Loaded: {message}")
            # Any of these will throw a KeyError with broken packets.
            message["type"], message["meta"], message["cont"]
        except:
            logger.exception("hrrm")
            writer.write(ERROR_PACK)
            return

        stomach: hmac.HMAC = hmac.new(AUTHKEY, data, sha512)
        if not hmac.compare_digest(stomach.digest(), auth):
            writer.write(ERROR_HMAC)
            return

        logger.info(message)
        writer.write(ERROR_OK)

        for event in events:
            try:
                await event(message)
            except:
                logger.exception("Caught exception inside commloop event handler.")
Example #17
def QueueWaiter(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, queue_name: str):
    """
    A coroutine for waiting upon new items to be placed into the Queue.
    """
    client = writer.get_extra_info("peername")
    sclient = ':'.join(str(_) for _ in client)
    while True:
        try:
            data = yield from reader.read(65536)
        except ConnectionResetError:
            rlogger.info("Client {} closed connection".format(sclient))
            return
        if not data:
            rlogger.info("Client {} closed connection".format(sclient))
            return
        # Unpack
        try:
            sub_data = msgpack.unpackb(data, encoding="utf-8")
        except (msgpack.UnpackException, ValueError) as e:
            rlogger.error("Recieved non-msgpack push from {}".format(sclient))
            continue
        rlogger.debug("Recieved data from client {}: {}".format(sclient, sub_data))
        assert isinstance(sub_data, dict)
        action = sub_data.get("action", -1)
        if action != 0:
            rlogger.error("Received non-push action on push channel from client {} (action: {})"
                          .format(sclient, action))
            continue
        # Get data to place
        data = sub_data.get("data", None)
        if not data:
            rlogger.error("Recieved no data on push channel from client {}".format(sclient))
            continue
        # Increment and get message number
        queues[queue_name][0] += 1
        msgnum = queues[queue_name][0]
        queue = queues[queue_name][1]
        # Put it on the queue
        assert isinstance(queue, asyncio.Queue)
        yield from queue.put([msgnum, data])
        # Respond to the client
        response = {"msgnum": msgnum, "status": 0}
        rlogger.debug("Sending response with message number {}".format(msgnum))
        msgpack.pack(response, writer)
Example #18
    async def _accept(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        addr = writer.get_extra_info('peername')
        peer = Peer(addr[0], addr[1])

        client = PeerTCPClient(self._our_peer_id, peer)

        try:
            info_hash = await client.accept(reader, writer)
            if info_hash not in self._torrent_managers:
                raise ValueError('Unknown info_hash')
        except Exception as e:
            client.close()

            if isinstance(e, asyncio.CancelledError):
                raise
            else:
                logger.debug("%s wasn't accepted because of %r", peer, e)
        else:
            self._torrent_managers[info_hash].accept_client(peer, client)
Example #19
def connected_cb(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    """
    A callback for connected clients.
    """
    client = writer.get_extra_info("peername")
    sclient = ':'.join(str(_) for _ in client)
    logger.info("Recieved connection from {}:{}".format(*client))
    # Read a subscription message.
    try:
        sub = yield from reader.read(65536)
    except ConnectionResetError:
        rlogger.info("Client {} closed connection".format(sclient))
        return
    if not sub:
        logger.error("Client {} terminated connection abnormally".format(sclient))
        return
    try:
        sub_data = msgpack.unpackb(sub)
    except (msgpack.UnpackException, ValueError) as e:
        logger.error("Recieved unknown subscription message from {}:{}".format(*client))
        yield from writer.drain()
        writer.close()
        return
    # Get the data from the subscription message.
    if b'queue' not in sub_data:
        logger.error("Received null queue from {}".format(sclient))
        yield from writer.drain()
        writer.close()
        return
    queue_to_sub = sub_data[b"queue"]
    action = sub_data.get(b"action", 0)
    queue_created = False
    if queue_to_sub not in queues:
        queues[queue_to_sub] = [0, asyncio.Queue()]
        logger.debug("Created queue {}".format(queue_to_sub))
        queue_created = True
    logger.debug("Client {} subscribed to queue {} in mode {} ({})".format(sclient, queue_to_sub,
                                                                           action, "push" if not action else "pull"))
    if action == 0:
        loop.create_task(QueueWaiter(reader, writer, queue_to_sub))
    else:
        loop.create_task(QueueSender(reader, writer, queue_to_sub))
    msgpack.pack({"created": queue_created}, writer)
Example #20
 async def handle_client(self, reader: StreamReader, writer: StreamWriter) -> None:
     connection = h11.Connection(h11.SERVER)
     body = None  # type: StreamReader
     while True:
         data = await reader.read(65536)
         connection.receive_data(data)
         event = connection.next_event()
         if event is h11.NEED_DATA:
             continue
         elif isinstance(event, h11.Request):
             headers = CIMultiDict((key.decode('ascii'), value.decode('iso-8859-1'))
                                   for key, value in event.headers)
             peername = writer.get_extra_info('peername')
             peercert = writer.get_extra_info('peercert')
             parsed = urlparse(event.target, allow_fragments=False)
             query = unquote(parsed.query.decode('ascii'))
             request = HTTPRequest(
                 event.http_version.decode('ascii'), event.method.decode('ascii'),
                 parsed.path.decode('utf-8'), query, headers, body, bool(self.tls_context),
                 peername, peercert)
         elif isinstance(event, h11.Data):
             body.feed_data(event.data)
         elif isinstance(event, h11.EndOfMessage):
             body.feed_eof()
Example #21
async def send_msg(stream: StreamWriter, data: bytes):
    size_bytes = len(data).to_bytes(4, byteorder='big')
    stream.writelines([size_bytes, data])
    await stream.drain()
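A matching receiver for this 4-byte length-prefixed framing might look like the following sketch (recv_msg is not part of the original example; StreamReader is assumed to be imported the same way as StreamWriter):

async def recv_msg(stream: StreamReader) -> bytes:
    # Read the 4-byte big-endian size prefix, then exactly that many bytes.
    size = int.from_bytes(await stream.readexactly(4), byteorder='big')
    return await stream.readexactly(size)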
Example #22
def send_line_to_writer(writer: asyncio.StreamWriter, line):
    print('->', line)
    writer.write(line.encode('utf-8') + b'\r\n')
async def negotiate_socks5_userpass(
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        host: Union[str, ipaddress.IPv4Address, ipaddress.IPv6Address],
        port: int,
        args: Optional[Dict[str, str]],
) -> None:
    if args:
        args_bytes = encode_args(args)
        if len(args_bytes) > 255 * 2:
            raise ValueError('Encoded args too long')
        username = args_bytes[:255]
        password = args_bytes[255:]
        if not password:
            password = b'\0'
        writer.write(b'\x05\x01'  # SOCKS5, 1 auth method
                     + enums.SOCKS5AuthType.USERNAME_PASSWORD)
        buf = await reader.readexactly(2)
        assert buf[0] == 5, 'Invalid server SOCKS version'
        if buf[1:2] != enums.SOCKS5AuthType.USERNAME_PASSWORD:
            raise RuntimeError(
                f'PT rejected userpass auth method, returned {buf[1:2]!r}')
        writer.write(b''.join((
            b'\x01',  # userpass sub-negotiation version 1
            len(username).to_bytes(1, 'big'),
            username,
            len(password).to_bytes(1, 'big'),
            password,
        )))
        buf = await reader.readexactly(2)
        assert buf[0] == 1, 'Invalid server USERPASS sub-negotiation version'
        if buf[1] != 0:
            raise RuntimeError(
                f'PT rejected username/password, returned {buf[1:2]!r}')
    else:
        writer.write(b'\x05\x01'  # SOCKS5, 1 auth method
                     + enums.SOCKS5AuthType.NO_AUTH)
        buf = await reader.readexactly(2)
        assert buf[0] == 5, 'Invalid server SOCKS version'
        if buf[1:2] != enums.SOCKS5AuthType.NO_AUTH:
            raise RuntimeError(
                f'PT rejected noauth auth method, returned {buf[1:2]!r}')

    try:
        host = ipaddress.ip_address(host)
    except ValueError:
        host_type = enums.SOCKS5AddressType.DOMAIN_NAME
        host_bytes = host.encode('idna')
        host_len = len(host_bytes)
        if host_len > 255:
            raise ValueError('Hostname too long')
        host_bytes = host_len.to_bytes(1, 'big') + host_bytes
    else:
        if host.version == 6:
            host_type = enums.SOCKS5AddressType.IPV6_ADDRESS
        else:
            host_type = enums.SOCKS5AddressType.IPV4_ADDRESS
        host_bytes = host.packed
    writer.write(b''.join((
        b'\x05',  # SOCKS5
        enums.SOCKS5Command.CONNECT,
        b'\0',  # reserved
        host_type,
        host_bytes,
        port.to_bytes(2, 'big'),
    )))
    # buf = version, reply, reserved, addr_type, 1st byte of address
    buf = await reader.readexactly(5)
    assert buf[0] == 5, 'Invalid server SOCKS version'
    reply = enums.SOCKS5Reply(buf[1:2])
    if reply is not enums.SOCKS5Reply.SUCCESS:
        raise exceptions.PTSOCKS5ConnectError(reply)
    assert buf[2] == 0, 'Invalid RSV field'
    bind_addr_type = enums.SOCKS5AddressType(buf[3:4])
    if bind_addr_type is enums.SOCKS5AddressType.IPV4_ADDRESS:
        # consume remaining address and port in one call to readexactly()
        await reader.readexactly(-1 + 4 + 2)
    elif bind_addr_type is enums.SOCKS5AddressType.IPV6_ADDRESS:
        await reader.readexactly(-1 + 16 + 2)
    else:
        await reader.readexactly(buf[4] + 2)
Example #24
 async def handle_client(self, reader: asyncio.StreamReader,
                         writer: asyncio.StreamWriter):
     self.DATA.append(await reader.readline())
     writer.close()
Example #25
    async def _receive_handshake(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ) -> None:
        msg = await self.wait(
            reader.read(ENCRYPTED_AUTH_MSG_LEN), timeout=REPLY_TIMEOUT
        )
        ip, socket, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, socket)
        self.logger.debug("Receiving handshake from %s", remote_address)
        got_eip8 = False
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey
            )
        except DecryptionError:
            # Try to decode as EIP8
            got_eip8 = True
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await self.wait(reader.read(remaining_bytes), timeout=REPLY_TIMEOUT)
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey
                )
            except DecryptionError as e:
                self.logger.debug("Failed to decrypt handshake: %s", e)
                return

        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(
            initiator_remote, self.privkey, got_eip8, self.cancel_token
        )

        responder_nonce = os.urandom(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await self.wait(writer.drain())

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext,
        )
        connection = PeerConnection(
            reader=reader,
            writer=writer,
            aes_secret=aes_secret,
            mac_secret=mac_secret,
            egress_mac=egress_mac,
            ingress_mac=ingress_mac,
        )

        # Create and register peer in peer_pool
        peer = self.peer_pool.get_peer_factory().create_peer(
            remote=initiator_remote, connection=connection, inbound=True
        )

        if (
            peer.remote in self.peer_pool.connected_nodes
            or peer.remote.pubkey in self.peer_pool.dialedout_pubkeys
        ):
            self.logger.debug("already connected or dialed, disconnecting...")
            await peer.disconnect(DisconnectReason.already_connected)
            return

        if self.peer_pool.is_full:
            await peer.disconnect(DisconnectReason.too_many_peers)
            return
        elif NO_SAME_IP and not self.peer_pool.is_valid_connection_candidate(
            peer.remote
        ):
            await peer.disconnect(DisconnectReason.useless_peer)
            return

        total_peers = len(self.peer_pool)
        inbound_peer_count = len(
            [peer for peer in self.peer_pool.connected_nodes.values() if peer.inbound]
        )
        if (
            total_peers > 1
            and inbound_peer_count / total_peers > self.allow_dial_in_ratio
        ):
            # make sure to have at least (1-allow_dial_in_ratio) outbound connections out of total connections
            await peer.disconnect(DisconnectReason.too_many_peers)
        else:
            # We use self.wait() here as a workaround for
            # https://github.com/ethereum/py-evm/issues/670.
            await self.wait(self.do_handshake(peer))
Example #26
async def write_error(writer: asyncio.StreamWriter, message: str) -> None:
    json_error = json.dumps({'error': message})
    writer.write(json_error.encode())
    await writer.drain()
Example #27
async def _write_tcp_packet(packet: Packet, writer: aio.StreamWriter) -> None:
    data = packet.to_bytes()
    writer.write(struct.pack('>H', len(data)) + data)
    await writer.drain()
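A corresponding reader for this 2-byte length-prefixed framing could be sketched as follows (Packet.from_bytes is assumed to mirror to_bytes):

async def _read_tcp_packet(reader: aio.StreamReader) -> Packet:
    # Read the 2-byte big-endian length prefix, then the packet body.
    (length,) = struct.unpack('>H', await reader.readexactly(2))
    return Packet.from_bytes(await reader.readexactly(length))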
Example #28
async def handle_connection(reader: asyncio.StreamReader,
                            writer: asyncio.StreamWriter):
    host, port = writer.get_extra_info('peername')
    logger = main_logger.getChild(f"{host}:{port}")
    logger.info("Connection opened from %s", writer.get_extra_info('peername'))
    writer.write("DNEVNIK-RU-BKEND-62-02\n".encode())
    writer.write("Посчитайте средний балл для каждого ученика\n".encode())
    writer.write(
        "Если ученик не набрал оценок за период следует вывести н/а\n".encode(
        ))
    task = Task()
    remaining = 450
    while remaining and not writer.is_closing():
        logger.info(f"{remaining} tasks left")
        writer.write(f"Осталось {remaining} заданий\n".encode())
        try:
            task_s = task.get_task()
            writer.write(task_s.encode())
            line = await reader.readuntil()
            logger.info(f"{len(line)} bytes received")
            try:
                correct = task.check_task(line)
            except Exception as e:
                writer.write("Неверный формат ответа\n".encode())
                logger.info("presentation error")
                continue
            if correct:
                remaining -= 1
                writer.write("Верно\n".encode())
                logger.info("correct")
            else:
                writer.write("Неверно\n".encode())
                logger.info("incorrect")
                break
        except Exception as e:
            writer.write("Непредвиденная ошибка\n".encode())
            logger.error(e)
            break
    if remaining <= 0:
        logger.info("solved")
        flag = FLAG
        writer.write(f"Ваш флаг: {flag}\n".encode())
    writer.write(f"До свидания!\n".encode())
    writer.write_eof()
    await writer.wait_closed()
Example #29
 async def close(self, reader: StreamReader, writer: StreamWriter) -> None:
     """ Close a StreamReader and StreamWriter """
     reader.feed_eof()
     writer.close()
     await writer.wait_closed()
Example #30
	async def send_protocol(self, writer: asyncio.StreamWriter,
		message: bytes, address: tuple) -> None:
		""" This coroutine sends a protocol message to a participant. """
		writer.write(message)
		await writer.drain()
		self.logger.info('Sent {} to host: {} at port: {}'.format(message.decode(), *address))
Example #31
 async def to_stream(self, writer: asyncio.StreamWriter):
     writer.write(self.to_bytes())
     await writer.drain()
     self.protocol_ts = datetime.now()
Example #32
    async def _do_process_communication(
        self,
        chain: Chain,
        challenge: bytes32,
        initial_form: ClassgroupElement,
        ip: str,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
    ):
        disc: int = create_discriminant(challenge,
                                        self.constants.DISCRIMINANT_SIZE_BITS)

        try:
            # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
            # the timelord tells the vdf_client what to execute.
            async with self.lock:
                if self.config["fast_algorithm"]:
                    # Run n-wesolowski (fast) algorithm.
                    writer.write(b"N")
                else:
                    # Run two-wesolowski (slow) algorithm.
                    writer.write(b"T")
                await writer.drain()

            prefix = str(len(str(disc)))
            if len(prefix) == 1:
                prefix = "00" + prefix
            if len(prefix) == 2:
                prefix = "0" + prefix
            async with self.lock:
                writer.write((prefix + str(disc)).encode())
                await writer.drain()

            # Send (a, b) from 'initial_form'.
            for num in [initial_form.a, initial_form.b]:
                prefix_l = len(str(num))
                prefix_len = len(str(prefix_l))
                async with self.lock:
                    writer.write(
                        (str(prefix_len) + str(prefix_l) + str(num)).encode())
                    await writer.drain()
            try:
                ok = await reader.readexactly(2)
            except (asyncio.IncompleteReadError, ConnectionResetError,
                    Exception) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append(chain)
                    self.vdf_failures_count += 1
                return

            if ok.decode() != "OK":
                return

            log.info("Got handshake with VDF client.")
            async with self.lock:
                self.allows_iters.append(chain)
            # Listen to the client until "STOP" is received.
            while True:
                try:
                    data = await reader.readexactly(4)
                except (
                        asyncio.IncompleteReadError,
                        ConnectionResetError,
                        Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append(chain)
                        self.vdf_failures_count += 1
                    break

                msg = ""
                try:
                    msg = data.decode()
                except Exception:
                    pass
                if msg == "STOP":
                    log.info(f"Stopped client running on ip {ip}.")
                    async with self.lock:
                        writer.write(b"ACK")
                        await writer.drain()
                    break
                else:
                    try:
                        # This must be a proof, 4 bytes is length prefix
                        length = int.from_bytes(data, "big")
                        proof = await reader.readexactly(length)
                        stdout_bytes_io: io.BytesIO = io.BytesIO(
                            bytes.fromhex(proof.decode()))
                    except (
                            asyncio.IncompleteReadError,
                            ConnectionResetError,
                            Exception,
                    ) as e:
                        log.warning(f"{type(e)} {e}")
                        async with self.lock:
                            self.vdf_failures.append(chain)
                            self.vdf_failures_count += 1
                        break

                    iterations_needed = uint64(
                        int.from_bytes(stdout_bytes_io.read(8),
                                       "big",
                                       signed=True))

                    y_size_bytes = stdout_bytes_io.read(8)
                    y_size = uint64(
                        int.from_bytes(y_size_bytes, "big", signed=True))

                    y_bytes = stdout_bytes_io.read(y_size)
                    witness_type = uint8(
                        int.from_bytes(stdout_bytes_io.read(1),
                                       "big",
                                       signed=True))
                    proof_bytes: bytes = stdout_bytes_io.read()

                    # Verifies our own proof just in case
                    int_size = (self.constants.DISCRIMINANT_SIZE_BITS +
                                16) >> 4
                    a = int.from_bytes(y_bytes[:int_size], "big", signed=True)
                    b = int.from_bytes(y_bytes[int_size:], "big", signed=True)
                    output = ClassgroupElement(int512(a), int512(b))
                    time_taken = time.time() - self.chain_start_time[chain]
                    ips = int(iterations_needed / time_taken * 10) / 10
                    log.info(
                        f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                        f" iters, "
                        f"Estimated IPS: {ips}, Chain: {chain}")

                    vdf_info: VDFInfo = VDFInfo(
                        challenge,
                        iterations_needed,
                        output,
                    )
                    vdf_proof: VDFProof = VDFProof(
                        witness_type,
                        proof_bytes,
                    )

                    if not vdf_proof.is_valid(self.constants, initial_form,
                                              vdf_info):
                        log.error("Invalid proof of time!")
                    async with self.lock:
                        self.proofs_finished.append(
                            (chain, vdf_info, vdf_proof))
        except ConnectionResetError as e:
            log.info(f"Connection reset with VDF client {e}")
Example #33
async def handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    """Client handler"""
    try:
        addr = writer.get_extra_info("peername")
        print("Incoming connection from: {}:{}".format(addr[0], addr[1]))

        encryption = Encryptor()
        pub = encryption.create_rsa_pair()

        writer.write(pub)
        await writer.drain()

        aes_key = encryption.rsa_decrypt(await reader.read(1024))
        encryption.set_aes(aes_key)

        writer.write(encryption.aes_encrypt(b"ok"))
        await writer.drain()

        db_conn = sqlite3.connect("db.sqlite")
        db_cursor = db_conn.cursor()

        while True:
            req = await reader.read(4096)
            if not req:
                raise ConnectionResetError
            data = encryption.aes_decrypt(req)
            if data:
                if data[0] == "0".encode()[0]:
                    type, short_name, *key, level, hash = data.split(b":")
                    key = b":".join(key)

                    db_cursor.execute(
                        "INSERT INTO Files "
                        "(FileName, Hash, AccessLvl, Key) "
                        "VALUES (?, ?, ?, ?)",
                        (short_name.decode(), hash.decode(), level.decode(),
                         key))
                    db_conn.commit()
                    writer.write(encryption.aes_encrypt(b"ok"))
                    await writer.drain()
                elif data[0] == "1".encode()[0]:
                    type, key, hash = data.split(b":")
                    qu = """SELECT FileName, Key FROM Files WHERE Hash = ?
     AND AccessLvl <= (SELECT AccessLvl From Users WHERE Keys = ?)"""
                    res = db_cursor.execute(
                        qu, (hash.decode(), key.decode())).fetchone()
                    if res:
                        file_name, file_key = res
                        resp = b":".join((file_name.encode(), file_key))
                        writer.write(encryption.aes_encrypt(resp))
                        await writer.drain()
                    else:
                        writer.write(encryption.aes_encrypt(b"err"))

        writer.close()
    except ConnectionResetError as err:
        addr = writer.get_extra_info("peername")
        print("Connection closed: {}:{}".format(addr[0], addr[1]))
        writer.close()
Example #34
 def to_stream(self, writer: asyncio.StreamWriter):
     writer.write(self.to_bytes())
     yield from writer.drain()
     self.protocol_ts = datetime.now()
Example #35
 def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
     self.r = reader
     self.w = writer
     self.__socket = writer.get_extra_info("socket")
Example #36
 def send_object(obj: Any, writer: asyncio.StreamWriter):
     data = pickle.dumps(obj)
     length_data = struct.pack(ControlServer.LENGTH_FMT, len(data))
     writer.write(length_data)
     writer.write(data)
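A matching receive-side helper could read the fixed-size length header and unpickle the payload; a sketch (assumes the same ControlServer.LENGTH_FMT, an asyncio.StreamReader, and a trusted peer, since pickle is unsafe on untrusted data):

 async def recv_object(reader: asyncio.StreamReader) -> Any:
     # Read the fixed-size length header, then unpickle exactly that many bytes.
     header = await reader.readexactly(struct.calcsize(ControlServer.LENGTH_FMT))
     (length,) = struct.unpack(ControlServer.LENGTH_FMT, header)
     return pickle.loads(await reader.readexactly(length))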
Example #37
    async def _do_process_communication(
        self,
        chain: Chain,
        challenge: bytes32,
        initial_form: ClassgroupElement,
        ip: str,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        # Data specific only when running in bluebox mode.
        bluebox_iteration: Optional[uint64] = None,
        header_hash: Optional[bytes32] = None,
        height: Optional[uint32] = None,
        field_vdf: Optional[uint8] = None,
    ):
        disc: int = create_discriminant(challenge,
                                        self.constants.DISCRIMINANT_SIZE_BITS)

        try:
            # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
            # the timelord tells the vdf_client what to execute.
            async with self.lock:
                if self.sanitizer_mode:
                    writer.write(b"S")
                else:
                    if self.config["fast_algorithm"]:
                        # Run n-wesolowski (fast) algorithm.
                        writer.write(b"N")
                    else:
                        # Run two-wesolowski (slow) algorithm.
                        writer.write(b"T")
                await writer.drain()

            prefix = str(len(str(disc)))
            if len(prefix) == 1:
                prefix = "00" + prefix
            if len(prefix) == 2:
                prefix = "0" + prefix
            async with self.lock:
                writer.write((prefix + str(disc)).encode())
                await writer.drain()

            # Send initial_form prefixed with its length.
            async with self.lock:
                writer.write(
                    bytes([len(initial_form.data)]) + initial_form.data)
                await writer.drain()
            try:
                ok = await reader.readexactly(2)
            except (asyncio.IncompleteReadError, ConnectionResetError,
                    Exception) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append(chain)
                    self.vdf_failures_count += 1
                return

            if ok.decode() != "OK":
                return

            log.info("Got handshake with VDF client.")
            if not self.sanitizer_mode:
                async with self.lock:
                    self.allows_iters.append(chain)
            else:
                async with self.lock:
                    assert chain is Chain.BLUEBOX
                    assert bluebox_iteration is not None
                    prefix = str(len(str(bluebox_iteration)))
                    if len(str(bluebox_iteration)) < 10:
                        prefix = "0" + prefix
                    iter_str = prefix + str(bluebox_iteration)
                    writer.write(iter_str.encode())
                    await writer.drain()

            # Listen to the client until "STOP" is received.
            while True:
                try:
                    data = await reader.readexactly(4)
                except (
                        asyncio.IncompleteReadError,
                        ConnectionResetError,
                        Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append(chain)
                        self.vdf_failures_count += 1
                    break

                msg = ""
                try:
                    msg = data.decode()
                except Exception:
                    pass
                if msg == "STOP":
                    log.info(f"Stopped client running on ip {ip}.")
                    async with self.lock:
                        writer.write(b"ACK")
                        await writer.drain()
                    break
                else:
                    try:
                        # This must be a proof, 4 bytes is length prefix
                        length = int.from_bytes(data, "big")
                        proof = await reader.readexactly(length)
                        stdout_bytes_io: io.BytesIO = io.BytesIO(
                            bytes.fromhex(proof.decode()))
                    except (
                            asyncio.IncompleteReadError,
                            ConnectionResetError,
                            Exception,
                    ) as e:
                        log.warning(f"{type(e)} {e}")
                        async with self.lock:
                            self.vdf_failures.append(chain)
                            self.vdf_failures_count += 1
                        break

                    iterations_needed = uint64(
                        int.from_bytes(stdout_bytes_io.read(8),
                                       "big",
                                       signed=True))

                    y_size_bytes = stdout_bytes_io.read(8)
                    y_size = uint64(
                        int.from_bytes(y_size_bytes, "big", signed=True))

                    y_bytes = stdout_bytes_io.read(y_size)
                    witness_type = uint8(
                        int.from_bytes(stdout_bytes_io.read(1),
                                       "big",
                                       signed=True))
                    proof_bytes: bytes = stdout_bytes_io.read()

                    # Verifies our own proof just in case
                    form_size = ClassgroupElement.get_size(self.constants)
                    output = ClassgroupElement.from_bytes(y_bytes[:form_size])
                    if not self.sanitizer_mode:
                        time_taken = time.time() - self.chain_start_time[chain]
                        ips = int(iterations_needed / time_taken * 10) / 10
                        log.info(
                            f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                            f" iters, "
                            f"Estimated IPS: {ips}, Chain: {chain}")

                    vdf_info: VDFInfo = VDFInfo(
                        challenge,
                        iterations_needed,
                        output,
                    )
                    vdf_proof: VDFProof = VDFProof(
                        witness_type,
                        proof_bytes,
                        self.sanitizer_mode,
                    )

                    if not vdf_proof.is_valid(self.constants, initial_form,
                                              vdf_info):
                        log.error("Invalid proof of time!")
                    if not self.sanitizer_mode:
                        async with self.lock:
                            self.proofs_finished.append(
                                (chain, vdf_info, vdf_proof))
                    else:
                        async with self.lock:
                            writer.write(b"010")
                            await writer.drain()
                        assert header_hash is not None
                        assert field_vdf is not None
                        assert height is not None
                        response = timelord_protocol.RespondCompactProofOfTime(
                            vdf_info, vdf_proof, header_hash, height,
                            field_vdf)
                        if self.server is not None:
                            message = make_msg(
                                ProtocolMessageTypes.
                                respond_compact_vdf_timelord, response)
                            await self.server.send_to_all([message],
                                                          NodeType.FULL_NODE)
        except ConnectionResetError as e:
            log.info(f"Connection reset with VDF client {e}")
Example #38
async def send_to_stream(msg_q: asyncio.Queue,
                         stream_out: asyncio.StreamWriter):
    msg = await msg_q.get()
    stream_out.write(msg)
    await stream_out.drain()
Example #39
async def handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):

    no = 0
    IS_IMG = False
    client = {'reader': reader, 'writer': writer}
    clients.append(client)
    print('Con One')
    StringImg = b''
    Model = ''
    Date = ''
    while True:
        if IS_IMG:
            while True:
                try:
                    data: bytes = await reader.read(1024)
                    if not data:
                        break
                    StringImg += data
                    if b'<eof>' in StringImg:
                        IS_IMG = False
                        for i in clients:
                            if i == client:
                                continue
                            i['writer'].write(StringImg)
                            await i['writer'].drain()

                        StringImg = b''
                        break

                        #StringImg = StringImg.replace(b'<eof>',b'')
                        #StringImg = StringImg.replace(b'<sof>',b'')
                    # (space left here to convert the image for storage)
                except:
                    print('image error')
        else:
            data: bytes = await reader.read(1024)
            if data is not None:
                if b'<sof>' in data:
                    StringImg += data
                    IS_IMG = True
                    data = b''
                    continue
                peername = writer.get_extra_info('peername')
                print(f"[S] received: {len(data)} bytes from {peername}")

                cli_mes = data.decode('UTF-8')  # decode the data sent by the client
                print(cli_mes)
                if cli_mes.startswith('TOC'):  # print on the server the data the Rasp sends to the C# client
                    print("[Rasp_Client] message: {}".format(cli_mes[3:]))

                elif cli_mes.startswith('TOR'):  # print on the server the data the C# client sends to the Rasp
                    print("[C#_Client] message: {}".format(cli_mes[3:]))

                no += 1
                # added to insert values at the corresponding index

                conn = sqlite.connect("DB.db")  # connect to the DB file
                c = conn.cursor()  # acquire a cursor
                if cli_mes.startswith('TEMPS'):
                    sendf = cli_mes.encode('UTF-8')
                    writer.write(sendf)
                    await writer.drain()
                if cli_mes.startswith('TORDatesStand'):

                    payload = cli_mes.encode('UTF-8')
                    for i in clients:
                        await asyncio.sleep(0.3)
                        if i == client:
                            continue
                        print(payload)
                        i['writer'].write(payload)
                        await i['writer'].drain()

                        print("[Server] sent: {}".format(cli_mes))

                    insert_cli_mes = cli_mes.replace('TORDatesStand,',
                                                     '').split(',')
                    index_cli_mes = insert_cli_mes[1]
                    Model = insert_cli_mes[0]
                    Date = insert_cli_mes[4]

                    print(insert_cli_mes)
                    for no in range(1,
                                    int(index_cli_mes) +
                                    1):  # insert Unit_Date values for the whole lot size
                        c.execute(
                            '''INSERT INTO Unit_factory(Model, Unit_no, Unit_Date)
                                    VALUES('%s', %s, %s)''' %
                            (Model, no, Date))

                    c.execute(
                        '''INSERT INTO Result(Model, hStandard, vStandard, ResultDate)
                                VALUES('%s', %s, %s, %s)''' %
                        (insert_cli_mes[0], insert_cli_mes[2],
                         insert_cli_mes[3], Date))

                elif cli_mes.startswith('TOCUnit_no'):
                    insert_cli_mes = cli_mes.replace('TOC', '').split(',')

                    for k in insert_cli_mes:
                        k = 'TOC' + k
                        payload = k.encode('UTF-8')
                        for i in clients:
                            i['writer'].write(payload)
                            await i['writer'].drain()
                            time.sleep(0.1)
                    print("[Server] sent: {}".format(cli_mes))
                    print(insert_cli_mes)
                    # save into the DB file

                    c.execute('''UPDATE Unit_factory SET  
                                Unit_horizon = %s, Unit_vertical = %s, 
                                Unit_hpass = %s, Unit_vpass = %s,
                                Unit_temp = %s
                                WHERE Unit_no = %s and Unit_date = %s''' %
                              (insert_cli_mes[1][12:], insert_cli_mes[2][13:],
                               insert_cli_mes[3][10:], insert_cli_mes[4][10:],
                               insert_cli_mes[5][4:], insert_cli_mes[0][7:],
                               insert_cli_mes[6][9:]))

                elif cli_mes.startswith('TOCAQL_hpass'):
                    print("[Server] sent1: {}".format(cli_mes))
                    thenTOC = cli_mes.replace('TOC', 'Last')
                    print(thenTOC)
                    ttl = thenTOC.encode('UTF-8')
                    for i in clients:
                        if i == client:
                            continue
                        i['writer'].write(ttl)
                        await i['writer'].drain()

                    insert_cli_mes = cli_mes.replace('TOC', '').split(',')
                    time.sleep(0.3)
                    print("[Server] sent: {}".format(cli_mes))
                    try:
                        c.execute(
                            '''UPDATE Result SET
                                AQL_hpass = %s, AQL_vpass = %s,
                                hSigma = %s, vSigma = %s,
                                hMean = %s, vMean = %s,
                                hCp = %s, vCp = %s,
                                hunpassCount = %s, vunpassCount = %s,
                                hDefectrate = %s, vDefectrate = %s,
                                Hadjust = '%s', Vadjust = '%s',
                                TotalunpassCount = %s, TotalDefectrate = %s,
                                Result_dateGap= '%s', Result_LOT = %s
                                WHERE Resultdate = %s and Model = "%s"''' %
                            (insert_cli_mes[0][9:], insert_cli_mes[1][9:],
                             insert_cli_mes[2][6:], insert_cli_mes[3][6:],
                             insert_cli_mes[4][5:], insert_cli_mes[5][5:],
                             insert_cli_mes[6][3:], insert_cli_mes[7][3:],
                             insert_cli_mes[8][12:], insert_cli_mes[9][12:],
                             insert_cli_mes[10][11:], insert_cli_mes[11][11:],
                             insert_cli_mes[12][7:], insert_cli_mes[13][7:],
                             insert_cli_mes[14][16:], insert_cli_mes[15][15:],
                             insert_cli_mes[17][7:], insert_cli_mes[18][3:],
                             insert_cli_mes[16][4:], insert_cli_mes[19][5:]))

                    except Exception as e:
                        print('update exception:', e)
                    time.sleep(4)

                elif cli_mes.startswith('start'):
                    payload = cli_mes.encode('UTF-8')
                    for i in clients:
                        i['writer'].write(payload)
                        await writer.drain()

                    print("[Server] sent: {}".format(cli_mes))
                conn.commit()  # Commit the transaction to the DB
                conn.close()  # Close the DB connection

                await asyncio.sleep(1.5)  # Wait before notifying the clients

                serv_mes = 'Success'
                for i in clients:
                    i['writer'].write(serv_mes.encode('UTF-8'))
                    await i['writer'].drain()
                #writer.write(serv_mes.encode('UTF-8'))
                #await writer.drain()
                print("[Server] sent: {}".format(serv_mes))
        IS_IMG = False
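The UPDATE statements in the snippet above splice the parsed values straight into the SQL text with the % operator. As a hedged sketch only: drivers that use %s placeholders (pymysql, for example) also accept the values as a separate argument to execute(), which handles quoting and escaping. The cursor c and the parsed insert_cli_mes fields are assumed to be the ones already in scope above.

# Hedged sketch: the first UPDATE again, but with the values passed as
# parameters instead of interpolated into the SQL string. Assumes a driver
# with %s placeholders (e.g. pymysql) and reuses `c` and `insert_cli_mes`
# from the snippet above.
c.execute(
    '''UPDATE Unit_factory SET
           Unit_horizon = %s, Unit_vertical = %s,
           Unit_hpass = %s, Unit_vpass = %s,
           Unit_temp = %s
       WHERE Unit_no = %s AND Unit_date = %s''',
    (insert_cli_mes[1][12:], insert_cli_mes[2][13:],
     insert_cli_mes[3][10:], insert_cli_mes[4][10:],
     insert_cli_mes[5][4:], insert_cli_mes[0][7:],
     insert_cli_mes[6][9:]))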
Example #40
0
async def connection_loop(execute_rpc: Callable[[Any], Any],
                          reader: asyncio.StreamReader,
                          writer: asyncio.StreamWriter, logger: logging.Logger,
                          cancel_token: CancelToken) -> None:
    # TODO: we should look into using an io.StringIO here for more efficient
    # writing to the end of the string.
    raw_request = ''
    while True:
        request_bytes = b''
        try:
            request_bytes = await cancel_token.cancellable_wait(
                reader.readuntil(b'}'))
        except asyncio.LimitOverrunError as e:
            logger.info(
                "Client request was too long. Erasing buffer and restarting..."
            )
            request_bytes = await cancel_token.cancellable_wait(
                reader.read(e.consumed))
            await cancel_token.cancellable_wait(
                write_error(
                    writer,
                    "reached limit: %d bytes, starting with '%s'" % (
                        e.consumed,
                        request_bytes[:20],
                    ),
                ))
            continue

        raw_request += request_bytes.decode()

        bad_prefix, raw_request = strip_non_json_prefix(raw_request)
        if bad_prefix:
            logger.info("Client started request with non json data: %r",
                        bad_prefix)
            await cancel_token.cancellable_wait(
                write_error(writer, 'Cannot parse json: ' + bad_prefix), )

        try:
            request = json.loads(raw_request)
        except json.JSONDecodeError:
            # invalid json request, keep reading data until a valid json is formed
            logger.debug("Invalid JSON, waiting for rest of message: %r",
                         raw_request)
            continue

        # reset the buffer for the next message
        raw_request = ''

        if not request:
            logger.debug("Client sent empty request")
            await cancel_token.cancellable_wait(
                write_error(writer, 'Invalid Request: empty'), )
            continue

        try:
            result = await execute_rpc(request, from_ipc=True)
        except Exception as e:
            logger.exception("Unrecognized exception while executing RPC")
            await cancel_token.cancellable_wait(
                write_error(writer, "unknown failure: " + str(e)), )
        else:
            writer.write(result.encode())

        await cancel_token.cancellable_wait(writer.drain())
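For orientation, a minimal client-side sketch for this loop follows. The Unix socket path is hypothetical, and the response is assumed to be a single JSON object that is accumulated until it parses; both assumptions depend on how the server is actually started.

import asyncio
import json

async def call_rpc(path: str, request: dict) -> dict:
    # `path` is a hypothetical Unix socket the connection_loop server is
    # assumed to listen on.
    reader, writer = await asyncio.open_unix_connection(path)
    writer.write(json.dumps(request).encode())
    await writer.drain()
    raw = b''
    while True:
        raw += await reader.readuntil(b'}')
        try:
            response = json.loads(raw)
            break
        except json.JSONDecodeError:
            continue  # partial object: keep reading until it parses
    writer.close()
    await writer.wait_closed()
    return response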
Example #41
0
def fdms_session(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    online = None
    ''':type: (FdmsHeader, FdmsTransaction)'''
    add_on = None
    ''':type: (FdmsHeader, FdmsTransaction)'''
    offline = list()

    writer.write(bytes((ENQ,)))
    yield from writer.drain()

    while True:

        # Get Request
        attempt = 0
        while True:
            try:
                if attempt > 4:
                    return

                request = yield from asyncio.wait_for(read_fdms_packet(reader), timeout=15.0)
                if len(request) == 0:
                    return

                control_byte = request[0]
                if control_byte == STX:
                    lrs = functools.reduce(lambda x, y: x ^ int(y), request[2:-1], int(request[1]))
                    if lrs != request[-1]:
                        raise ValueError('LRS sum')

                    pos, header = parse_header(request)
                    txn = header.create_txn()
                    txn.parse(request[pos:-2])
                    if header.txn_type == FdmsTransactionType.Online.value:
                        if online is None:
                            online = (header, txn)
                        else:
                            add_on = (header, txn)
                    else:
                        offline.append((header, txn))

                    if header.protocol_type == '2':
                        break

                    # Respond with ACK
                    attempt = 0
                    writer.write(bytes((ACK,)))

                elif control_byte == EOT:
                    break

            # Close session
            except asyncio.TimeoutError:
                return

            # Respond with NAK
            except Exception as e:
                logging.getLogger(LOG_NAME).debug('Request error: %s', str(e))
                attempt += 1
                writer.write(bytes((NAK,)))

            yield from writer.drain()

        if online is None:
            return

        # Process Transactions & Send Response
        for txn in offline:
            rs = process_txn(txn)
        offline.clear()

        if add_on is not None:
            process_add_on_txn(online, add_on)
        add_on = None

        rs = process_txn(online)

        # Send Response
        rs_bytes = rs.response()

        if rs.action_code == FdmsActionCode.HostSpecificPoll or rs.action_code == FdmsActionCode.RevisionInquiry:
            writer.write(rs_bytes)
            yield from writer.drain()
        else:
            attempt = 0
            while True:
                if attempt >= 4:
                    return

                writer.write(rs_bytes)
                yield from writer.drain()

                control_byte = 0
                try:
                    while True:
                        rs_head = yield from asyncio.wait_for(reader.read(1), timeout=4.0)
                        if len(rs_head) == 0:
                            return
                        control_byte = rs_head[0] & 0x7f
                        if control_byte == ACK:
                            break
                        elif control_byte == NAK:
                            break
                # Close session
                except asyncio.TimeoutError as e:
                    return

                if control_byte == ACK:
                    break
                else:
                    attempt += 1

            if online[0].wcc in {'B', 'C'}:
                # Send ENQ
                writer.write(bytes((ENQ,)))
                yield from writer.drain()
                continue
            else:
                break

    writer.write(bytes((EOT,)))
    yield from writer.drain()
    if writer.can_write_eof():
        writer.write_eof()
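The LRC check near the top of the request loop XORs every byte between the STX byte and the trailing check byte. A standalone sketch of that computation, assuming the frame layout STX + payload + LRC implied by the indexing above:

import functools

def lrc(frame: bytes) -> int:
    # XOR of every byte after STX up to, but not including, the final LRC
    # byte; mirrors reduce(xor, request[2:-1], request[1]) in the session code.
    return functools.reduce(lambda acc, b: acc ^ b, frame[1:-1], 0)

def frame_is_valid(frame: bytes) -> bool:
    return len(frame) >= 3 and lrc(frame) == frame[-1]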
Example #42
0
 async def on_connect(_: StreamReader, writer: StreamWriter) -> None:
     writer.write(data)
     await writer.drain()
Example #43
0
 async def to_stream(self, writer: asyncio.StreamWriter):
     writer.write(self.to_bytes())
     await writer.drain()
Example #44
0
    async def connection_handler(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter):
        # start authentication
        _ip = writer.transport.get_extra_info("peername")
        _id = self.random_string(16)
        self.login_logger.info(f"New connection from {_ip}")

        try:
            # receive the API key saved in the node and compare it to the local one
            proposed_api_key = (await reader.read(1024)).decode()[2:]
        except UnicodeDecodeError:
            self.login_logger.info(
                f"Connection declined for {_ip}. Reason: InvalidEncoding")
            writer.close()
            return

        if proposed_api_key != self.key:
            writer.close()
            self.login_logger.info(
                f"Connection declined for {_ip}. Reason: InvalidApiKey")
            return

        writer.write(
            b"A_ACCEPT"
        )  # this notifies the client that it can transfer the config
        await writer.drain()

        proposed_configuration = await reader.read(
            4096
        )  # read up to 4096 bytes of configuration, which should be enough
        try:
            proposed_configuration = proposed_configuration.decode("UTF-8")
        except UnicodeDecodeError:
            self.login_logger.info(
                f"Connection declined for {_ip}. Reason: InvalidConfEncoding")
            writer.close()
            return

        try:
            configuration = json.loads(proposed_configuration)
            new_node = Node()
            valid_config = new_node.from_dict(configuration)
            if valid_config:
                self.nodes[_id] = new_node
            else:
                self.login_logger.info(
                    f"Connection declined for {_ip}. Reason: InvalidConf")
                return
        except json.JSONDecodeError:
            self.login_logger.info(
                f"Connection declined for {_ip}. Reason: InvalidConf")
            return

        accepted = False
        while not accepted:
            writer.write(f"BT_{environ.get('BOT_TOKEN', '')}".encode())
            await writer.drain()
            if (await reader.read(1024)).decode() == "BT_ACCEPT":
                accepted = True

        is_ready = False
        while not is_ready and not self.stopped:
            await asyncio.sleep(0.1)
            try:
                response = await reader.read(1024)
            except BrokenPipeError:
                self.login_logger.info(
                    f"Connection lost to {_ip}. Reason: BPE")
                break
            if response.decode() == "D_READY":
                self.nodes[_id].writer = writer
                self.nodes[_id].reader = reader
                is_ready = True

        self.login_logger.info(f"Connection established to {_ip}")

        while not self.stopped and is_ready:
            try:
                response = await reader.read(1024)
            except BrokenPipeError:
                del self.nodes[_id]
                self.login_logger.info(
                    f"Connection lost to {_ip}. Reason: BPE")
                break
            if not response:
                print("response none")
                break
            self._handle_response(response=response.decode())
            await asyncio.sleep(0.1)
        self.login_logger.info(f"Connection lost to {_ip}. Reason: Basic")
        writer.close()
Example #45
0
    async def _receive_handshake(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter) -> None:
        msg = await wait_with_token(
            reader.read(ENCRYPTED_AUTH_MSG_LEN),
            token=self.cancel_token,
            timeout=REPLY_TIMEOUT,
        )

        ip, socket, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, socket)
        self.logger.debug("Receiving handshake from %s", remote_address)
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await wait_with_token(
                reader.read(remaining_bytes),
                token=self.cancel_token,
                timeout=REPLY_TIMEOUT,
            )
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.warn("Failed to decrypt handshake", exc_info=True)
                return

        # Create `HandshakeResponder(remote: kademlia.Node, privkey: datatypes.PrivateKey)` instance
        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey,
                                       self.cancel_token)

        # Call `HandshakeResponder.create_auth_ack_message(nonce: bytes)` to create the reply
        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(nonce=responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await writer.drain()

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        peer = self.peer_class(remote=initiator_remote,
                               privkey=self.privkey,
                               reader=reader,
                               writer=writer,
                               aes_secret=aes_secret,
                               mac_secret=mac_secret,
                               egress_mac=egress_mac,
                               ingress_mac=ingress_mac,
                               chaindb=self.chaindb,
                               network_id=self.network_id)

        await self.do_handshake(peer)
async def fix_stream_processor(handler: Handler, shutdown_timeout: float,
                               reader: AsyncIterator[bytes],
                               writer: StreamWriter,
                               cancellation_event: asyncio.Event) -> None:
    """Create a processor for a stream of FIX data.

    Args:
        handler (Handler): The handler.
        shutdown_timeout (float): The time to wait before shutting down.
        reader (AsyncIterator[bytes]): The stream reader.
        writer (StreamWriter): The stream writer.
        cancellation_event (asyncio.Event): An event with which to cancel the processing.

    Raises:
        RuntimeError: If an invalid event was received.
    """
    if cancellation_event.is_set():
        return

    read_queue: "Queue[Event]" = Queue()
    write_queue: "Queue[Event]" = Queue()

    async def receive() -> Event:
        return await read_queue.get()

    async def send(evt: Event) -> None:
        await write_queue.put(evt)

    await read_queue.put({'type': 'connected'})

    state = FixState.OK
    message: bytes = b''

    reader_iter = reader.__aiter__()

    # Create initial tasks.
    handler_task = asyncio.create_task(handler(send, receive))
    read_task: Task[bytes] = asyncio.create_task(reader_iter.__anext__())
    write_task: Task[Event] = asyncio.create_task(write_queue.get())
    cancellation_task = asyncio.create_task(cancellation_event.wait())
    pending: Set[Future] = {
        read_task, write_task, handler_task, cancellation_task
    }
    # Start the task service loop.
    while state == FixState.OK and not cancellation_event.is_set():

        # Wait for a task to be completed.
        completed, pending = await asyncio.wait(
            pending, return_when=asyncio.FIRST_COMPLETED)

        # Handle the completed tasks. The pending tasks are left to become completed.
        for task in completed:

            if task == handler_task:

                state = FixState.HANDLER_COMPLETED
                continue

            elif task == cancellation_task:

                state = FixState.CANCELLED
                continue

            elif task == write_task:

                # Fetch the event sent by the handler.
                event = write_task.result()

                if event['type'] == 'fix':
                    # Send data to the handler and renew the write task.
                    LOGGER.debug('Sending "%s"', event["message"])
                    writer.write(event['message'])
                    await writer.drain()
                    write_task = asyncio.create_task(write_queue.get())
                    pending.add(write_task)
                elif event['type'] == 'close':
                    # Close the connection and exit the task service loop.
                    writer.close()
                    state = FixState.HANDLER_CLOSED
                    continue
                else:
                    LOGGER.debug('Invalid event "%s"', event["type"])
                    raise RuntimeError(f'Invalid event "{event["type"]}"')

            elif task == read_task:

                try:
                    message = cast(bytes, task.result())
                    LOGGER.debug('Received "%s"', message)
                    # Notify the client and reset the state.
                    await read_queue.put({'type': 'fix', 'message': message})
                    message = b''
                    # Read the field.
                    read_task = asyncio.create_task(reader_iter.__anext__())
                    pending.add(read_task)
                except StopAsyncIteration:
                    state = FixState.EOF
                    continue
            else:
                raise AssertionError('Invalid task')

    # Attempt to shutdown gracefully.

    if state == FixState.HANDLER_COMPLETED:
        # When the handler task has finished, the session is over.
        # done() only reports completion; result() would be needed to re-raise any exception.
        handler_task.done()
        await cancel_await(read_task)
        await cancel_await(write_task)
        writer.close()
    else:
        # Notify the client of the disconnection.
        await read_queue.put({'type': 'disconnect'})

        await cancel_await(write_task)

        if state != FixState.EOF:
            writer.close()
            await cancel_await(read_task)

        if not handler_task.cancelled():
            LOGGER.info('Waiting %s seconds for the handler to complete.',
                        shutdown_timeout)
            try:
                await asyncio.wait_for(handler_task, timeout=shutdown_timeout)
            except asyncio.TimeoutError:
                LOGGER.error('Cancelling the handler')
                await cancel_await(
                    handler_task, lambda: LOGGER.warning(
                        'The handler task did not complete and has been cancelled'
                    ))

    LOGGER.debug('Shutdown complete.')
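For orientation, a minimal sketch of wiring fix_stream_processor into asyncio.start_server follows. The handler, host and port are illustrative, and the raw StreamReader is passed directly as the byte iterator (it yields lines), whereas a real deployment would presumably wrap it in a FIX-aware framer.

import asyncio

async def echo_handler(send, receive):
    # Illustrative handler: wait for 'connected', echo one message, then
    # ask for the connection to be closed.
    event = await receive()
    assert event['type'] == 'connected'
    event = await receive()
    if event['type'] == 'fix':
        await send({'type': 'fix', 'message': event['message']})
    await send({'type': 'close'})

async def main() -> None:
    cancellation_event = asyncio.Event()

    async def client_connected(reader, writer):
        await fix_stream_processor(
            echo_handler, 10.0, reader, writer, cancellation_event)

    server = await asyncio.start_server(client_connected, '127.0.0.1', 9878)
    async with server:
        await server.serve_forever()

# asyncio.run(main())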
Example #47
0
async def send_message(message: Union[str, bytes],
                       writer: asyncio.StreamWriter) -> None:
    writer.write((message.encode() if isinstance(message, str) else message) +
                 SEPARATOR_BINARY)
    await writer.drain()
async def submit_message(writer: asyncio.StreamWriter, message: str) -> None:
    writer.write(f"{clean_string(message)}\n\n".encode())
    await writer.drain()
Example #49
0
    async def data_loop(
        self,
        ingress: PacketQueue,
        egress: PacketQueue,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        protocol: TCPProtocolV1,
    ):
        def out_task():
            return asyncio.create_task(egress.get(),
                                       name=f'send-{protocol.remote.id}')

        def in_task():
            return asyncio.create_task(
                reader.readexactly(protocol.num_to_read()),
                name=f'recv-{protocol.remote.id}',
            )

        def retrieve_exception_cb(task: asyncio.Task):
            """ retrieve potential result/exception from orphan tasks """
            try:
                result = task.result()
                self.logger.debug(f'orphan task {task!r} done with {result}')
            except:
                self.logger.debug(f'orphan task {task!r} got exception',
                                  exc_info=True)

        outbound = out_task()
        inbound = in_task()
        closing = False
        while not closing:
            done, pending = await asyncio.wait(
                {outbound, inbound}, return_when=asyncio.FIRST_COMPLETED)
            if outbound in done:
                try:
                    to_send = outbound.result()
                except asyncio.CancelledError:
                    closing = True
                else:
                    if to_send.origin.id == 1 and to_send.received_from and to_send.received_from.id == 4:
                        self.logger.warning(f'send {to_send}')
                    to_send = protocol.encode(to_send)
                    writer.write(to_send)
                    self.logger.debug(f'send {b2a_hex(to_send)!s}')
                    outbound = out_task()
            if inbound in done:
                try:
                    bs = inbound.result()
                except asyncio.IncompleteReadError as ex:
                    self.logger.info('remote closing connection.')
                    bs = ex.partial
                    closing = True
                self.logger.debug(f'received {b2a_hex(bs)!s}')

                while True:
                    pkt, origin_id, verify = protocol.decode(bs)
                    bs = b''
                    if pkt is None and verify:
                        assert (protocol.num_to_read() >
                                0), 'cannot consume buffer'
                        break
                    elif pkt is None and not verify:
                        # Data was received, but its signature failed verification
                        self.closing(origin_id)
                        break
                    else:
                        await ingress.put(pkt)
                    # in case multiple packets received in one reading,
                    # parse as many packets as possible
                    if protocol.num_to_read() > 0:
                        break
                if not closing:
                    inbound = in_task()
        for t in [inbound, outbound]:
            if not t.done():
                # add callback to eliminate "exception not retrieve" error
                t.add_done_callback(retrieve_exception_cb)
Example #50
0
async def sendMessage(writer: asyncio.StreamWriter, msg: Dict[str, Any]) -> None:
    ''' Encode and send a message to the server. '''
    data = json.dumps(msg)
    print('\n<{!s} sending {!r}>\n'.format(datetime.datetime.now(), data))
    writer.write(data.encode('utf-8') + b'\n')
    await writer.drain()
Example #51
0
File: server.py Project: wanseob/py-evm
    async def _receive_handshake(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter) -> None:
        msg = await self.wait(reader.read(ENCRYPTED_AUTH_MSG_LEN),
                              timeout=REPLY_TIMEOUT)

        ip, socket, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, socket)
        self.logger.debug("Receiving handshake from %s", remote_address)
        got_eip8 = False
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8
            got_eip8 = True
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await self.wait(reader.read(remaining_bytes),
                                   timeout=REPLY_TIMEOUT)
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.debug("Failed to decrypt handshake: %s", e)
                return

        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey,
                                       got_eip8, self.cancel_token)

        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await self.wait(writer.drain())

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        peer = self.peer_class(
            remote=initiator_remote,
            privkey=self.privkey,
            reader=reader,
            writer=writer,
            aes_secret=aes_secret,
            mac_secret=mac_secret,
            egress_mac=egress_mac,
            ingress_mac=ingress_mac,
            headerdb=self.headerdb,
            network_id=self.network_id,
            inbound=True,
        )

        if self.peer_pool.is_full:
            peer.disconnect(DisconnectReason.too_many_peers)
        elif not self.peer_pool.is_valid_connection_candidate(peer.remote):
            peer.disconnect(DisconnectReason.useless_peer)

        total_peers = len(self.peer_pool.connected_nodes)
        inbound_peer_count = len([
            peer for peer in self.peer_pool.connected_nodes.values()
            if peer.inbound
        ])
        if total_peers > 1 and inbound_peer_count / total_peers > DIAL_IN_OUT_RATIO:
            # make sure to have at least 1/4 outbound connections
            peer.disconnect(DisconnectReason.too_many_peers)
        else:
            # We use self.wait() here as a workaround for
            # https://github.com/ethereum/py-evm/issues/670.
            await self.wait(self.do_handshake(peer))
Example #52
0
 def to_stream(self, writer: asyncio.StreamWriter):
     writer.write(self.to_bytes())
     yield from writer.drain()
    async def handleConnection(self, redirectorClientReader: asyncio.StreamReader,
                               redirectorClientWriter: asyncio.StreamWriter):
        """ Handle connections from redirector clients """
        line = b''
        sport = pport = shost = phost = None
        serviceAddress = peerAddress = None
        serviceReader = serviceWriter = None
        redirectorClientAddress = None
        tasks = []

        try:
            # fix buffering issues (backpressure effect)
            redirectorClientWriter.transport.set_write_buffer_limits(0)
            # Get client address from socket
            peername = redirectorClientWriter.transport.get_extra_info('peername')
            redirectorClientAddress = peername[0] + ':' + str(peername[1])
            print('Redirector client connected from', redirectorClientAddress, 'to', self.listenAddress)

            try:
                with async_timeout.timeout(CONNECT_TIMEOUT, loop=self.loop):
                    try:
                        # Wait for the 1st packet (line), which carries "phost:pport:sport"
                        line = await redirectorClientReader.readline()
                        ll = len(line)

                        # Validate message
                        if ll < 11 or ll > 28 or line[-1:] != b"\n" or b":" not in line:
                            raise ValueError()

                        peerData = line[:-1].decode()
                        phost, pport, sport = peerData.split(':')
                        # The address of the peer
                        peerAddress = phost + ':' + pport
                        pport = int(pport)
                        # Parse service port
                        shost, sport = self.servicePorts[int(sport)]
                        # Replace ports
                        if sport in self.replacePorts:
                            sport = self.replacePorts[sport]
                        serviceAddress = shost + ':' + str(sport)

                    except ValueError: pass

                    except KeyError:
                        error("Error: No service is on the specified port: {!r}".format(line))
                        raise GoToEnd()

            except asyncio.TimeoutError:
                error('Error: Client', redirectorClientAddress, 'protocol error, no data in time!')
                raise GoToEnd()

            except BrokenPipeError:
                error('Client has immediately closed connection!')
                raise GoToEnd()

            if sport is None:
                error('Error: Client', redirectorClientAddress, 'protocol error!')
                raise GoToEnd()

            # Here we know the peer address and port and the destination port as well, so we can create the iptables
            # rules and connect to the service
            try:
                # Create a socket and get a free port to communicate with. We need to use our own socket to be able to
                #  get the listening port and create the iptables rules before connecting
                csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                csock.bind((shost, 0))
                scport = csock.getsockname()[1]
                serviceClientAddress = shost + ':' + str(scport)
            except socket.error as e:
                error("Socket error: ", e)
                raise GoToEnd()

            try:
                with async_timeout.timeout(CONNECT_TIMEOUT, loop=self.loop):
                    print(peerAddress, "connecting through", redirectorClientAddress + '…')
                    # Add IPTables rules
                    res = await Server.Iptables.addNatRules(redirectorClientAddress, serviceClientAddress,
                                                            scport, shost, sport, phost, pport)
                    # Exit if not connected or no rules
                    if res != 0: raise GoToEnd()
                    try:
                        # Use our own socket to connect
                        csock.setblocking(False)
                        await self.loop.sock_connect(csock, (shost, sport))
                        serviceReader, serviceWriter = await asyncio.open_connection(sock=csock)
                    except (ConnectionError, BrokenPipeError, OSError, GeneratorExit):
                        error("Error: service connection from", peerAddress, 'to', serviceAddress, 'is failed!')
                        raise GoToEnd()

            except asyncio.TimeoutError:
                error('Error: service', serviceAddress, 'connection error timeout!')
                raise GoToEnd()

            if not serviceReader or not serviceWriter:
                error('Error: service', serviceAddress, 'connection error!')
                raise GoToEnd()

            # Here the connection is successful
            print(peerAddress, 'through', redirectorClientAddress, 'connected to', serviceAddress)

            async def relayStream(reader, writer, otherWriter):
                """ Transfer data from reader to writer """
                try:
                    while True:
                        # Stop if no data has been received in time
                        with async_timeout.timeout(READ_TIMEOUT, loop=self.loop):
                            try:
                                await writer.drain()
                                data = await reader.read(READ_SIZE)
                                l = len(data)
                                if l == 0:  # EOF
                                    if not otherWriter.transport.is_closing():
                                        if reader == redirectorClientReader:
                                            print('Peer ', peerAddress, 'through', redirectorClientAddress,
                                                  'has closed connection to', serviceAddress)
                                        else:
                                            print('Service', serviceAddress,
                                                  'has closed connection from peer', peerAddress,
                                                  'through', redirectorClientAddress)
                                    break
                                writer.write(data)
                            except (ConnectionError, BrokenPipeError):
                                if reader == redirectorClientReader:
                                    error('Peer', peerAddress, 'through', redirectorClientAddress,
                                          'has disconnected from service', serviceAddress)
                                else:
                                    error('Service', serviceAddress, 'has disconnected from peer', peerAddress,
                                          'through', redirectorClientAddress)
                                break
                except OSError as err:
                    error('Error: OS error:', str(err))

                except asyncio.TimeoutError:
                    if reader == redirectorClientReader:
                        error('Peer', peerAddress, 'through', redirectorClientAddress,
                              'read timeout occurred. Closing connection.')
                    else:
                        error('Service', serviceAddress, 'read timeout occurred. Closing connection.')

                except asyncio.CancelledError: pass

                # Close connection
                if not writer.transport.is_closing():
                    await writer.drain()
                    writer.close()
                    # To let the socket actually close
                    await asyncio.sleep(0, loop=self.loop)

            # Create relay tasks
            tasks = [
                asyncio.ensure_future(relayStream(redirectorClientReader, serviceWriter, redirectorClientWriter),
                                      loop=self.loop),
                asyncio.ensure_future(relayStream(serviceReader, redirectorClientWriter, serviceWriter),
                                      loop=self.loop)
            ]

            # Stop waiting when any connection endpoint has closed
            done, pending = await asyncio.wait(tasks, loop=self.loop, return_when=asyncio.FIRST_COMPLETED)
            # Cancel remaining task
            for task in pending: task.cancel()
            if pending: await asyncio.wait(pending, loop=self.loop, timeout=1)
            tasks = []

        except asyncio.CancelledError: pass
        except GoToEnd: pass

        finally:
            # If we have pending tasks, close them
            if tasks:
                for task in tasks: task.cancel()
                await asyncio.wait(tasks, loop=self.loop, timeout=1)
            # Close connection from the client if still connected
            if not redirectorClientWriter.transport.is_closing():
                await redirectorClientWriter.drain()
                redirectorClientWriter.close()
            # Close connection to the service if still connected
            if serviceWriter and not serviceWriter.transport.is_closing():
                await serviceWriter.drain()
                serviceWriter.close()
            # Delete iptables rules
            if redirectorClientAddress and Server.Iptables.hasNatRule(redirectorClientAddress):
                await Server.Iptables.deleteNatRules(redirectorClientAddress)
Example #54
0
File: test.py Project: skolu/fdms
def accept_fdms_client(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    asyncio.Task(fdms.fdms_session(reader, writer), loop=loop).add_done_callback(lambda fut: writer.close())
    async def handleConnection(self, peerReader: asyncio.StreamReader, peerWriter: asyncio.StreamWriter):
        """ Accepts peer connections """
        redirectorReader = redirectorWriter = None
        tasks = []
        try:
            # fix buffering issues (backpressure effect)
            peerWriter.transport.set_write_buffer_limits(0)
            # Get socket info
            peername = peerWriter.get_extra_info('peername')
            sockname = peerWriter.get_extra_info('sockname')
            # Calculate peer and listen address info
            peerAddress = peername[0] + ':' + str(peername[1])
            listenPort = str(sockname[1])
            listenAddress = sockname[0] + ':' + listenPort
            print('Peer', peerAddress, 'connected to', listenAddress)

            # Connect to redirector server
            try:
                with async_timeout.timeout(CONNECT_TIMEOUT, loop=self.loop):
                    try:
                        redirectorReader, redirectorWriter = await asyncio.open_connection(self.redirectorServerHost,
                                                                                           self.redirectorServerPort,
                                                                                           ssl=False)
                        # fix buffering issues (backpressure effect)
                        redirectorWriter.transport.set_write_buffer_limits(0)
                        # Send peer info to the redirector server as 1st message
                        redirectorWriter.write(str.encode(peerAddress + ':' + listenPort + '\n'))

                    except (ConnectionError, BrokenPipeError, GeneratorExit, OSError):
                        error('Error: Redirecting connection from ' + peerAddress + ' to ' +
                              listenAddress + ' failed!')

            except asyncio.TimeoutError:
                error('Error: Redirecting connection from ' + peerAddress + ' to ' + listenAddress +
                      ' failed because of a timeout!')

            # If the connection was unsuccessful
            if not redirectorReader or not redirectorWriter: raise GoToEnd()

            print("Peer", peerAddress, 'redirected to', self.redirectorServerAddress)

            async def relayStream(reader, writer, otherWriter):
                """ Transfer data from reader to writer """
                try:
                    while True:
                        # Stop if no data has been received in time
                        with async_timeout.timeout(READ_TIMEOUT, loop=self.loop):
                            try:
                                await writer.drain()
                                data = await reader.read(READ_SIZE)
                                l = len(data)
                                if l == 0:  # EOF
                                    if not otherWriter.transport.is_closing():
                                        if reader == peerReader:
                                            print('Peer', peerAddress, 'has closed connection to',
                                                  self.redirectorServerAddress)
                                        else:
                                            print('Redirector', self.redirectorServerAddress,
                                                  'has closed connection from peer', peerAddress)
                                    break
                                writer.write(data)
                            except (ConnectionError, BrokenPipeError):
                                if reader == peerReader:
                                    error('Peer', peerAddress, 'has disconnected from', self.redirectorServerAddress)
                                else:
                                    error('Redirector', self.redirectorServerAddress,
                                          'has disconnected from', peerAddress)
                                break

                except OSError as e:
                    error('Error: OS error:', str(e))

                except asyncio.TimeoutError:
                    if reader == peerReader:
                        error('Peer', peerAddress, 'read timeout occurred. Closing connection.')
                    else:
                        error('Redirector', self.redirectorServerAddress, 'read timeout occurred. Closing connection.')

                except asyncio.CancelledError: pass

                # Close connection
                if not writer.transport.is_closing():
                    await writer.drain()
                    writer.close()
                    # To let the socket actually close
                    await asyncio.sleep(0, loop=self.loop)

            # Create relay tasks
            tasks = [
                asyncio.ensure_future(relayStream(peerReader, redirectorWriter, peerWriter), loop=self.loop),
                asyncio.ensure_future(relayStream(redirectorReader, peerWriter, redirectorWriter), loop=self.loop)
            ]

            # Stop waiting when any connection endpoint has closed
            done, pending = await asyncio.wait(tasks, loop=self.loop, return_when=asyncio.FIRST_COMPLETED)
            # Cancel remaining task
            for task in pending: task.cancel()
            if pending: await asyncio.wait(pending, loop=self.loop, timeout=1)
            tasks = []

        except BrokenPipeError:
            error('Error: Peer has immediately closed connection!')

        except asyncio.CancelledError: pass
        except GoToEnd: pass

        finally:
            # If we have pending tasks, close them
            if tasks:
                for task in tasks: task.cancel()
                await asyncio.wait(tasks, loop=self.loop, timeout=1)
            # Close peer connection if still opened
            if not peerWriter.transport.is_closing():
                await peerWriter.drain()
                peerWriter.close()
            # Close redirector connection if still opened
            if redirectorWriter and not redirectorWriter.transport.is_closing():
                await redirectorWriter.drain()
                redirectorWriter.close()
Example #56
0
    async def _serv_client(
        self, intake: asyncio.StreamReader, outlet: asyncio.StreamWriter,
    ):
        loop = asyncio.get_running_loop()
        eol = loop.create_future()
        try:
            addr = outlet.get_extra_info("peername", "<some-peer>")

            # prepare the peer object
            ident = str(addr)
            # outletting currently has no rate limit, maybe add in the future?
            # with an unbounded queue, backpressure from remote peer is ignored
            # and outgoing packets can pile up locally
            poq = asyncio.Queue()
            # intaking should create backpressure when handled slowly, so use a
            # bounded queue
            hoq = asyncio.Queue(maxsize=1)

            peer = Peer(ident=ident, eol=eol, posting=poq.put, hosting=hoq.get,)

            # per-connection peer module preparation
            modu = {"peer": peer}
            if self.init is not None:
                # call per-connection peer module initialization method
                maybe_async = self.init(modu)
                if inspect.isawaitable(maybe_async):
                    await maybe_async
            # launch the peer module, it normally forks a concurrent task to
            # run a command landing loop
            runpy.run_module(
                self.service_modu, modu,
            )
            logger.debug(f"Nedh client peer module {self.service_modu} initialized")

            self.clients.publish(peer)

            # pump commands in,
            # this task is the only one reading the socket
            asyncio.create_task(
                receivePacketStream(
                    peer_site=ident, intake=intake, pkt_sink=hoq.put, eos=eol,
                )
            )

            # pump commands out,
            # this task is the only one writing the socket
            while True:
                pkt = await read_stream(eol, poq.get())
                if pkt is EndOfStream:
                    break
                await sendPacket(ident, outlet, pkt)

        except asyncio.CancelledError:
            pass

        except Exception as exc:
            logger.error("Nedh client caused error.", exc_info=True)
            if not eol.done():
                eol.set_exception(exc)

        finally:
            if not eol.done():
                eol.set_result(None)
            # todo post err (if any) to peer
            outlet.close()
            await outlet.wait_closed()
Example #57
0
 async def _handle_request(self, reader: asyncio.StreamReader,
                           writer: asyncio.StreamWriter):
     writer.write(self.prompt)
     while 1:
         writer.write(b'\r\n$amipy> ')
         client = writer.get_extra_info('peername')
         _c = ':'.join(str(i) for i in client)
         try:
             await writer.drain()
             data = await reader.readline()
             msg = data.decode().strip()
             if msg == 'quit':
                 print(
                     f'*[Server] {time.ctime()} Connection closed at {_c}')
                 writer.close()
                 return
             elif msg:
                 resp = self.parse_opt(msg)
                 print(
                     f'*[Server] {time.ctime()} Received "{msg}" from {_c}.'
                 )
                 writer.write(resp.encode('latin-1'))
         except Exception as e:
             print(f'*[Server] {time.ctime()} {e} at {_c}')
             writer.close()
         if not writer.is_closing():
             await writer.drain()
         else:
             writer.close()
             return
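Finally, a small client sketch for the prompt loop above. Host, port and the 'help' command are placeholders: they depend on how the server embedding _handle_request is started and on what parse_opt understands.

import asyncio

async def amipy_client(host: str = '127.0.0.1', port: int = 8888) -> None:
    # host and port are illustrative; the real values depend on the server setup.
    reader, writer = await asyncio.open_connection(host, port)
    await reader.readuntil(b'$amipy> ')          # banner plus first prompt
    writer.write(b'help\n')                      # placeholder command
    await writer.drain()
    reply = await reader.readuntil(b'$amipy> ')  # response plus next prompt
    print(reply.decode('latin-1'))
    writer.write(b'quit\n')
    await writer.drain()
    writer.close()
    await writer.wait_closed()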