async def ahandle_query(self, writer: StreamWriter, wlock: Lock, query: bytes) -> None:
    """Resolve a DNS query and write the DNS answer to the peer stream.

    Args:
        writer: Stream back to the peer; skipped if already closing.
        wlock: Lock serializing concurrent writes on this stream.
        query: Raw DNS query message (without the TCP length prefix).
    """
    if writer.is_closing():
        return

    # Resolve DNS query
    answer = await self.resolver.aresolve(query)

    if writer.is_closing():
        return

    # Create the DNS answer packet: 2-byte big-endian length prefix
    # followed by the message (TCP DNS framing).
    answer_size = len(answer).to_bytes(2, 'big')
    answer = b''.join([answer_size, answer])

    # Write the DNS answer to the peer stream.
    # BUG FIX: the original drained *before* writing, so backpressure was
    # never applied for this answer; the documented asyncio order is
    # write() followed by drain().
    async with wlock:
        if writer.is_closing():
            return
        writer.write(answer)
        await writer.drain()
async def _close(writer: asyncio.StreamWriter) -> None:
    """Best-effort close of *writer*, swallowing and logging any error.

    BUG FIX: the original called ``writer.is_closing()`` and discarded the
    result — clearly intended as a guard against double-closing; it is now
    used as one.
    """
    if writer is None:
        return
    try:
        if not writer.is_closing():
            writer.close()
        await writer.wait_closed()
    except Exception as e:
        # Lazy %-formatting; logging.exception also records the traceback.
        logging.exception("Close writer exception: %s", type(e))
async def open_pipe(us_reader: StreamReader, us_writer: StreamWriter, ds_factory=None):
    """Bridge an upstream connection to a downstream one built by *ds_factory*.

    ``ds_factory`` is awaited to obtain the downstream (reader, writer) pair;
    the pipe then shuttles data until it stops, after which both writers are
    closed unless already closing.
    """
    logging.info("pipe open")
    ds_reader, ds_writer = await ds_factory()
    pipe = DataPipe((us_reader, us_writer), (ds_reader, ds_writer))
    try:
        await pipe.flow(65536, 300)
    finally:
        logging.info("pipe closed")
        # Close each end unless it is already shutting down.
        for endpoint_writer in (us_writer, ds_writer):
            if not endpoint_writer.is_closing():
                endpoint_writer.close()
async def service_client(self, reader: aio.StreamReader, writer: aio.StreamWriter) -> None:
    """Serve DNS-over-TCP queries on one client connection until EOF.

    Each message on the wire is a 2-byte big-endian length prefix followed
    by a DNS message (TCP DNS framing, RFC 1035 section 4.2.2).
    """
    try:
        while True:
            # Parse DNS query packet into a request
            prefix = await reader.readexactly(2)
            query = await reader.readexactly(struct.unpack('!H', prefix)[0])
            request = dns.DNSRecord.parse(query)
            response = await self._resolver.resolve(request)

            # Pack DNS response into answer
            answer = response.pack()
            writer.write(struct.pack('!H', len(answer)) + answer)
            await writer.drain()

    # Connection likely closed or reset by client
    except aio.IncompleteReadError:
        pass

    # Failed to parse DNS query: answer FORMERR
    except dns.DNSError:
        answer = dns.DNSRecord(dns.DNSHeader(rcode=dns.RCODE.FORMERR)).pack()
        # BUG FIX: the error reply was written without the mandatory 2-byte
        # length prefix, producing a malformed TCP DNS message.
        writer.write(struct.pack('!H', len(answer)) + answer)
        await writer.drain()

    # Cleanly close client connection
    finally:
        if not writer.is_closing():
            writer.close()
            await writer.wait_closed()
async def dispatch(
    self,
    remote: Node,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    protocol: TCPProtocolV1,
):
    """Run the data loop for *remote* and clean up when it exits.

    Creates the per-peer ingress/egress queues, then hands socket I/O to
    ``data_loop``; on disconnection of a delegate peer that this node
    ranks below, a reconnect is scheduled in the background.
    """
    ingress, egress = self.queue_manager.create_queue(remote)
    try:
        # data_loop handles only the raw receive/send work on the socket.
        # Both ingress and egress come from the QueueManager: processed
        # data is buffered there, and anything to send is dropped into
        # egress, where data_loop picks it up for transmission.
        await self.data_loop(ingress, egress, reader, writer, protocol)
    except asyncio.CancelledError:
        self.logger.info(
            f'{remote.id} data loop exited due to initiative close')
    except asyncio.IncompleteReadError:
        # FIX: Logger.warn is a deprecated alias of Logger.warning.
        self.logger.warning(
            f'{remote.id} data loop exited due to disconnection')
        if remote.is_delegate and remote > self.local_node:
            self.logger.info(
                f'{remote.id} delegate disconnection, try reconnecting')
            self.connect_in_background(remote)
        else:
            self.logger.info(
                f'{remote.id} disconnection, expect incoming reconnection')
    except:  # noqa
        self.logger.warning('data loop exited with exception', exc_info=True)
    finally:
        if not writer.is_closing():
            writer.close()
            await writer.wait_closed()
        self.queue_manager.close(remote)
async def _handle_request(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    # Simple line-oriented command shell served over a socket: send a
    # prompt, read a line, dispatch it via self.parse_opt, write the
    # response back, until the client sends 'quit' or an error occurs.
    writer.write(self.prompt)
    while 1:
        writer.write(b'\r\n$amipy> ')
        client = writer.get_extra_info('peername')
        _c = ':'.join(str(i) for i in client)
        try:
            await writer.drain()
            data = await reader.readline()
            msg = data.decode().strip()
            if msg == 'quit':
                print(
                    f'*[Server] {time.ctime()} Connection closed at {_c}')
                writer.close()
                return
            elif msg:
                # Dispatch the command; response is sent latin-1 encoded.
                resp = self.parse_opt(msg)
                print(
                    f'*[Server] {time.ctime()} Received "{msg}" from {_c}.'
                )
                writer.write(resp.encode('latin-1'))
        except Exception as e:
            # Any failure: report it and close this client's connection.
            print(f'*[Server] {time.ctime()} {e} at {_c}')
            writer.close()
        # NOTE(review): reconstructed from a collapsed one-line source —
        # this drain/close placement is the most plausible reading; confirm
        # against the original file's indentation.
        if not writer.is_closing():
            await writer.drain()
        else:
            writer.close()
            return
async def rfb_close_writer(writer: asyncio.StreamWriter) -> bool:
    """Abort the transport and close *writer* if it is not already closing.

    Returns:
        True if this call initiated the close, False if the writer was
        already closing.
    """
    closing = writer.is_closing()
    if not closing:
        writer.transport.abort()  # type: ignore
        writer.close()
        # BUG FIX: wait_closed() can raise (e.g. ConnectionResetError) and
        # previously leaked out of this helper; the sibling close_writer()
        # already guards it — made both consistent.
        try:
            await writer.wait_closed()
        except Exception:
            pass
    return (not closing)
async def close_writer(writer: asyncio.StreamWriter) -> bool:
    """Abort and close *writer* unless it is already closing.

    Returns True when this call performed the close, False otherwise.
    Errors raised while waiting for the close to finish are ignored.
    """
    already_closing = writer.is_closing()
    if already_closing:
        return False
    writer.transport.abort()  # type: ignore
    writer.close()
    try:
        await writer.wait_closed()
    except Exception:
        pass
    return True
def __check_writer(writer: asyncio.StreamWriter) -> bool:
    """Return True if *writer* still looks usable for writing.

    Prefers ``is_closing()`` when available; falls back to the transport's
    closing state, and finally to ``can_write_eof()`` (pre-3.7 writers).
    """
    if writer is None:
        return False
    probe = getattr(writer, "is_closing", None)
    if probe is not None:
        return not probe()
    transport = writer.transport
    if transport:
        return not transport.is_closing()
    return writer.can_write_eof()
def is_closing(writer: asyncio.StreamWriter) -> bool:
    """
    Python 3.6-compatible `asyncio.StreamWriter.is_closing` wrapper.

    :param writer: The `asyncio.StreamWriter` object.
    :return: `True` if the writer is closing, or closed.
    """
    if sys.version_info < (3, 7):
        # Python 3.6 has no StreamWriter.is_closing; consult the
        # underlying transport directly.
        transport = writer.transport
        assert isinstance(transport, asyncio.WriteTransport)
        return transport.is_closing()
    return writer.is_closing()
async def echo_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    """
    Receive messages from a client until the writer starts closing.

    :param reader: client reader
    :param writer: client writer
    :return:
    """
    client_address, client_port = writer.transport.get_extra_info(
        "peername")
    logger.info(
        f"new connection `{client_address}:{client_port}` was established with server"
    )
    try:
        while not writer.is_closing():
            try:
                # read message
                logger.debug("waiting for message")
                message = await reader.read(read_bytes
                                            )  # max number of bytes to read
                logger.debug("message received")
                # wait for message response
                await await_if_coroutine(response_handler,
                                         message=message,
                                         reader=reader,
                                         writer=writer)
                logger.debug("handle message from client")
                # flush the writer buffer
                await writer.drain()
            except NotReadableMessage as error:
                # do not close connection if this error type
                if reader.at_eof():
                    break
                logger.warning(error)
            except Exception as error:
                logger.error(error)
                # Bare `raise` re-raises preserving the traceback.
                raise
    finally:
        # BUG FIX: the original only closed the writer on the normal-exit
        # path, leaking the connection whenever an exception escaped.
        writer.close()
        logger.info("connection closed")
async def handle_connection(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    """Run the grade-averaging task session for one client connection.

    The client must solve `remaining` tasks in a row; the flag is revealed
    only when the count reaches zero. Any wrong answer or unexpected error
    ends the session early.
    """
    host, port = writer.get_extra_info('peername')
    logger = main_logger.getChild(f"{host}:{port}")
    logger.info("Connection opened from %s", writer.get_extra_info('peername'))
    writer.write("DNEVNIK-RU-BKEND-62-02\n".encode())
    writer.write("Посчитайте средний балл для каждого ученика\n".encode())
    writer.write(
        "Если ученик не набрал оценок за период следует вывести н/а\n".encode(
        ))
    task = Task()
    remaining = 450
    while remaining and not writer.is_closing():
        logger.info(f"{remaining} tasks left")
        writer.write(f"Осталось {remaining} заданий\n".encode())
        try:
            task_s = task.get_task()
            writer.write(task_s.encode())
            # Flush the buffered prompts before blocking on the answer.
            await writer.drain()
            line = await reader.readuntil()
            logger.info(f"{len(line)} bytes received")
            try:
                correct = task.check_task(line)
            except Exception:
                writer.write("Неверный формат ответа\n".encode())
                logger.info("presentation error")
                continue
            if correct:
                remaining -= 1
                writer.write("Верно\n".encode())
                logger.info("correct")
            else:
                writer.write("Неверно\n".encode())
                logger.info("incorrect")
                break
        except Exception as e:
            writer.write("Непредвиденная ошибка\n".encode())
            logger.error(e)
            break
    if remaining <= 0:
        logger.info("solved")
        flag = FLAG
        writer.write(f"Ваш флаг: {flag}\n".encode())
    writer.write(f"До свидания!\n".encode())
    writer.write_eof()
    # BUG FIX: wait_closed() only completes after close(); the original
    # awaited it without ever closing the writer, hanging the handler.
    writer.close()
    await writer.wait_closed()
async def server_connection_callback(self, client_reader: StreamReader, client_writer: StreamWriter) -> None:
    """Serve one client session and always close it cleanly.

    Package (ValueError) and connection errors are logged rather than
    propagated; the session-close message is logged in every case.
    """
    host, port = client_writer.get_extra_info('peername')
    try:
        await self.serve_client(client_reader=client_reader, client_writer=client_writer)
    except ValueError as err:
        logger.warning(f'{host}:{port} package error: {err}')
    except ConnectionError as err:
        logger.warning(f'{host}:{port} connection error: {err}')
    finally:
        if not client_writer.is_closing():
            await client_writer.drain()
            client_writer.close()
            # BUG FIX: actually wait for the close to complete instead of
            # leaving the transport teardown dangling.
            await client_writer.wait_closed()
        logger.info(f'{host}:{port} close session')
async def on_connect(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    """Register a newly connected peer, broadcast each of its messages,
    and clean up when it disconnects."""
    peername = writer.get_extra_info('peername')
    print(f"[+] {peername} connected")
    self.readers.append(reader)
    self.writers.append(writer)
    try:
        while not writer.is_closing():
            try:
                message = await reader.readuntil(Server.MESSAGE_DELIMITER)
                print(f"[#] {peername}: {message}")
                self.loop.create_task(self.broadcast(message))
            except async_exc.IncompleteReadError:
                print(f"(-) {peername} disconnected")
                break
            # BUG FIX: the bare `except:` also swallowed CancelledError,
            # keeping cancelled handlers spinning; catch only app errors.
            except Exception:
                print_exc()
    finally:
        # BUG FIX: always unregister the peer (even on cancellation) and
        # release the socket, which the original never closed.
        self.readers.remove(reader)
        self.writers.remove(writer)
        if not writer.is_closing():
            writer.close()
async def _handshake_shared_secret(reader: asyncio.StreamReader,
                                   writer: asyncio.StreamWriter,
                                   endpoint: EndPoint, keys, token):
    """Perform the outgoing shared-secret handshake with *endpoint*.

    Builds a signed exchange secret, encrypts the payload to the peer's
    ephemeral public key, sends it, then waits (cancellably, 5 s timeout)
    for the peer's 169-byte reply.

    Raises:
        ConnectionError: if the connection closes before or during the
            handshake.
    """
    ephemeral_keys = ecies.generate_random()
    shared_secret = ecies.make_shared_secret(keys.private_bytes, endpoint.pubkey)
    random = os.urandom(16)
    random_secret = sha3_256(random).digest()
    sequence = pad32(int_to_big32(0))
    exchange_secret = sha3_256(shared_secret + random_secret + sequence).digest()
    sig = ephemeral_keys.sign(exchange_secret)
    encode_sig = encode_signature(sig)

    # --- temporary
    version = pad16(int_to_big16(1))
    handshake_suffix = pad16(int_to_big16(0))
    # ---------------------------------------------

    payload = encode_payload(sequence, encode_sig, keys.public_bytes,
                             random_secret, version, handshake_suffix)
    cipher = ecies.encrypt(ephemeral_keys.public_bytes, payload)

    if writer.is_closing():
        raise ConnectionError(
            'during open_connection handling, connection closed')

    writer.write(cipher)
    await writer.drain()
    # BUG FIX: the awaitable returned by cancellable_wait was never awaited,
    # so the reply was never read and the 5 s timeout never applied.
    await token.cancellable_wait(reader.read(169), timeout=5)
    if reader.at_eof():
        raise ConnectionError('disconnected')
async def ahandle_peer(self, reader: StreamReader, writer: StreamWriter) -> None:
    """Read all DNS queries from the peer stream and schedule their resolution
    via a DnsResolver instance."""
    pending: Union[List[Task], Set[Task]] = []
    wlock = aio.Lock()

    logging.debug(f'Got TCP DNS query stream from {writer.transport.get_extra_info("peername")}')

    # Pull length-prefixed DNS queries off the wire until the peer stops
    # writing, scheduling each one for concurrent resolution.
    while True:
        try:
            size = int.from_bytes(await reader.readexactly(2), 'big')
            query = await reader.readexactly(size)
        except aio.IncompleteReadError:
            # Peer has finished writing to the stream.
            break
        pending.append(aio.create_task(self.ahandle_query(writer, wlock, query)))

    # Drain the scheduled resolutions, logging any that failed.
    while pending:
        finished, pending = await aio.wait(pending, return_when=aio.FIRST_COMPLETED)
        for completed in finished:
            error = completed.exception()
            if error is not None:
                logging.warning(f'TCP DNS query resolution encountered an error - {error!r}')

    if not writer.is_closing():
        # Signal we are done writing, then tear the stream down.
        if writer.can_write_eof():
            writer.write_eof()
        writer.close()
        await writer.wait_closed()
async def handle_connection(self, reader: StreamReader, writer: StreamWriter):
    """Handle one peer connection: read newline-delimited messages, register
    the peer in the connection pool, and dispatch each message to the P2P
    protocol until the peer disconnects."""
    while True:
        try:
            # Wait forever on new data to arrive
            data = await reader.readuntil(b"\n")  # <3>

            decoded_data = data.decode("utf8").strip()  # <4>

            try:
                message = BaseSchema().loads(decoded_data)  # <5>
            except MarshmallowError:
                # Malformed message: drop the connection entirely.
                logger.info("Received unreadable message", peer=writer)
                break

            # Extract the address from the message, add it to the writer object
            # NOTE(review): a message missing "meta"/"address" raises KeyError
            # here, which is not caught — confirm upstream guarantees the schema.
            writer.address = message["meta"]["address"]

            # Let's add the peer to our connection pool
            self.connection_pool.add_peer(writer)

            # ...and handle the message
            await self.p2p_protocol.handle_message(message, writer)  # <6>

            await writer.drain()
            if writer.is_closing():
                break

        except (asyncio.exceptions.IncompleteReadError, ConnectionError):
            # An error happened, break out of the wait loop
            break

    # The connection has closed. Let's clean up...
    writer.close()
    await writer.wait_closed()
    self.connection_pool.remove_peer(writer)  # <7>
async def __handle_client(
    self,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
) -> None:
    """Authenticate one worker connection and feed it tasks until EOF.

    The worker first sends (packet_type, worker_id, digest, pid). After
    HMAC-SHA256 authentication against the pool cookie, this handler pulls
    work items from ``self.tasks`` and relays them as REQUEST packets,
    resolving each item's result future from the worker's reply.
    """
    proto = AsyncProtocol(reader, writer)
    packet_type, worker_id, digest, pid = await proto.receive()

    # Refuse new workers while the pool is shutting down.
    async with self.__closing_lock:
        if self.__closing:
            proto.close()

    if packet_type == PacketTypes.BAD_INITIALIZER:
        # Worker failed during initialization; it sends the exception next.
        packet_type, exc = await proto.receive()
        if packet_type != PacketTypes.EXCEPTION:
            await proto.send(PacketTypes.BAD_PACKET)
        else:
            # Fail all outstanding futures with the initializer error.
            set_exception(self.__futures, exc)
        await self.close()
        return

    if packet_type != PacketTypes.AUTH:
        await proto.send(PacketTypes.BAD_PACKET)
        if writer.can_write_eof():
            writer.write_eof()
        return

    if worker_id not in self.worker_ids:
        log.error("Unknown worker with id %r", worker_id)
        return

    # Pool cookie + worker id -> expected HMAC-SHA256 digest.
    expected_digest = hmac.HMAC(
        self.__cookie,
        worker_id,
        digestmod=hashlib.sha256,
    ).digest()

    if expected_digest != digest:
        await proto.send(PacketTypes.AUTH_FAIL)
        if writer.can_write_eof():
            writer.write_eof()
        log.debug("Bad digest %r expected %r", digest, expected_digest)
        return

    await proto.send(PacketTypes.AUTH_OK)

    self._statistic.processes += 1
    self._statistic.spawning += 1
    self.pids.add(pid)

    try:
        while not reader.at_eof():
            func: Callable
            args: Tuple[Any, ...]
            kwargs: Dict[str, Any]
            result_future: asyncio.Future
            process_future: asyncio.Future

            (
                func,
                args,
                kwargs,
                result_future,
                process_future,
            ) = await self.tasks.get()

            # Skip items already resolved/cancelled by the caller.
            if process_future.done() or result_future.done():
                continue

            try:
                process_future.set_result(pid)

                await proto.send((PacketTypes.REQUEST, func, args, kwargs))
                packet_type, payload = await proto.receive()

                if result_future.done():
                    log.debug(
                        "Result future %r already done, skipping",
                        result_future,
                    )
                    continue

                if packet_type == PacketTypes.RESULT:
                    result_future.set_result(payload)
                elif packet_type in (
                    PacketTypes.EXCEPTION,
                    PacketTypes.CANCELLED,
                ):
                    result_future.set_exception(payload)
                del packet_type, payload
            except (asyncio.IncompleteReadError, ConnectionError):
                # Worker died mid-request: fail the pending future and stop.
                if not result_future.done():
                    result_future.set_exception(
                        ProcessError(f"Process {pid!r} unexpected exited"),
                    )
                break
            except Exception as e:
                if not result_future.done():
                    result_future.set_exception(e)

                if not writer.is_closing():
                    if writer.can_write_eof():
                        writer.write_eof()
                    writer.close()

                raise
    finally:
        self._statistic.processes -= 1
        self.pids.remove(pid)