async def _address_relay(self):
    """Relay newly learned peer addresses to a deterministic subset of peers.

    Consumes (relay_peer, num_peers) pairs from self.relay_queue and forwards
    each valid address to the `num_peers` connections with the lowest salted
    daily hash, per the Bitcoin address-relay scheme:
    https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
    Runs until self.is_closed; exceptions are logged and the loop continues.
    """
    while not self.is_closed:
        try:
            try:
                relay_peer, num_peers = await self.relay_queue.get()
            except asyncio.CancelledError:
                return None
            relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
            if not relay_peer_info.is_valid():
                continue
            connections = self.server.get_full_node_connections()
            hashes = []
            # Salt with our per-node key and the current day so relay
            # targets rotate daily and differ between nodes.
            cur_day = int(time.time()) // (24 * 60 * 60)
            for connection in connections:
                peer_info = connection.get_peer_info()
                if peer_info is None:
                    continue
                cur_hash = int.from_bytes(
                    bytes(
                        std_hash(
                            self.key.to_bytes(32, byteorder="big")
                            + peer_info.get_key()
                            + cur_day.to_bytes(3, byteorder="big")
                        )
                    ),
                    byteorder="big",
                )
                hashes.append((cur_hash, connection))
            hashes.sort(key=lambda x: x[0])
            for index, (_, connection) in enumerate(hashes):
                if index >= num_peers:
                    break
                peer_info = connection.get_peer_info()
                # Fix: the connection may have dropped since the hashing pass
                # above, in which case get_peer_info() returns None and the
                # original code raised AttributeError on peer_info.host.
                if peer_info is None:
                    continue
                pair = (peer_info.host, peer_info.port)
                async with self.lock:
                    if (
                        pair in self.neighbour_known_peers
                        and relay_peer.host in self.neighbour_known_peers[pair]
                    ):
                        # This neighbour already knows about the address.
                        continue
                    if pair not in self.neighbour_known_peers:
                        self.neighbour_known_peers[pair] = set()
                    self.neighbour_known_peers[pair].add(relay_peer.host)
                if connection.peer_node_id is None:
                    continue
                msg = make_msg(
                    ProtocolMessageTypes.respond_peers,
                    full_node_protocol.RespondPeers([relay_peer]),
                )
                await connection.send_message(msg)
        except Exception as e:
            self.log.error(f"Exception in address relay: {e}")
            self.log.error(f"Traceback: {traceback.format_exc()}")
async def test_did_recovery_with_empty_set(self, two_wallet_nodes):
    """A DID recovery spend with an empty recovery set should create no new coins."""
    num_blocks = 5
    full_nodes, wallets = two_wallet_nodes
    full_node_1 = full_nodes[0]
    server_1 = full_node_1.server
    wallet_node, server_2 = wallets[0]
    wallet_node_2, server_3 = wallets[1]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    # Connect both wallet servers to the full node.
    await server_2.start_client(
        PeerInfo("localhost", uint16(server_1._port)), None)
    await server_3.start_client(
        PeerInfo("localhost", uint16(server_1._port)), None)
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(
            FarmNewBlockProtocol(ph))
    # Expected balance: pool + farmer rewards for the fully confirmed blocks.
    funds = sum([
        calculate_pool_reward(uint32(i)) +
        calculate_base_farmer_reward(uint32(i))
        for i in range(1, num_blocks - 1)
    ])
    await time_out_assert(15, wallet.get_confirmed_balance, funds)
    async with wallet_node.wallet_state_manager.lock:
        did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node.wallet_state_manager, wallet, uint64(101))
    for i in range(1, num_blocks):
        await full_node_1.farm_new_transaction_block(
            FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
    coins = await did_wallet.select_coins(1)
    coin = coins.pop()
    # Empty recovery info and an empty aggregated signature: the recovery
    # spend is built, but should yield no additions.
    info = Program.to([])
    pubkey = (await did_wallet.wallet_state_manager.get_unused_derivation_record(
        did_wallet.wallet_info.id)).pubkey
    spend_bundle = await did_wallet.recovery_spend(
        coin, ph, info, pubkey, SpendBundle([], AugSchemeMPL.aggregate([])))
    additions = spend_bundle.additions()
    assert additions == []
async def test_mempool_update_performance(self, wallet_nodes, default_400_blocks):
    """After a transaction with a very large spend bundle enters the mempool,
    each subsequent block must still be processed in under one second."""
    blocks = default_400_blocks
    full_nodes, wallets = wallet_nodes
    wallet_node = wallets[0][0]
    wallet_server = wallets[0][1]
    full_node_api_1 = full_nodes[0]
    full_node_api_2 = full_nodes[1]
    server_1 = full_node_api_1.full_node.server
    server_2 = full_node_api_2.full_node.server
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    for block in blocks:
        await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
    await wallet_server.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
    await time_out_assert(60, wallet_height_at_least, True, wallet_node, 399)
    # Large-amount transaction spread over many coins to stress mempool updates.
    big_transaction: TransactionRecord = await wallet.generate_signed_transaction(40000000000000, ph, 2213)
    peer = await connect_and_get_peer(server_1, server_2)
    await full_node_api_1.respond_transaction(
        full_node_protocol.RespondTransaction(big_transaction.spend_bundle), peer, test=True
    )
    # Drop all connections so the timing below measures only block processing.
    cons = list(server_1.all_connections.values())[:]
    for con in cons:
        await con.close()
    blocks = bt.get_consecutive_blocks(3, blocks)
    await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-3]))
    for block in blocks[-2:]:
        start_t_2 = time.time()
        await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Each block must be fast despite the large mempool item.
        assert time.time() - start_t_2 < 1
def service_kwargs_for_harvester(
    root_path: pathlib.Path,
    config: Dict,
    consensus_constants: ConsensusConstants,
) -> Dict:
    """Build the keyword arguments used to launch the harvester service."""
    selected_network = config["selected_network"]
    overrides = config["network_overrides"]["constants"][selected_network]
    updated_constants = consensus_constants.replace_str_to_bytes(**overrides)

    harvester = Harvester(root_path, config, updated_constants)
    peer_api = HarvesterAPI(harvester)
    farmer_peer = config["farmer_peer"]

    kwargs: Dict = dict(
        root_path=root_path,
        node=harvester,
        peer_api=peer_api,
        node_type=NodeType.HARVESTER,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        connect_peers=[PeerInfo(farmer_peer["host"], farmer_peer["port"])],
        auth_connect_peers=True,
        network_id=selected_network,
    )
    if config["start_rpc_server"]:
        kwargs["rpc_info"] = (HarvesterRpcApi, config["rpc_port"])
    return kwargs
def service_kwargs_for_farmer(
    root_path: pathlib.Path,
    config: Dict,
    config_pool: Dict,
    keychain: Keychain,
    consensus_constants: ConsensusConstants,
) -> Dict:
    """Build the keyword arguments used to launch the farmer service."""
    selected_network = config["selected_network"]
    overrides = config["network_overrides"]["constants"][selected_network]
    updated_constants = consensus_constants.replace_str_to_bytes(**overrides)

    # The full-node peer entry is optional in the farmer config.
    connect_peers = []
    fnp = config.get("full_node_peer")
    if fnp is not None:
        connect_peers.append(PeerInfo(fnp["host"], fnp["port"]))

    farmer = Farmer(root_path, config, config_pool, keychain, consensus_constants=updated_constants)
    peer_api = FarmerAPI(farmer)

    kwargs: Dict = dict(
        root_path=root_path,
        node=farmer,
        peer_api=peer_api,
        node_type=NodeType.FARMER,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        connect_peers=connect_peers,
        auth_connect_peers=False,
        on_connect_callback=farmer.on_connect,
        network_id=selected_network,
    )
    if config["start_rpc_server"]:
        kwargs["rpc_info"] = (FarmerRpcApi, config["rpc_port"])
    return kwargs
async def test_get_wallet_for_colour(self, two_wallet_nodes):
    """get_wallet_for_colour should return the CC wallet that issued that colour."""
    num_blocks = 3
    full_nodes, wallets = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    await server_2.start_client(
        PeerInfo("localhost", uint16(full_node_server._port)), None)
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(ph))
    # Pool + farmer rewards for the fully confirmed blocks.
    funds = sum([
        calculate_pool_reward(uint32(i)) +
        calculate_base_farmer_reward(uint32(i))
        for i in range(1, num_blocks - 1)
    ])
    await time_out_assert(15, wallet.get_confirmed_balance, funds)
    cc_wallet: CCWallet = await CCWallet.create_new_cc(
        wallet_node.wallet_state_manager, wallet, uint64(100))
    # Farm to an unrelated puzzle hash to confirm the CC genesis transaction.
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(32 * b"0"))
    colour = cc_wallet.get_colour()
    assert await wallet_node.wallet_state_manager.get_wallet_for_colour(
        colour) == cc_wallet
def service_kwargs_for_timelord(
    root_path: pathlib.Path,
    config: Dict,
    constants: ConsensusConstants,
) -> Dict:
    """Build the keyword arguments used to launch the timelord service."""
    selected_network = config["selected_network"]
    overrides = config["network_overrides"]["constants"][selected_network]
    updated_constants = constants.replace_str_to_bytes(**overrides)

    node = Timelord(root_path, config, updated_constants)
    peer_api = TimelordAPI(node)
    full_node_peer = config["full_node_peer"]

    return dict(
        root_path=root_path,
        peer_api=peer_api,
        node=node,
        node_type=NodeType.TIMELORD,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        connect_peers=[PeerInfo(full_node_peer["host"], full_node_peer["port"])],
        auth_connect_peers=False,
        network_id=selected_network,
    )
async def test_blocks_load(self, two_nodes):
    """Both nodes should process 50 consecutive blocks within the time bound."""
    num_blocks = 50
    full_node_1, full_node_2, server_1, server_2 = two_nodes
    blocks = bt.get_consecutive_blocks(num_blocks)
    peer = await connect_and_get_peer(server_1, server_2)
    await full_node_1.full_node.respond_block(
        full_node_protocol.RespondBlock(blocks[0]), peer)
    await server_2.start_client(
        PeerInfo(self_hostname, uint16(server_1._port)), None)

    async def num_connections():
        # Connection count as seen from node 2.
        return len(server_2.get_connections())

    await time_out_assert(10, num_connections, 1)

    start_unf = time.time()
    for i in range(1, num_blocks):
        await full_node_1.full_node.respond_block(
            full_node_protocol.RespondBlock(blocks[i]))
        await full_node_2.full_node.respond_block(
            full_node_protocol.RespondBlock(blocks[i]))
    print(
        f"Time taken to process {num_blocks} is {time.time() - start_unf}")
    # Sanity bound on total processing time.
    assert time.time() - start_unf < 100
def get_peer_info(self) -> Optional[PeerInfo]:
    """Return the remote peer's address, or None if the transport has no peername."""
    peername = self.ws._writer.transport.get_extra_info("peername")
    if peername is None:
        return None
    host = peername[0]
    # Prefer the peer's advertised server port over the ephemeral socket port.
    if self.peer_server_port is not None:
        chosen_port = self.peer_server_port
    else:
        chosen_port = self.peer_port
    return PeerInfo(host, chosen_port)
async def test_wallet_coinbase_reorg(self, wallet_node):
    """A reorg that orphans reward blocks must shrink the wallet balance accordingly."""
    num_blocks = 5
    full_nodes, wallets = wallet_node
    full_node_api = full_nodes[0]
    fn_server = full_node_api.full_node.server
    wallet_node, server_2 = wallets[0]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    await server_2.start_client(
        PeerInfo(self_hostname, uint16(fn_server._port)), None)
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(ph))
    # Rewards for the confirmed blocks farmed to our puzzle hash.
    funds = sum([
        calculate_pool_reward(uint32(i)) +
        calculate_base_farmer_reward(uint32(i))
        for i in range(1, num_blocks)
    ])
    await time_out_assert(5, wallet.get_confirmed_balance, funds)
    # Reorg from height 3 onto a longer chain whose rewards go elsewhere.
    await full_node_api.reorg_from_index_to_new_index(
        ReorgProtocol(uint32(3), uint32(num_blocks + 6), 32 * b"0"))
    # Only rewards up to the fork point remain ours.
    funds = sum([
        calculate_pool_reward(uint32(i)) +
        calculate_base_farmer_reward(uint32(i))
        for i in range(1, num_blocks - 2)
    ])
    await time_out_assert(5, wallet.get_confirmed_balance, funds)
async def disconnect_all_and_reconnect(server: ChiaServer, reconnect_to: ChiaServer) -> bool:
    """Close every connection held by `server`, then reconnect it to `reconnect_to`.

    Returns True if the reconnection succeeded.
    """
    for connection in list(server.all_connections.values()):
        await connection.close()
    target = PeerInfo(self_hostname, uint16(reconnect_to._port))
    return await server.start_client(target, None)
def get_host_addr(host: Union[PeerInfo, str], prefer_ipv6: Optional[bool]) -> str:
    """Resolve `host` to an IP address string.

    If `host` already holds a valid IP address (v4 or v6), it is returned
    unchanged; otherwise it is resolved via DNS, honouring `prefer_ipv6`.
    """
    # No preference supplied (e.g. absent from config): default to IPv4.
    # TODO: this default should probably live in the config itself.
    if prefer_ipv6 is None:
        prefer_ipv6 = False
    # Use PeerInfo.is_valid() to detect strings that are already addresses.
    if isinstance(host, PeerInfo):
        hoststr = host.host
        if host.is_valid(True):
            return hoststr
    else:
        hoststr = host
        if PeerInfo(hoststr, uint16(0)).is_valid(True):
            return hoststr
    addrset: List[
        Tuple["socket.AddressFamily", "socket.SocketKind", int, str,
              Union[Tuple[str, int], Tuple[str, int, int, int]]]
    ] = socket.getaddrinfo(hoststr, None)
    # getaddrinfo either raises or returns at least one entry.
    wanted_family = socket.AF_INET6 if prefer_ipv6 else socket.AF_INET
    for entry in addrset:
        if entry[0] == wanted_family:
            return entry[4][0]
    # Nothing matched the preferred family; fall back to the first result.
    return addrset[0][4][0]
async def test_colour_creation(self, two_wallet_nodes):
    """Creating a CC wallet should mint 100 units and confirm them on-chain."""
    num_blocks = 3
    full_nodes, wallets = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    # Pool + farmer rewards for the fully confirmed blocks.
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks - 1)
        ]
    )
    await time_out_assert(15, wallet.get_confirmed_balance, funds)
    cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
    # The genesis transaction should be queued for sending.
    tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
    tx_record = tx_queue[0]
    await time_out_assert(
        15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
    )
    # Farm to an unrelated puzzle hash to confirm the CC genesis.
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
    await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
async def test_block_ses_mismatch(self, two_nodes, default_1000_blocks):
    """Syncing against tampered sub-epoch summaries should stop before the peak."""
    full_node_1, full_node_2, server_1, server_2 = two_nodes
    blocks = default_1000_blocks
    for block in blocks[:501]:
        await full_node_1.full_node.respond_block(
            full_node_protocol.RespondBlock(block))
    peak1 = full_node_1.full_node.blockchain.get_peak()
    full_node_2.full_node.sync_store.set_long_sync(True)
    await server_2.start_client(
        PeerInfo(self_hostname, uint16(server_1._port)),
        full_node_2.full_node.on_connect)
    wp = await full_node_1.full_node.weight_proof_handler.get_proof_of_weight(
        peak1.header_hash)
    summaries1, _ = _validate_sub_epoch_summaries(
        full_node_1.full_node.weight_proof_handler.constants, wp)
    # NOTE(review): summaries2 aliases summaries1 (no copy), so the mutation
    # below also changes summaries1 — harmless here because summaries1 is not
    # read again afterwards.
    summaries2 = summaries1
    s = summaries1[1]
    # change summary so check would fail on 2 sub epoch
    summaries2[1] = SubEpochSummary(
        s.prev_subepoch_summary_hash,
        s.reward_chain_hash,
        s.num_blocks_overflow,
        s.new_difficulty * 2,
        s.new_sub_slot_iters * 2,
    )
    await full_node_2.full_node.sync_from_fork_point(
        0, 500, peak1.header_hash, summaries2)
    log.info(
        f"full node height {full_node_2.full_node.blockchain.get_peak().height}"
    )
    # Sync must have halted partway: height lands between 320 and 400.
    assert node_height_between(full_node_2, 320, 400)
async def test_basic_sync_wallet(self, wallet_node, default_400_blocks):
    """The wallet should sync 400 blocks, then follow a 30-block reorg."""
    full_node_api, wallet_node, full_node_server, wallet_server = wallet_node
    for block in default_400_blocks:
        await full_node_api.full_node.respond_block(
            full_node_protocol.RespondBlock(block))
    await wallet_server.start_client(
        PeerInfo(self_hostname, uint16(full_node_server._port)), None)

    # The second node should eventually catch up to the first one, and have the
    # same tip at height num_blocks - 1.
    await time_out_assert(100, wallet_height_at_least, True, wallet_node,
                          len(default_400_blocks) - 1)

    # Tests a reorg with the wallet
    num_blocks = 30
    blocks_reorg = bt.get_consecutive_blocks(
        num_blocks, block_list_input=default_400_blocks[:-5])
    for i in range(1, len(blocks_reorg)):
        await full_node_api.full_node.respond_block(
            full_node_protocol.RespondBlock(blocks_reorg[i]))

    await disconnect_all_and_reconnect(wallet_server, full_node_server)

    # New chain: 400 - 5 dropped + 30 new blocks.
    await time_out_assert(100, wallet_height_at_least, True, wallet_node,
                          len(default_400_blocks) + num_blocks - 5 - 1)
def __init__(
    self,
    server: ChiaServer,
    root_path: Path,
    target_outbound_count: int,
    peer_db_path: str,
    introducer_info: Optional[Dict],
    peer_connect_interval: int,
    log,
):
    """Peer-discovery state holder.

    Args:
        server: ChiaServer used to open outbound connections.
        root_path: chia root directory; peer_db_path is resolved against it.
        target_outbound_count: desired number of outbound peer connections.
        peer_db_path: relative path of the peer database file.
        introducer_info: optional {"host": ..., "port": ...} for the introducer.
        peer_connect_interval: interval (seconds) between connection attempts.
        log: logger instance.
    """
    self.server: ChiaServer = server
    self.message_queue: asyncio.Queue = asyncio.Queue()
    self.is_closed = False
    self.target_outbound_count = target_outbound_count
    self.peer_db_path = path_from_root(root_path, peer_db_path)
    if introducer_info is not None:
        self.introducer_info: Optional[PeerInfo] = PeerInfo(
            introducer_info["host"],
            introducer_info["port"],
        )
    else:
        self.introducer_info = None
    self.peer_connect_interval = peer_connect_interval
    self.log = log
    self.relay_queue = None  # presumably assigned later where relay is enabled — TODO confirm
    self.address_manager: Optional[AddressManager] = None
    self.connection_time_pretest: Dict = {}
    self.received_count_from_peers: Dict = {}
    self.lock = asyncio.Lock()
    # Background task handles; None until started.
    self.connect_peers_task: Optional[asyncio.Task] = None
    self.serialize_task: Optional[asyncio.Task] = None
    self.cleanup_task: Optional[asyncio.Task] = None
    self.initial_wait: int = 0
def from_string(cls, peer_str: str):
    """Parse "host port timestamp src_host src_port" into an instance."""
    fields = peer_str.split(" ")
    assert len(fields) == 5
    host, port_str, ts_str, src_host, src_port_str = fields
    peer_info = TimestampedPeerInfo(host, uint16(int(port_str)), uint64(int(ts_str)))
    src_peer = PeerInfo(src_host, uint16(int(src_port_str)))
    return cls(peer_info, src_peer)
async def wallets_prefarm(two_wallet_nodes, trusted):
    """
    Sets up the node with 10 blocks, and returns a payer and payee wallet.
    """
    farm_blocks = 10
    buffer = 4
    full_nodes, wallets = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node_0, wallet_server_0 = wallets[0]
    wallet_node_1, wallet_server_1 = wallets[1]
    wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
    wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
    ph0 = await wallet_0.get_new_puzzlehash()
    ph1 = await wallet_1.get_new_puzzlehash()
    # Mark the full node trusted (or not) in both wallet configs before connecting.
    if trusted:
        wallet_node_0.config["trusted_peers"] = {
            full_node_server.node_id.hex(): full_node_server.node_id.hex()
        }
        wallet_node_1.config["trusted_peers"] = {
            full_node_server.node_id.hex(): full_node_server.node_id.hex()
        }
    else:
        wallet_node_0.config["trusted_peers"] = {}
        wallet_node_1.config["trusted_peers"] = {}
    await wallet_server_0.start_client(
        PeerInfo("localhost", uint16(full_node_server._port)), None)
    await wallet_server_1.start_client(
        PeerInfo("localhost", uint16(full_node_server._port)), None)
    # Farm rewards to each wallet in turn.
    for i in range(0, farm_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(ph0))
    for i in range(0, farm_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(ph1))
    # Extra buffer blocks to a random puzzle hash so the rewards above confirm.
    for i in range(0, buffer):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(token_bytes()))
    return wallet_node_0, wallet_node_1, full_node_api
def get_peer_logging(self) -> PeerInfo:
    """Return peer info for logging, never None.

    Falls back to self.peer_host when the socket peername is unavailable,
    which is friendlier for log messages.
    """
    info: Optional[PeerInfo] = self.get_peer_info()
    if info is not None:
        return info
    # No peername available: use the host we connected to, preferring the
    # peer's advertised server port over the ephemeral socket port.
    fallback_port = self.peer_server_port if self.peer_server_port is not None else self.peer_port
    return PeerInfo(self.peer_host, fallback_port)
async def test_cat_creation(self, two_wallet_nodes, trusted):
    """Creating a CAT wallet should mint 100 units and confirm them on-chain."""
    num_blocks = 3
    full_nodes, wallets = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    # Configure trust before connecting to the full node.
    if trusted:
        wallet_node.config["trusted_peers"] = {
            full_node_server.node_id.hex(): full_node_server.node_id.hex()
        }
    else:
        wallet_node.config["trusted_peers"] = {}
    await server_2.start_client(
        PeerInfo("localhost", uint16(full_node_server._port)), None)
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(ph))
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(32 * b"0"))
    funds = sum([
        calculate_pool_reward(uint32(i)) +
        calculate_base_farmer_reward(uint32(i))
        for i in range(1, num_blocks + 1)
    ])
    await time_out_assert(15, wallet.get_confirmed_balance, funds)
    async with wallet_node.wallet_state_manager.lock:
        cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet,
            {"identifier": "genesis_by_id"}, uint64(100))
        # The next 2 lines are basically a noop, it just adds test coverage
        cat_wallet = await CATWallet.create(
            wallet_node.wallet_state_manager, wallet, cat_wallet.wallet_info)
        await wallet_node.wallet_state_manager.add_new_wallet(
            cat_wallet, cat_wallet.id())
    # The genesis transaction should be queued and reach the mempool.
    tx_queue: List[
        TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent(
        )
    tx_record = tx_queue[0]
    await time_out_assert(15, tx_in_pool, True,
                          full_node_api.full_node.mempool_manager,
                          tx_record.spend_bundle.name())
    # Farm to an unrelated puzzle hash to confirm the CAT genesis.
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(15, cat_wallet.get_confirmed_balance, 100)
    await time_out_assert(15, cat_wallet.get_spendable_balance, 100)
    await time_out_assert(15, cat_wallet.get_unconfirmed_balance, 100)
async def test_serialization(self, tmp_path: Path):
    """Round-trip the address manager through AddressManagerStore and verify
    all three peers (with source and timestamp) survive serialization."""
    addrman = AddressManagerTest()
    now = int(math.floor(time.time()))
    t_peer1 = TimestampedPeerInfo("250.7.1.1", uint16(8333), uint64(now - 10000))
    t_peer2 = TimestampedPeerInfo("250.7.2.2", uint16(9999), uint64(now - 20000))
    t_peer3 = TimestampedPeerInfo("250.7.3.3", uint16(9999), uint64(now - 30000))
    source = PeerInfo("252.5.1.1", uint16(8333))
    await addrman.add_to_new_table([t_peer1, t_peer2, t_peer3], source)
    await addrman.mark_good(PeerInfo("250.7.1.1", uint16(8333)))

    peers_dat_filename = tmp_path / "peers.dat"
    if peers_dat_filename.exists():
        peers_dat_filename.unlink()
    # Write out the serialized peer data
    await AddressManagerStore.serialize(addrman, peers_dat_filename)
    # Read in the serialized peer data
    addrman2 = await AddressManagerStore.create_address_manager(
        peers_dat_filename)

    # select_peer is randomized; sample until all three distinct peers appear.
    retrieved_peers = []
    for _ in range(50):
        peer = await addrman2.select_peer()
        if peer not in retrieved_peers:
            retrieved_peers.append(peer)
        if len(retrieved_peers) == 3:
            break
    assert len(retrieved_peers) == 3
    wanted_peers = [
        ExtendedPeerInfo(t_peer1, source),
        ExtendedPeerInfo(t_peer2, source),
        ExtendedPeerInfo(t_peer3, source),
    ]
    recovered = 0
    for target_peer in wanted_peers:
        for current_peer in retrieved_peers:
            if (current_peer is not None
                    and current_peer.peer_info == target_peer.peer_info
                    and current_peer.src == target_peer.src
                    and current_peer.timestamp == target_peer.timestamp):
                recovered += 1
    assert recovered == 3
    peers_dat_filename.unlink()
async def test_addrman_get_peers(self):
    """get_peers should return 23% of known addresses (rounded up), drawn
    from both the new and the tried tables."""
    addrman = AddressManagerTest()
    assert await addrman.size() == 0
    peers1 = await addrman.get_peers()
    assert len(peers1) == 0
    peer1 = TimestampedPeerInfo("250.250.2.1", 8444, time.time())
    peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
    peer3 = TimestampedPeerInfo("251.252.2.3", 8444, time.time())
    peer4 = TimestampedPeerInfo("251.252.2.4", 8444, time.time())
    peer5 = TimestampedPeerInfo("251.252.2.5", 8444, time.time())
    source1 = PeerInfo("250.1.2.1", 8444)
    source2 = PeerInfo("250.2.3.3", 8444)
    # Test: Ensure GetPeers works with new addresses.
    assert await addrman.add_to_new_table([peer1], source1)
    assert await addrman.add_to_new_table([peer2], source2)
    assert await addrman.add_to_new_table([peer3], source1)
    assert await addrman.add_to_new_table([peer4], source1)
    assert await addrman.add_to_new_table([peer5], source1)
    # GetPeers returns 23% of addresses, 23% of 5 is 2 rounded up.
    peers2 = await addrman.get_peers()
    assert len(peers2) == 2
    # Test: Ensure GetPeers works with new and tried addresses.
    await addrman.mark_good(PeerInfo(peer1.host, peer1.port))
    await addrman.mark_good(PeerInfo(peer2.host, peer2.port))
    peers3 = await addrman.get_peers()
    assert len(peers3) == 2
    # Test: Ensure GetPeers still returns 23% when addrman has many addrs.
    for i in range(1, 8 * 256):
        octet1 = i % 256
        # Fix: the original `i >> 8 % 256` parsed as `i >> (8 % 256)` because
        # % binds tighter than >>. The intent is the high byte of i; the
        # parenthesized form is equivalent for this range and states it.
        octet2 = (i >> 8) % 256
        peer = TimestampedPeerInfo(
            str(octet1) + "." + str(octet2) + ".1.23", 8444, time.time())
        await addrman.add_to_new_table([peer])
        # Promote every 8th peer to the tried table.
        if i % 8 == 0:
            await addrman.mark_good(PeerInfo(peer.host, peer.port))
    peers4 = await addrman.get_peers()
    percent = await addrman.size()
    percent = math.ceil(percent * 23 / 100)
    assert len(peers4) == percent
async def test_addrman_collisions_new(self):
    """Colliding entries in the new table should not grow the manager's size."""
    addrman = AddressManagerTest()
    assert await addrman.size() == 0
    source = PeerInfo("252.2.2.2", 8444)
    # Add 7 distinct peers; each insert must grow the table by one.
    for idx in range(1, 8):
        fresh_peer = PeerInfo(f"250.1.1.{idx}", 8444)
        assert await addrman.add_peer_info([fresh_peer], source)
        assert await addrman.size() == idx
    # Test: new table collision!
    colliding_peer = PeerInfo("250.1.1.8", 8444)
    assert await addrman.add_peer_info([colliding_peer], source)
    assert await addrman.size() == 7
    next_peer = PeerInfo("250.1.1.9", 8444)
    assert await addrman.add_peer_info([next_peer], source)
    assert await addrman.size() == 8
async def get_peer_info(self) -> Optional[PeerInfo]:
    """Determine this node's externally visible address via checkip.amazonaws.com.

    Returns None when the lookup fails or the resulting address is invalid.
    """
    port = self._port
    external_ip: Optional[str] = None
    try:
        async with ClientSession() as session:
            async with session.get("https://checkip.amazonaws.com/") as resp:
                if resp.status == 200:
                    external_ip = str(await resp.text()).rstrip()
    except Exception:
        # Best-effort: any network failure means we simply don't know our IP.
        external_ip = None
    if external_ip is None:
        return None
    peer = PeerInfo(external_ip, uint16(port))
    # Reject addresses PeerInfo considers invalid.
    return peer if peer.is_valid() else None
async def test_wallet_reorg_sync(self, wallet_node_simulator, default_400_blocks, trusted):
    """A reorg that orphans all reward blocks must zero the wallet's balance
    and remove its transactions."""
    num_blocks = 5
    full_nodes, wallets = wallet_node_simulator
    full_node_api = full_nodes[0]
    wallet_node, server_2 = wallets[0]
    fn_server = full_node_api.full_node.server
    wsm: WalletStateManager = wallet_node.wallet_state_manager
    wallet = wsm.main_wallet
    ph = await wallet.get_new_puzzlehash()
    # Configure trust before connecting.
    if trusted:
        wallet_node.config["trusted_peers"] = {
            fn_server.node_id.hex(): fn_server.node_id.hex()
        }
    else:
        wallet_node.config["trusted_peers"] = {}
    await server_2.start_client(
        PeerInfo(self_hostname, uint16(fn_server._port)), None)

    # Insert 400 blocks
    for block in default_400_blocks:
        await full_node_api.full_node.respond_block(
            full_node_protocol.RespondBlock(block))
    # Farm few more with reward
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(
            FarmNewBlockProtocol(ph))
    # Confirm we have the funds
    funds = sum([
        calculate_pool_reward(uint32(i)) +
        calculate_base_farmer_reward(uint32(i))
        for i in range(1, num_blocks)
    ])
    await time_out_assert(5, wallet.get_confirmed_balance, funds)

    async def get_tx_count(wallet_id):
        # Number of transactions recorded for the given wallet.
        txs = await wsm.get_all_transactions(wallet_id)
        return len(txs)

    await time_out_assert(5, get_tx_count, 2 * (num_blocks - 1), 1)

    # Reorg blocks that carry reward
    num_blocks = 30
    blocks_reorg = bt.get_consecutive_blocks(
        num_blocks, block_list_input=default_400_blocks[:-5])
    for block in blocks_reorg[-30:]:
        await full_node_api.full_node.respond_block(
            full_node_protocol.RespondBlock(block))
    # All reward blocks were orphaned: no transactions, zero balance.
    await time_out_assert(5, get_tx_count, 0, 1)
    await time_out_assert(5, wallet.get_confirmed_balance, 0)
async def test_public_connections(self, wallet_node):
    """A wallet server must be able to open a connection to a full node server."""
    full_nodes, wallets = wallet_node
    node_server: ChiaServer = full_nodes[0].full_node.server
    wallet_node, wallet_server = wallets[0]
    connected = await wallet_server.start_client(
        PeerInfo(self_hostname, uint16(node_server._port)), None)
    assert connected is True
def __init__(
    self,
    server: ChiaServer,
    root_path: Path,
    target_outbound_count: int,
    peer_db_path: str,
    introducer_info: Optional[Dict],
    dns_servers: List[str],
    peer_connect_interval: int,
    selected_network: str,
    default_port: Optional[int],
    log,
):
    """Peer-discovery state holder with DNS-introducer support.

    Args:
        server: ChiaServer used to open outbound connections.
        root_path: chia root directory; peer_db_path is resolved against it.
        target_outbound_count: desired number of outbound peer connections.
        peer_db_path: relative path of the peer database file (must end
            in .sqlite when selected_network != "mainnet").
        introducer_info: optional {"host": ..., "port": ...} for the introducer.
        dns_servers: DNS seeder hostnames for peer discovery.
        peer_connect_interval: interval (seconds) between connection attempts.
        selected_network: network name, e.g. "mainnet" or a testnet id.
        default_port: fallback peer port; when None, looked up from
            NETWORK_ID_DEFAULT_PORTS by network name.
        log: logger instance.
    """
    self.server: ChiaServer = server
    self.message_queue: asyncio.Queue = asyncio.Queue()
    self.is_closed = False
    self.target_outbound_count = target_outbound_count
    # This is a double check to make sure testnet and mainnet peer databases never mix up.
    # If the network is not 'mainnet', it names the peer db differently, including the selected_network.
    if selected_network != "mainnet":
        if not peer_db_path.endswith(".sqlite"):
            raise ValueError(
                f"Invalid path for peer table db: {peer_db_path}. Make the path end with .sqlite"
            )
        peer_db_path = peer_db_path[:-7] + "_" + selected_network + ".sqlite"
    self.peer_db_path = path_from_root(root_path, peer_db_path)
    self.dns_servers = dns_servers
    if introducer_info is not None:
        self.introducer_info: Optional[PeerInfo] = PeerInfo(
            introducer_info["host"],
            introducer_info["port"],
        )
    else:
        self.introducer_info = None
    self.peer_connect_interval = peer_connect_interval
    self.log = log
    self.relay_queue = None  # presumably assigned later where relay is enabled — TODO confirm
    self.address_manager: Optional[AddressManager] = None
    self.connection_time_pretest: Dict = {}
    self.received_count_from_peers: Dict = {}
    self.lock = asyncio.Lock()
    # Background task handles; None until started.
    self.connect_peers_task: Optional[asyncio.Task] = None
    self.serialize_task: Optional[asyncio.Task] = None
    self.cleanup_task: Optional[asyncio.Task] = None
    self.initial_wait: int = 0
    # DNS resolver is best-effort; discovery can still work without it.
    try:
        self.resolver: Optional[
            dns.asyncresolver.Resolver] = dns.asyncresolver.Resolver()
    except Exception:
        self.resolver = None
        self.log.exception("Error initializing asyncresolver")
    self.pending_outbound_connections: Set[str] = set()
    self.pending_tasks: Set[asyncio.Task] = set()
    self.default_port: Optional[int] = default_port
    if default_port is None and selected_network in NETWORK_ID_DEFAULT_PORTS:
        self.default_port = NETWORK_ID_DEFAULT_PORTS[selected_network]
def mark_good_(self, addr: PeerInfo, test_before_evict: bool, timestamp: int) -> None:
    """Mark `addr` as successfully connected and move it toward the tried table.

    Mirrors the Satoshi-client AddrMan "Good" logic: reset attempt counters,
    then either move the entry into the tried table or, when
    `test_before_evict` is set and the target slot is occupied, record a
    potential eviction collision for later resolution.
    """
    # NOTE(review): last_good is advanced before the validity/lookup checks
    # below, so it updates even when `addr` is unknown or invalid — confirm
    # this matches the reference implementation.
    self.last_good = timestamp
    (info, node_id) = self.find_(addr)
    if not addr.is_valid(self.allow_private_subnets):
        return
    if info is None:
        return
    if node_id is None:
        return
    # Only act when the stored entry matches this exact host:port.
    if not (info.peer_info.host == addr.host and info.peer_info.port == addr.port):
        return
    # update info
    info.last_success = timestamp
    info.last_try = timestamp
    info.num_attempts = 0
    # timestamp is not updated here, to avoid leaking information about
    # currently-connected peers.
    # if it is already in the tried set, don't do anything else
    if info.is_tried:
        return
    # find a bucket it is in now
    bucket_rand = randrange(NEW_BUCKET_COUNT)
    new_bucket = -1
    for n in range(NEW_BUCKET_COUNT):
        # Start scanning at a random offset so bucket choice is unbiased.
        cur_new_bucket = (n + bucket_rand) % NEW_BUCKET_COUNT
        cur_new_bucket_pos = info.get_bucket_position(
            self.key, True, cur_new_bucket)
        if self.new_matrix[cur_new_bucket][cur_new_bucket_pos] == node_id:
            new_bucket = cur_new_bucket
            break
    # if no bucket is found, something bad happened;
    if new_bucket == -1:
        return
    # NOTE(Florin): Double check this. It's not used anywhere else.
    # which tried bucket to move the entry to
    tried_bucket = info.get_tried_bucket(self.key)
    tried_bucket_pos = info.get_bucket_position(self.key, False, tried_bucket)
    # Will moving this address into tried evict another entry?
    if test_before_evict and self.tried_matrix[tried_bucket][
            tried_bucket_pos] != -1:
        # Defer: remember the collision (bounded list) instead of evicting now.
        if len(self.tried_collisions) < TRIED_COLLISION_SIZE:
            if node_id not in self.tried_collisions:
                self.tried_collisions.append(node_id)
    else:
        self.make_tried_(info, node_id)
async def test_serialization(self):
    """Round-trip the address manager through the sqlite-backed
    AddressManagerStore and verify all three peers survive."""
    addrman = AddressManagerTest()
    now = int(math.floor(time.time()))
    t_peer1 = TimestampedPeerInfo("250.7.1.1", 8333, now - 10000)
    t_peer2 = TimestampedPeerInfo("250.7.2.2", 9999, now - 20000)
    t_peer3 = TimestampedPeerInfo("250.7.3.3", 9999, now - 30000)
    source = PeerInfo("252.5.1.1", 8333)
    await addrman.add_to_new_table([t_peer1, t_peer2, t_peer3], source)
    await addrman.mark_good(PeerInfo("250.7.1.1", 8333))
    # NOTE(review): this writes peer_table.db into the current working
    # directory rather than a tmp dir; it is cleaned up below on success,
    # but a failing assert leaks the file.
    db_filename = Path("peer_table.db")
    if db_filename.exists():
        db_filename.unlink()
    connection = await aiosqlite.connect(db_filename)
    address_manager_store = await AddressManagerStore.create(connection)
    await address_manager_store.serialize(addrman)
    addrman2 = await address_manager_store.deserialize()

    # select_peer is randomized; sample until all three distinct peers appear.
    retrieved_peers = []
    for _ in range(50):
        peer = await addrman2.select_peer()
        if peer not in retrieved_peers:
            retrieved_peers.append(peer)
        if len(retrieved_peers) == 3:
            break
    assert len(retrieved_peers) == 3
    wanted_peers = [
        ExtendedPeerInfo(t_peer1, source),
        ExtendedPeerInfo(t_peer2, source),
        ExtendedPeerInfo(t_peer3, source),
    ]
    recovered = 0
    for target_peer in wanted_peers:
        for current_peer in retrieved_peers:
            if (
                current_peer.peer_info == target_peer.peer_info
                and current_peer.src == target_peer.src
                and current_peer.timestamp == target_peer.timestamp
            ):
                recovered += 1
    assert recovered == 3
    await connection.close()
    db_filename.unlink()
async def test_addrman_create(self):
    """create_ should register a peer that find_ can subsequently locate."""
    addrman = AddressManagerTest()
    assert await addrman.size() == 0
    peer1 = PeerInfo("250.1.2.1", 8444)
    t_peer = TimestampedPeerInfo("250.1.2.1", 8444, 0)
    created_info, node_id = addrman.create_(t_peer, peer1)
    assert created_info.peer_info == peer1
    found_info, _ = addrman.find_(peer1)
    assert found_info.peer_info == peer1