async def test_serialization(self):
    """Round-trip an AddressManager through AddressManagerStore.

    Adds three timestamped peers, serializes the manager to a fresh SQLite
    file, deserializes it, and verifies every peer (address, source, and
    timestamp) survives the round trip intact.
    """
    addrman = AddressManagerTest()
    now = int(math.floor(time.time()))
    t_peer1 = TimestampedPeerInfo("250.7.1.1", 8333, now - 10000)
    t_peer2 = TimestampedPeerInfo("250.7.2.2", 9999, now - 20000)
    t_peer3 = TimestampedPeerInfo("250.7.3.3", 9999, now - 30000)
    source = PeerInfo("252.5.1.1", 8333)
    await addrman.add_to_new_table([t_peer1, t_peer2, t_peer3], source)
    await addrman.mark_good(PeerInfo("250.7.1.1", 8333))

    # Start from a clean database file in case a previous run left one behind.
    db_filename = Path("peer_table.db")
    if db_filename.exists():
        db_filename.unlink()
    connection = await aiosqlite.connect(db_filename)
    try:
        address_manager_store = await AddressManagerStore.create(connection)
        await address_manager_store.serialize(addrman)
        addrman2 = await address_manager_store.deserialize()

        # select_peer() is randomized; sample until all three distinct peers
        # are seen (50 attempts is ample for a 3-peer table).
        retrieved_peers = []
        for _ in range(50):
            peer = await addrman2.select_peer()
            if peer not in retrieved_peers:
                retrieved_peers.append(peer)
            if len(retrieved_peers) == 3:
                break
        assert len(retrieved_peers) == 3

        wanted_peers = [
            ExtendedPeerInfo(t_peer1, source),
            ExtendedPeerInfo(t_peer2, source),
            ExtendedPeerInfo(t_peer3, source),
        ]
        recovered = 0
        for target_peer in wanted_peers:
            for current_peer in retrieved_peers:
                if (
                    current_peer.peer_info == target_peer.peer_info
                    and current_peer.src == target_peer.src
                    and current_peer.timestamp == target_peer.timestamp
                ):
                    recovered += 1
        assert recovered == 3
    finally:
        # Close the connection and remove the temp DB even when an assertion
        # above fails; otherwise the file/handle leaks into later tests.
        await connection.close()
        db_filename.unlink()
async def get_nodes(self) -> List[Tuple[int, ExtendedPeerInfo]]:
    """Fetch every stored peer node as a (node_id, ExtendedPeerInfo) pair.

    Reads all rows from the ``peer_nodes`` table and parses each serialized
    value back into an ``ExtendedPeerInfo``.
    """
    cursor = await self.db.execute("SELECT node_id, value from peer_nodes")
    rows = await cursor.fetchall()
    await cursor.close()
    nodes: List[Tuple[int, ExtendedPeerInfo]] = []
    for row_id, serialized in rows:
        nodes.append((row_id, ExtendedPeerInfo.from_string(serialized)))
    return nodes
async def test_addrman_new_bucket(self):
    """Exercise ExtendedPeerInfo.get_new_bucket placement properties."""
    t_peer1 = TimestampedPeerInfo("250.1.2.1", 8444, 0)
    source1 = PeerInfo("250.1.2.1", 8444)
    t_peer2 = TimestampedPeerInfo("250.1.2.1", 9999, 0)
    peer_info1 = ExtendedPeerInfo(t_peer1, source1)
    # Test: Make sure key actually randomizes bucket placement. A fail on
    # this test could be a security issue.
    key1 = 2**256 - 1
    key2 = 2**128 - 1
    bucket1 = peer_info1.get_new_bucket(key1)
    bucket2 = peer_info1.get_new_bucket(key2)
    assert bucket1 != bucket2

    # Test: Ports should not affect bucket placement in the addr
    peer_info2 = ExtendedPeerInfo(t_peer2, source1)
    assert peer_info1.get_new_bucket(key1) == peer_info2.get_new_bucket(key1)

    # Test: IP addresses in the same group (\16 prefix for IPv4) should
    # always map to the same bucket.
    buckets = []
    for i in range(255):
        peer = PeerInfo("250.1.1." + str(i), 8444)
        t_peer = TimestampedPeerInfo("250.1.1." + str(i), 8444, 0)
        extended_peer_info = ExtendedPeerInfo(t_peer, peer)
        bucket = extended_peer_info.get_new_bucket(key1)
        if bucket not in buckets:
            buckets.append(bucket)
    assert len(buckets) == 1

    # Test: IP addresses in the same source groups should map to no more
    # than 64 buckets.
    buckets = []
    # The source is loop-invariant; build it once.
    src = PeerInfo("251.4.1.1", 8444)
    for i in range(4 * 255):
        t_peer = TimestampedPeerInfo(
            str(250 + i // 255) + "." + str(i % 256) + ".1.1", 8444, 0
        )
        extended_peer_info = ExtendedPeerInfo(t_peer, src)
        bucket = extended_peer_info.get_new_bucket(key1)
        if bucket not in buckets:
            buckets.append(bucket)
    assert len(buckets) <= 64

    # Test: IP addresses in the different source groups should map to more
    # than 64 buckets.
    buckets = []
    for i in range(255):
        src = PeerInfo("250." + str(i) + ".1.1", 8444)
        t_peer = TimestampedPeerInfo("250.1.1.1", 8444, 0)
        extended_peer_info = ExtendedPeerInfo(t_peer, src)
        bucket = extended_peer_info.get_new_bucket(key1)
        if bucket not in buckets:
            buckets.append(bucket)
    assert len(buckets) > 64
async def test_addrman_tried_bucket(self):
    """Exercise ExtendedPeerInfo.get_tried_bucket placement properties."""
    peer1 = PeerInfo("250.1.1.1", 8444)
    t_peer1 = TimestampedPeerInfo("250.1.1.1", 8444, 0)
    peer2 = PeerInfo("250.1.1.1", 9999)
    t_peer2 = TimestampedPeerInfo("250.1.1.1", 9999, 0)
    source1 = PeerInfo("250.1.1.1", 8444)
    info1 = ExtendedPeerInfo(t_peer1, source1)

    # The key must actually randomize tried-bucket placement; a failure
    # here could be a security issue.
    key1 = 2**256 - 1
    key2 = 2**128 - 1
    assert info1.get_tried_bucket(key1) != info1.get_tried_bucket(key2)

    # Same IP with a different port yields a different key, so the two
    # addresses can land in different tried buckets.
    info2 = ExtendedPeerInfo(t_peer2, source1)
    assert peer1.get_key() != peer2.get_key()
    assert info1.get_tried_bucket(key1) != info2.get_tried_bucket(key1)

    # Addresses sharing a /16 group must occupy no more than 8 tried buckets.
    seen = set()
    for octet in range(255):
        addr = "250.1.1." + str(octet)
        entry = ExtendedPeerInfo(
            TimestampedPeerInfo(addr, 8444, 0), PeerInfo(addr, 8444)
        )
        seen.add(entry.get_tried_bucket(key1))
    assert len(seen) == 8

    # Addresses in different groups should spread over more than 8 buckets.
    seen = set()
    for octet in range(255):
        addr = "250." + str(octet) + ".1.1"
        entry = ExtendedPeerInfo(
            TimestampedPeerInfo(addr, 8444, 0), PeerInfo(addr, 8444)
        )
        seen.add(entry.get_tried_bucket(key1))
    assert len(seen) > 8