async def _respond_peers_common(self, request, peer_src, is_full_node):
        # Check if we got the peers from a full node or from the introducer.
        peers_adjusted_timestamp = []
        for peer in request.peer_list:
            if peer.timestamp < 100000000 or peer.timestamp > time.time() + 10 * 60:
                # Invalid timestamp, predefine a bad one.
                current_peer = TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(int(time.time() - 5 * 24 * 60 * 60)),
                )
            else:
                current_peer = peer
            if not is_full_node:
                current_peer = TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(0),
                )
            peers_adjusted_timestamp.append(current_peer)

        if is_full_node:
            await self.address_manager.add_to_new_table(
                peers_adjusted_timestamp, peer_src, 2 * 60 * 60)
        else:
            await self.address_manager.add_to_new_table(
                peers_adjusted_timestamp, None, 0)
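A minimal sketch of how the two call paths above might be wired up (the handler names are assumptions, not part of the snippet): peers relayed by another full node keep their timestamps and source, while peers returned by the introducer are stored untimestamped with no source.

    async def respond_peers(self, request, peer_src):
        # Hypothetical wrapper: peers relayed by another full node.
        await self._respond_peers_common(request, peer_src, True)

    async def respond_peers_introducer(self, request, peer_src):
        # Hypothetical wrapper: peers returned by the introducer.
        await self._respond_peers_common(request, None, False)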
Example #2
    async def test_addrman_new_bucket(self):
        t_peer1 = TimestampedPeerInfo("250.1.2.1", 8444, 0)
        source1 = PeerInfo("250.1.2.1", 8444)
        t_peer2 = TimestampedPeerInfo("250.1.2.1", 9999, 0)
        peer_info1 = ExtendedPeerInfo(t_peer1, source1)
        # Test: Make sure key actually randomizes bucket placement. A fail on
        # this test could be a security issue.
        key1 = 2**256 - 1
        key2 = 2**128 - 1
        bucket1 = peer_info1.get_new_bucket(key1)
        bucket2 = peer_info1.get_new_bucket(key2)
        assert bucket1 != bucket2

        # Test: Ports should not affect bucket placement in the address manager.
        peer_info2 = ExtendedPeerInfo(t_peer2, source1)
        assert peer_info1.get_new_bucket(key1) == peer_info2.get_new_bucket(key1)

        # Test: IP addresses in the same group (/16 prefix for IPv4) should
        # always map to the same bucket.
        buckets = []
        for i in range(255):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            t_peer = TimestampedPeerInfo("250.1.1." + str(i), 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, peer)
            bucket = extended_peer_info.get_new_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) == 1

        # Test: IP addresses in the same source group should map to no more
        # than 64 buckets.
        buckets = []
        for i in range(4 * 255):
            src = PeerInfo("251.4.1.1", 8444)
            peer = PeerInfo(
                str(250 + i // 255) + "." + str(i % 256) + ".1.1", 8444)
            t_peer = TimestampedPeerInfo(
                str(250 + i // 255) + "." + str(i % 256) + ".1.1", 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, src)
            bucket = extended_peer_info.get_new_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) <= 64

        # Test: IP addresses in different source groups should map to more
        # than 64 buckets.
        buckets = []
        for i in range(255):
            src = PeerInfo("250." + str(i) + ".1.1", 8444)
            peer = PeerInfo("250.1.1.1", 8444)
            t_peer = TimestampedPeerInfo("250.1.1.1", 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, src)
            bucket = extended_peer_info.get_new_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)

        assert len(buckets) > 64
Example #3
 async def test_cleanup(self):
     addrman = AddressManagerTest()
     peer1 = TimestampedPeerInfo("250.250.2.1", 8444, 100000)
     peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
     source = PeerInfo("252.5.1.1", 8333)
     assert await addrman.add_to_new_table([peer1], source)
     assert await addrman.add_to_new_table([peer2], source)
     await addrman.mark_good(PeerInfo("250.250.2.2", 9999))
     assert await addrman.size() == 2
     for _ in range(5):
         await addrman.attempt(peer1, True, time.time() - 61)
     addrman.cleanup(7 * 3600 * 24, 5)
     assert await addrman.size() == 1
Example #4
 @classmethod
 def from_string(cls, peer_str: str):
     blobs = peer_str.split(" ")
     assert len(blobs) == 4
     peer_info = TimestampedPeerInfo(blobs[0], uint16(int(blobs[1])), uint64(0))
     src_peer = PeerInfo(blobs[2], uint16(int(blobs[3])))
     return cls(peer_info, src_peer)
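A minimal usage sketch, assuming this classmethod lives on ExtendedPeerInfo and that the serialized form is "host port src_host src_port" (the addresses below are illustrative):

peer = ExtendedPeerInfo.from_string("250.1.2.1 8444 252.5.1.1 8333")
assert peer.peer_info.port == 8444
assert peer.src.port == 8333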
Example #5
    async def _periodically_self_advertise(self):
        while not self.is_closed:
            try:
                try:
                    await asyncio.sleep(24 * 3600)
                except asyncio.CancelledError:
                    return
                # Clean up known nodes for neighbours every 24 hours.
                async with self.lock:
                    for neighbour in list(self.neighbour_known_peers.keys()):
                        self.neighbour_known_peers[neighbour].clear()
                # Self advertise every 24 hours.
                peer = await self.server.get_peer_info()
                if peer is None:
                    continue
                timestamped_peer = [
                    TimestampedPeerInfo(
                        peer.host,
                        peer.port,
                        uint64(int(time.time())),
                    )
                ]
                msg = Message(
                    "respond_peers",
                    full_node_protocol.RespondPeers(timestamped_peer),
                )
                await self.server.send_to_all([msg], NodeType.FULL_NODE)

            except Exception as e:
                self.log.error(f"Exception in self advertise: {e}")
                self.log.error(f"Traceback: {traceback.format_exc()}")
Example #6
 async def on_connect(self, peer: ws.WSChiaConnection):
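     # Inbound full-node connection: record it in the new table and, if a relay queue is set, queue it for relay.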
     if (
         peer.is_outbound is False
         and peer.peer_server_port is not None
         and peer.connection_type is NodeType.FULL_NODE
         and self.server._local_type is NodeType.FULL_NODE
         and self.address_manager is not None
     ):
         timestamped_peer_info = TimestampedPeerInfo(
             peer.peer_host,
             peer.peer_server_port,
             uint64(int(time.time())),
         )
         await self.address_manager.add_to_new_table([timestamped_peer_info], peer.get_peer_info(), 0)
         if self.relay_queue is not None:
             self.relay_queue.put_nowait((timestamped_peer_info, 1))
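     # Outbound full-node connection: ask the peer for more addresses.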
     if (
         peer.is_outbound
         and peer.peer_server_port is not None
         and peer.connection_type is NodeType.FULL_NODE
         and self.server._local_type is NodeType.FULL_NODE
         and self.address_manager is not None
     ):
         msg = make_msg(ProtocolMessageTypes.request_peers, full_node_protocol.RequestPeers())
         await peer.send_message(msg)
Example #7
 async def _periodically_self_advertise(self):
     while not self.is_closed:
         try:
             await asyncio.sleep(24 * 3600)
             # Clean up known nodes for neighbours every 24 hours.
             async with self.lock:
                 for neighbour in list(self.neighbour_known_peers.keys()):
                     self.neighbour_known_peers[neighbour].clear()
             # Self advertise every 24 hours.
             peer = await self.global_connections.get_local_peerinfo()
             if peer is None:
                 continue
             timestamped_peer = [
                 TimestampedPeerInfo(
                     peer.host,
                     peer.port,
                     uint64(int(time.time())),
                 )
             ]
             outbound_message = OutboundMessage(
                 NodeType.FULL_NODE,
                 Message(
                     "respond_peers_full_node",
                     full_node_protocol.RespondPeers(timestamped_peer),
                 ),
                 Delivery.BROADCAST,
             )
             if self.server is not None:
                 self.server.push_message(outbound_message)
         except Exception as e:
             self.log.error(f"Exception in self advertise: {e}")
             self.log.error(f"Traceback: {traceback.format_exc()}")
Example #8
 async def _process_messages(self):
     connection_time_pretest: Dict = {}
     while not self.is_closed:
         try:
             message, peer_info = await self.message_queue.get()
             if peer_info is None or not peer_info.port:
                 continue
             if message == "make_tried":
                 await self.address_manager.mark_good(peer_info, True)
                 await self.address_manager.connect(peer_info)
             elif message == "mark_attempted":
                 await self.address_manager.attempt(peer_info, True)
             elif message == "mark_attempted_soft":
                 await self.address_manager.attempt(peer_info, False)
             elif message == "update_connection_time":
                 if peer_info.host not in connection_time_pretest:
                     connection_time_pretest[peer_info.host] = time.time()
                  if time.time() - connection_time_pretest[peer_info.host] > 60:
                     await self.address_manager.connect(peer_info)
                     connection_time_pretest[peer_info.host] = time.time()
             elif message == "new_inbound_connection":
                 timestamped_peer_info = TimestampedPeerInfo(
                     peer_info.host,
                     peer_info.port,
                     uint64(int(time.time())),
                 )
                 await self.address_manager.add_to_new_table(
                     [timestamped_peer_info], peer_info, 0)
                 # await self.address_manager.mark_good(peer_info, True)
                 if self.relay_queue is not None:
                     self.relay_queue.put_nowait((timestamped_peer_info, 1))
         except Exception as e:
             self.log.error(f"Exception in process message: {e}")
Example #9
    async def request_peers_introducer(
        self,
        request: RequestPeersIntroducer,
        peer: WSChiaConnection,
    ) -> Optional[Message]:
        max_peers = self.introducer.max_peers_to_send
        if self.introducer.server is None or self.introducer.server.introducer_peers is None:
            return None
        rawpeers = self.introducer.server.introducer_peers.get_peers(
            max_peers * 5, True, self.introducer.recent_peer_threshold)

        peers = []
        for r_peer in rawpeers:
            if r_peer.get_hash() not in self.introducer.vetted:
                continue
            if self.introducer.vetted[r_peer.get_hash()]:
                if r_peer.host == peer.peer_host and r_peer.port == peer.peer_server_port:
                    continue
                peer_without_timestamp = TimestampedPeerInfo(
                    r_peer.host,
                    r_peer.port,
                    uint64(0),
                )
                peers.append(peer_without_timestamp)

            if len(peers) >= max_peers:
                break

        self.introducer.log.info(f"Sending vetted {peers}")

        msg = make_msg(ProtocolMessageTypes.respond_peers_introducer,
                       RespondPeersIntroducer(peers))
        return msg
Example #10
    async def request_peers_with_peer_info(
        self,
        request: RequestPeers,
        peer_info: PeerInfo,
    ) -> AsyncGenerator[OutboundMessage, None]:
        max_peers = self.max_peers_to_send
        if self.global_connections.introducer_peers is None:
            return
        rawpeers = self.global_connections.introducer_peers.get_peers(
            max_peers * 5, True, self.recent_peer_threshold
        )

        peers = []
        for peer in rawpeers:
            if peer.get_hash() not in self.vetted:
                continue
            if self.vetted[peer.get_hash()]:
                if peer.host == peer_info.host and peer.port == peer_info.port:
                    continue
                peer_without_timestamp = TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(0),
                )
                peers.append(peer_without_timestamp)

            if len(peers) >= max_peers:
                break

        log.info(f"Sending vetted {peers}")

        msg = Message("respond_peers", RespondPeers(peers))
        yield OutboundMessage(NodeType.FULL_NODE, msg, Delivery.RESPOND)
        yield OutboundMessage(NodeType.WALLET, msg, Delivery.RESPOND)
Example #11
    async def test_addrman_tried_bucket(self):
        peer1 = PeerInfo("250.1.1.1", 8444)
        t_peer1 = TimestampedPeerInfo("250.1.1.1", 8444, 0)
        peer2 = PeerInfo("250.1.1.1", 9999)
        t_peer2 = TimestampedPeerInfo("250.1.1.1", 9999, 0)
        source1 = PeerInfo("250.1.1.1", 8444)
        peer_info1 = ExtendedPeerInfo(t_peer1, source1)
        # Test: Make sure key actually randomizes bucket placement. A fail on
        # this test could be a security issue.
        key1 = 2**256 - 1
        key2 = 2**128 - 1
        bucket1 = peer_info1.get_tried_bucket(key1)
        bucket2 = peer_info1.get_tried_bucket(key2)
        assert bucket1 != bucket2

        # Test: Two addresses with same IP but different ports can map to
        # different buckets because they have different keys.
        peer_info2 = ExtendedPeerInfo(t_peer2, source1)
        assert peer1.get_key() != peer2.get_key()
        assert peer_info1.get_tried_bucket(key1) != peer_info2.get_tried_bucket(key1)

        # Test: IP addresses in the same group (/16 prefix for IPv4) should
        # never get more than 8 buckets.
        buckets = []
        for i in range(255):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            t_peer = TimestampedPeerInfo("250.1.1." + str(i), 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, peer)
            bucket = extended_peer_info.get_tried_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)

        assert len(buckets) == 8

        # Test: IP addresses in different groups should map to more than
        # 8 buckets.
        buckets = []
        for i in range(255):
            peer = PeerInfo("250." + str(i) + ".1.1", 8444)
            t_peer = TimestampedPeerInfo("250." + str(i) + ".1.1", 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, peer)
            bucket = extended_peer_info.get_tried_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) > 8
Example #12
    async def _respond_peers_common(self, request, peer_src, is_full_node):
        # Check if we got the peers from a full node or from the introducer.
        peers_adjusted_timestamp = []
        is_misbehaving = False
        if len(request.peer_list) > MAX_PEERS_RECEIVED_PER_REQUEST:
            is_misbehaving = True
        if is_full_node:
            if peer_src is None:
                return
            async with self.lock:
                if peer_src.host not in self.received_count_from_peers:
                    self.received_count_from_peers[peer_src.host] = 0
                self.received_count_from_peers[peer_src.host] += len(request.peer_list)
                if self.received_count_from_peers[peer_src.host] > MAX_TOTAL_PEERS_RECEIVED:
                    is_misbehaving = True
        if is_misbehaving:
            return
        for peer in request.peer_list:
            if peer.timestamp < 100000000 or peer.timestamp > time.time() + 10 * 60:
                # Invalid timestamp, predefine a bad one.
                current_peer = TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(int(time.time() - 5 * 24 * 60 * 60)),
                )
            else:
                current_peer = peer
            if not is_full_node:
                current_peer = TimestampedPeerInfo(
                    peer.host,
                    peer.port,
                    uint64(0),
                )
            peers_adjusted_timestamp.append(current_peer)

        if is_full_node:
            await self.address_manager.add_to_new_table(
                peers_adjusted_timestamp, peer_src, 2 * 60 * 60)
        else:
            await self.address_manager.add_to_new_table(
                peers_adjusted_timestamp, None, 0)
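The two rate-limit constants referenced above are defined at module level; the values below are illustrative assumptions, not taken from the snippet:

MAX_PEERS_RECEIVED_PER_REQUEST = 1000  # assumed cap on peers accepted in a single respond_peers message
MAX_TOTAL_PEERS_RECEIVED = 3000        # assumed cap on peers accepted from any one host in total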
Example #13
    async def test_serialization(self):
        addrman = AddressManagerTest()
        now = int(math.floor(time.time()))
        t_peer1 = TimestampedPeerInfo("250.7.1.1", 8333, now - 10000)
        t_peer2 = TimestampedPeerInfo("250.7.2.2", 9999, now - 20000)
        t_peer3 = TimestampedPeerInfo("250.7.3.3", 9999, now - 30000)
        source = PeerInfo("252.5.1.1", 8333)
        await addrman.add_to_new_table([t_peer1, t_peer2, t_peer3], source)
        await addrman.mark_good(PeerInfo("250.7.1.1", 8333))

        db_filename = Path("peer_table.db")
        if db_filename.exists():
            db_filename.unlink()
        connection = await aiosqlite.connect(db_filename)
        address_manager_store = await AddressManagerStore.create(connection)
        await address_manager_store.serialize(addrman)
        addrman2 = await address_manager_store.deserialize()

        retrieved_peers = []
        for _ in range(50):
            peer = await addrman2.select_peer()
            if peer not in retrieved_peers:
                retrieved_peers.append(peer)
            if len(retrieved_peers) == 3:
                break
        assert len(retrieved_peers) == 3
        wanted_peers = [
            ExtendedPeerInfo(t_peer1, source),
            ExtendedPeerInfo(t_peer2, source),
            ExtendedPeerInfo(t_peer3, source),
        ]
        recovered = 0
        for target_peer in wanted_peers:
            for current_peer in retrieved_peers:
                if (
                    current_peer.peer_info == target_peer.peer_info
                    and current_peer.src == target_peer.src
                    and current_peer.timestamp == target_peer.timestamp
                ):
                    recovered += 1
        assert recovered == 3
        await connection.close()
        db_filename.unlink()
Example #14
    async def test_addrman_create(self):
        addrman = AddressManagerTest()
        assert await addrman.size() == 0

        peer1 = PeerInfo("250.1.2.1", 8444)
        t_peer = TimestampedPeerInfo("250.1.2.1", 8444, 0)
        info, node_id = addrman.create_(t_peer, peer1)
        assert info.peer_info == peer1
        info, _ = addrman.find_(peer1)
        assert info.peer_info == peer1
Example #15
 async def add_peer_info(self, peers, peer_src=None):
     timestamped_peers = [
         TimestampedPeerInfo(
             peer.host,
             peer.port,
             uint64(0),
         ) for peer in peers
     ]
     added = await self.add_to_new_table(timestamped_peers, peer_src)
     return added
Example #16
 def get_peers(self,
               max_peers: int = 0,
               randomize: bool = False,
               recent_threshold=9999999) -> List[TimestampedPeerInfo]:
      target_peers = [
          TimestampedPeerInfo(peer.host, uint16(peer.port), uint64(0))
          for peer in self._peers
          if time.time() - self.time_added[peer.get_hash()] < recent_threshold
      ]
     if not max_peers or max_peers > len(target_peers):
         max_peers = len(target_peers)
     if randomize:
         random.shuffle(target_peers)
     return target_peers[:max_peers]
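A minimal usage sketch (introducer_peers here stands for whatever object exposes this method, as in the earlier snippets): request up to 10 peers seen within the last hour, in random order.

recent = introducer_peers.get_peers(max_peers=10, randomize=True, recent_threshold=3600)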
Example #17
    async def test_addrman_delete(self):
        addrman = AddressManagerTest()
        assert await addrman.size() == 0

        peer1 = PeerInfo("250.1.2.1", 8444)
        t_peer = TimestampedPeerInfo("250.1.2.1", 8444, 0)
        info, node_id = addrman.create_(t_peer, peer1)

        # Test: Delete should actually delete the addr.
        assert await addrman.size() == 1
        addrman.delete_new_entry_(node_id)
        assert await addrman.size() == 0
        info2, _ = addrman.find_(peer1)
        assert info2 is None
Example #18
    def get_peers_(self) -> List[TimestampedPeerInfo]:
        addr: List[TimestampedPeerInfo] = []
        num_nodes = math.ceil(23 * len(self.random_pos) / 100)
        if num_nodes > 1000:
            num_nodes = 1000
        for n in range(len(self.random_pos)):
            if len(addr) >= num_nodes:
                return addr

            rand_pos = randrange(len(self.random_pos) - n) + n
            self.swap_random_(n, rand_pos)
            info = self.map_info[self.random_pos[n]]
            if not info.is_terrible():
                cur_peer_info = TimestampedPeerInfo(
                    info.peer_info.host,
                    uint16(info.peer_info.port),
                    uint64(info.timestamp),
                )
                addr.append(cur_peer_info)

        return addr
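A worked check of the sampling cap used above, as a standalone snippet: roughly 23% of known addresses are returned, and never more than 1000.

import math

# With 400 known addresses, at most ceil(23 * 400 / 100) == 92 are returned.
assert math.ceil(23 * 400 / 100) == 92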
Example #19
    async def test_addrman_get_peers(self):
        addrman = AddressManagerTest()
        assert await addrman.size() == 0
        peers1 = await addrman.get_peers()
        assert len(peers1) == 0

        peer1 = TimestampedPeerInfo("250.250.2.1", 8444, time.time())
        peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
        peer3 = TimestampedPeerInfo("251.252.2.3", 8444, time.time())
        peer4 = TimestampedPeerInfo("251.252.2.4", 8444, time.time())
        peer5 = TimestampedPeerInfo("251.252.2.5", 8444, time.time())
        source1 = PeerInfo("250.1.2.1", 8444)
        source2 = PeerInfo("250.2.3.3", 8444)

        # Test: Ensure GetPeers works with new addresses.
        assert await addrman.add_to_new_table([peer1], source1)
        assert await addrman.add_to_new_table([peer2], source2)
        assert await addrman.add_to_new_table([peer3], source1)
        assert await addrman.add_to_new_table([peer4], source1)
        assert await addrman.add_to_new_table([peer5], source1)

        # GetPeers returns 23% of addresses, 23% of 5 is 2 rounded up.
        peers2 = await addrman.get_peers()
        assert len(peers2) == 2

        # Test: Ensure GetPeers works with new and tried addresses.
        await addrman.mark_good(peer1)
        await addrman.mark_good(peer2)
        peers3 = await addrman.get_peers()
        assert len(peers3) == 2

        # Test: Ensure GetPeers still returns 23% when addrman has many addrs.
        for i in range(1, 8 * 256):
            octet1 = i % 256
            octet2 = (i >> 8) % 256
            peer = TimestampedPeerInfo(
                str(octet1) + "." + str(octet2) + ".1.23", 8444, time.time())
            await addrman.add_to_new_table([peer])
            if i % 8 == 0:
                await addrman.mark_good(peer)

        peers4 = await addrman.get_peers()
        percent = await addrman.size()
        percent = math.ceil(percent * 23 / 100)
        assert len(peers4) == percent