Code example #1
0
def start_reconnect_task(server: ChiaServer, peer_info_arg: PeerInfo, log,
                         auth: bool, prefer_ipv6: Optional[bool]):
    """
    Launch a background task that periodically verifies connectivity to a
    peer and re-establishes the connection whenever it is missing.

    Returns the created asyncio.Task so the caller can cancel it later.
    """
    # Use the peer info directly when it already holds a usable address;
    # otherwise resolve the hostname once, up front.
    if peer_info_arg.is_valid():
        target = peer_info_arg
    else:
        target = PeerInfo(get_host_addr(peer_info_arg, prefer_ipv6),
                          peer_info_arg.port)

    async def _watchdog():
        # Poll every 3 seconds; reconnect only when no current connection
        # matches either the resolved or the originally supplied peer info.
        while True:
            connected = any(
                conn.get_peer_info() in (target, peer_info_arg)
                for conn in server.all_connections.values()
            )
            if not connected:
                log.info(f"Reconnecting to peer {target}")
                try:
                    await server.start_client(target, None, auth=auth)
                except Exception as e:
                    # Best-effort: log the failure and retry on the next tick.
                    log.info(f"Failed to connect to {target} {e}")
            await asyncio.sleep(3)

    return asyncio.create_task(_watchdog())
Code example #2
0
File: server.py  Project: notpeter/chia-blockchain
    async def get_peer_info(self) -> Optional[PeerInfo]:
        """
        Determine this node's externally visible address by querying public
        "what is my IP" services: chia's own service first, then Amazon's
        checkip as a fallback.

        Returns:
            A valid PeerInfo built from the discovered IP and the configured
            port, or None if no service answered with a usable address.
        """
        port = self._port
        ip = None
        # The two lookups were identical except for the URL; try each
        # service in order until one yields a response. Each attempt gets
        # its own session with a 15-second overall timeout.
        for url in ("https://ip.chia.net/", "https://checkip.amazonaws.com/"):
            try:
                timeout = ClientTimeout(total=15)
                async with ClientSession(timeout=timeout) as session:
                    async with session.get(url) as resp:
                        if resp.status == 200:
                            ip = str(await resp.text()).rstrip()
            except Exception:
                # Best-effort: a failed lookup simply falls through to the
                # next service.
                ip = None
            if ip is not None:
                break
        if ip is None:
            return None
        peer = PeerInfo(ip, uint16(port))
        # Reject malformed / non-routable results.
        if not peer.is_valid():
            return None
        return peer
Code example #3
0
 def has_full_node(self) -> bool:
     """
     Report whether the server already holds a connection to the full node
     configured under "full_node_peer"; when it does, close every other
     full-node connection and return True.
     """
     if self.server is None:
         return False
     if "full_node_peer" not in self.config:
         return False
     cfg = self.config["full_node_peer"]
     configured = PeerInfo(cfg["host"], cfg["port"])
     known = [
         c.get_peer_info()
         for c in self.server.get_full_node_connections()
     ]
     # If full_node_peer is already an address, use it, otherwise
     # resolve it here.
     if configured.is_valid():
         resolved = configured
     else:
         resolved = PeerInfo(
             get_host_addr(configured.host,
                           self.base_config.get("prefer_ipv6")),
             configured.port)
     if configured not in known and resolved not in known:
         return False
     self.log.info(
         f"Will not attempt to connect to other nodes, already connected to {configured}"
     )
     # Drop every connection that is not the configured full node.
     for connection in self.server.get_full_node_connections():
         info = connection.get_peer_info()
         if info != configured and info != resolved:
             self.log.info(
                 f"Closing unnecessary connection to {connection.get_peer_logging()}."
             )
             asyncio.create_task(connection.close())
     return True
Code example #4
0
    def mark_good_(self, addr: PeerInfo, test_before_evict: bool,
                   timestamp: int) -> None:
        """
        Record a successful interaction with ``addr`` and attempt to promote
        its entry from the "new" buckets into the "tried" table.

        Args:
            addr: Peer that was successfully contacted.
            test_before_evict: When True and the target tried slot is
                occupied, queue the collision for later resolution instead
                of evicting the occupant immediately.
            timestamp: Time of the successful interaction.
        """
        # NOTE(review): last_good is updated before the validity/lookup
        # checks below, so it advances even for addresses that are then
        # ignored — confirm this is intended.
        self.last_good = timestamp
        (info, node_id) = self.find_(addr)
        if not addr.is_valid(self.allow_private_subnets):
            return
        if info is None:
            return
        if node_id is None:
            return

        # Only act when the stored entry matches the address exactly.
        if not (info.peer_info.host == addr.host
                and info.peer_info.port == addr.port):
            return

        # update info
        info.last_success = timestamp
        info.last_try = timestamp
        info.num_attempts = 0
        # timestamp is not updated here, to avoid leaking information about
        # currently-connected peers.

        # if it is already in the tried set, don't do anything else
        if info.is_tried:
            return

        # find a bucket it is in now
        # Scan the new-bucket matrix starting at a random offset so the
        # chosen bucket (when the entry occupies several) is unbiased.
        bucket_rand = randrange(NEW_BUCKET_COUNT)
        new_bucket = -1
        for n in range(NEW_BUCKET_COUNT):
            cur_new_bucket = (n + bucket_rand) % NEW_BUCKET_COUNT
            cur_new_bucket_pos = info.get_bucket_position(
                self.key, True, cur_new_bucket)
            if self.new_matrix[cur_new_bucket][cur_new_bucket_pos] == node_id:
                new_bucket = cur_new_bucket
                break

        # if no bucket is found, something bad happened;
        if new_bucket == -1:
            return

        # NOTE(Florin): Double check this. It's not used anywhere else.

        # which tried bucket to move the entry to
        tried_bucket = info.get_tried_bucket(self.key)
        tried_bucket_pos = info.get_bucket_position(self.key, False,
                                                    tried_bucket)

        # Will moving this address into tried evict another entry?
        if test_before_evict and self.tried_matrix[tried_bucket][
                tried_bucket_pos] != -1:
            # Remember the collision (bounded, deduplicated) rather than
            # evicting the current occupant right away.
            if len(self.tried_collisions) < TRIED_COLLISION_SIZE:
                if node_id not in self.tried_collisions:
                    self.tried_collisions.append(node_id)
        else:
            self.make_tried_(info, node_id)
Code example #5
0
 async def _address_relay(self):
     """
     Background loop that relays newly learned peer addresses to a subset
     of connected full nodes, following the Satoshi-client address-relay
     scheme (deterministic per-day selection of relay targets).
     """
     while not self.is_closed:
         try:
             try:
                 relay_peer, num_peers = await self.relay_queue.get()
             except asyncio.CancelledError:
                 # Queue was cancelled during shutdown; exit the loop.
                 return None
             relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
             if not relay_peer_info.is_valid():
                 continue
             # https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
             connections = self.server.get_full_node_connections()
             hashes = []
             cur_day = int(time.time()) // (24 * 60 * 60)
             # Rank each connection by a keyed hash of (secret key, peer
             # key, current day) so the relay targets rotate daily and
             # cannot be predicted externally.
             for connection in connections:
                 peer_info = connection.get_peer_info()
                 if peer_info is None:
                     continue
                 cur_hash = int.from_bytes(
                     bytes(
                         std_hash(
                             self.key.to_bytes(32, byteorder="big")
                             + peer_info.get_key()
                             + cur_day.to_bytes(3, byteorder="big")
                         )
                     ),
                     byteorder="big",
                 )
                 hashes.append((cur_hash, connection))
             hashes.sort(key=lambda x: x[0])
             # Relay to the num_peers connections with the smallest hashes.
             for index, (_, connection) in enumerate(hashes):
                 if index >= num_peers:
                     break
                 # NOTE(review): get_peer_info() is re-fetched here without
                 # a None check; it was non-None above, but if it can change
                 # between calls this could raise — confirm.
                 peer_info = connection.get_peer_info()
                 pair = (peer_info.host, peer_info.port)
                 # Skip neighbours that already know this address; otherwise
                 # record that they are about to learn it.
                 async with self.lock:
                     if pair in self.neighbour_known_peers and relay_peer.host in self.neighbour_known_peers[pair]:
                         continue
                     if pair not in self.neighbour_known_peers:
                         self.neighbour_known_peers[pair] = set()
                     self.neighbour_known_peers[pair].add(relay_peer.host)
                 if connection.peer_node_id is None:
                     continue
                 msg = make_msg(
                     ProtocolMessageTypes.respond_peers,
                     full_node_protocol.RespondPeers([relay_peer]),
                 )
                 await connection.send_message(msg)
         except Exception as e:
             # Keep the relay loop alive on unexpected errors; just log.
             self.log.error(f"Exception in address relay: {e}")
             self.log.error(f"Traceback: {traceback.format_exc()}")
Code example #6
0
    async def get_peer_info(self) -> Optional[PeerInfo]:
        """
        Look up this node's public IP via checkip.amazonaws.com and combine
        it with the configured port.

        Returns:
            A valid PeerInfo, or None when the lookup fails or the resulting
            address is invalid.
        """
        # NOTE(review): no request timeout is configured here, so a stalled
        # service can block this coroutine indefinitely — consider adding a
        # ClientTimeout as the other get_peer_info implementation does.
        port = self._port
        try:
            async with ClientSession() as session:
                async with session.get("https://checkip.amazonaws.com/") as resp:
                    if resp.status == 200:
                        ip = str(await resp.text()).rstrip()
                    else:
                        ip = None
        except Exception:
            ip = None
        if ip is None:
            return None
        peer = PeerInfo(ip, uint16(port))
        return peer if peer.is_valid() else None
Code example #7
0
    def add_to_new_table_(self, addr: TimestampedPeerInfo,
                          source: Optional[PeerInfo], penalty: int) -> bool:
        """
        Insert or refresh ``addr`` in the "new" address table.

        Args:
            addr: Candidate peer with the timestamp at which it was seen.
            source: Peer that told us about ``addr`` (None for direct adds);
                used to derive the new-bucket placement.
            penalty: Seconds subtracted from the stored timestamp.

        Returns:
            True if a brand-new entry was created, False otherwise.
        """
        is_unique = False
        peer_info = PeerInfo(
            addr.host,
            addr.port,
        )
        if not peer_info.is_valid(self.allow_private_subnets):
            return False
        (info, node_id) = self.find_(peer_info)
        # An exact host:port match is not penalized.
        if info is not None and info.peer_info.host == addr.host and info.peer_info.port == addr.port:
            penalty = 0

        if info is not None:
            # periodically update timestamp
            currently_online = time.time() - addr.timestamp < 24 * 60 * 60
            update_interval = 60 * 60 if currently_online else 24 * 60 * 60
            # NOTE(review): the canonical addrman algorithm refreshes when
            # the stored timestamp is zero OR stale (``info.timestamp == 0
            # or info.timestamp < ...``); this condition uses ``> 0`` —
            # confirm against upstream before relying on it.
            if addr.timestamp > 0 and (info.timestamp > 0
                                       or info.timestamp < addr.timestamp -
                                       update_interval - penalty):
                info.timestamp = max(0, addr.timestamp - penalty)

            # do not update if no new information is present
            if addr.timestamp == 0 or (info.timestamp > 0
                                       and addr.timestamp <= info.timestamp):
                return False

            # do not update if the entry was already in the "tried" table
            if info.is_tried:
                return False

            # do not update if the max reference count is reached
            if info.ref_count == NEW_BUCKETS_PER_ADDRESS:
                return False

            # stochastic test: previous ref_count == N: 2^N times harder to increase it
            factor = 1 << info.ref_count
            if factor > 1 and randrange(factor) != 0:
                return False
        else:
            # Unknown address: create a fresh entry.
            (info, node_id) = self.create_(addr, source)
            info.timestamp = max(0, info.timestamp - penalty)
            self.new_count += 1
            is_unique = True

        # Place the entry in its (key, source)-derived new bucket unless it
        # is already there.
        new_bucket = info.get_new_bucket(self.key, source)
        new_bucket_pos = info.get_bucket_position(self.key, True, new_bucket)
        if self.new_matrix[new_bucket][new_bucket_pos] != node_id:
            add_to_new = self.new_matrix[new_bucket][new_bucket_pos] == -1
            if not add_to_new:
                # Slot occupied: displace the occupant only if it is
                # terrible, or it has other references while ours has none.
                info_existing = self.map_info[self.new_matrix[new_bucket]
                                              [new_bucket_pos]]
                if info_existing.is_terrible() or (info_existing.ref_count > 1
                                                   and info.ref_count == 0):
                    add_to_new = True
            if add_to_new:
                self.clear_new_(new_bucket, new_bucket_pos)
                info.ref_count += 1
                if node_id is not None:
                    self._set_new_matrix(new_bucket, new_bucket_pos, node_id)
            else:
                # Could not place an unreferenced entry anywhere: drop it.
                if info.ref_count == 0:
                    if node_id is not None:
                        self.delete_new_entry_(node_id)
        return is_unique