Example #1
    async def get_local_peerinfo(self) -> Optional[PeerInfo]:
        ip = None
        port = None
        for c in self._all_connections:
            if c.connection_type == NodeType.FULL_NODE:
                port = c.local_port
                break
        if port is None:
            return None

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                        "https://checkip.amazonaws.com/") as resp:
                    if resp.status == 200:
                        ip = str(await resp.text())
                        ip = ip.rstrip()
                    else:
                        ip = None
        except Exception:
            ip = None
        if ip is None:
            return None
        peer = PeerInfo(ip, uint16(port))
        if not peer.is_valid():
            return None
        return peer
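
The pattern above (ask a public echo service for the externally visible address, and collapse any failure into "address unknown") is easy to exercise on its own. A minimal standalone sketch, assuming only aiohttp and the same checkip.amazonaws.com endpoint; the function name is illustrative:

import asyncio
from typing import Optional

import aiohttp


async def fetch_external_ip() -> Optional[str]:
    # Query a public "what is my IP" endpoint; any failure (timeout,
    # non-200 status, DNS error) is collapsed into "unknown" (None).
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get("https://checkip.amazonaws.com/") as resp:
                if resp.status == 200:
                    return (await resp.text()).rstrip()
    except Exception:
        pass
    return None


if __name__ == "__main__":
    print(asyncio.run(fetch_external_ip()))
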
    def mark_good_(self, addr: PeerInfo, test_before_evict: bool,
                   timestamp: int):
        self.last_good = timestamp
        (info, node_id) = self.find_(addr)
        if not addr.is_valid():
            return
        if info is None:
            return
        if node_id is None:
            return

        if not (info.peer_info.host == addr.host
                and info.peer_info.port == addr.port):
            return

        # update info
        info.last_success = timestamp
        info.last_try = timestamp
        info.num_attempts = 0
        # info.timestamp is deliberately not updated here, to avoid leaking
        # information about currently-connected peers.

        # if it is already in the tried set, don't do anything else
        if info.is_tried:
            return

        # find a bucket it is in now
        bucket_rand = randrange(NEW_BUCKET_COUNT)
        new_bucket = -1
        for n in range(NEW_BUCKET_COUNT):
            cur_new_bucket = (n + bucket_rand) % NEW_BUCKET_COUNT
            cur_new_bucket_pos = info.get_bucket_position(
                self.key, True, cur_new_bucket)
            if self.new_matrix[cur_new_bucket][cur_new_bucket_pos] == node_id:
                new_bucket = cur_new_bucket
                break

        # if no bucket is found, something bad happened; bail out
        if new_bucket == -1:
            return

        # NOTE(Florin): Double check this. It's not used anywhere else.

        # which tried bucket to move the entry to
        tried_bucket = info.get_tried_bucket(self.key)
        tried_bucket_pos = info.get_bucket_position(self.key, False,
                                                    tried_bucket)

        # Will moving this address into tried evict another entry?
        if (test_before_evict
                and self.tried_matrix[tried_bucket][tried_bucket_pos] != -1):
            if len(self.tried_collisions) < TRIED_COLLISION_SIZE:
                if node_id not in self.tried_collisions:
                    self.tried_collisions.append(node_id)
        else:
            self.make_tried_(info, node_id)
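
The bucket scan at the top of mark_good_ is the subtle step: one entry may be referenced from several "new" buckets, and the scan starts at a random offset so repeated promotions do not always resolve to the same reference. A standalone sketch of just that loop; the NEW_BUCKET_COUNT value and the bucket_position callback are stand-ins for the class's key-derived positions, not its real API:

from random import randrange
from typing import Callable, List

NEW_BUCKET_COUNT = 1024  # assumption: mirrors the constant mark_good_ iterates over


def find_referencing_bucket(
    new_matrix: List[List[int]],
    bucket_position: Callable[[int], int],
    node_id: int,
) -> int:
    # Scan every "new" bucket, starting at a random offset, and return the
    # first bucket whose slot references this entry; -1 means corruption.
    start = randrange(NEW_BUCKET_COUNT)
    for n in range(NEW_BUCKET_COUNT):
        bucket = (n + start) % NEW_BUCKET_COUNT
        if new_matrix[bucket][bucket_position(bucket)] == node_id:
            return bucket
    return -1
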
    async def _address_relay(self):
        while not self.is_closed:
            try:
                relay_peer, num_peers = await self.relay_queue.get()
                relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
                if not relay_peer_info.is_valid():
                    continue
                # https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
                connections = self.global_connections.get_full_node_connections()
                hashes = []
                cur_day = int(time.time()) // (24 * 60 * 60)
                for connection in connections:
                    peer_info = connection.get_peer_info()
                    if peer_info is None:
                        continue
                    cur_hash = int.from_bytes(
                        bytes(
                            std_hash(
                                self.key.to_bytes(32, byteorder="big")
                                + peer_info.get_key()
                                + cur_day.to_bytes(3, byteorder="big")
                            )
                        ),
                        byteorder="big",
                    )
                    hashes.append((cur_hash, connection))
                hashes.sort(key=lambda x: x[0])
                for index, (_, connection) in enumerate(hashes):
                    if index >= num_peers:
                        break
                    peer_info = connection.get_peer_info()
                    if peer_info is None:
                        continue
                    pair = (peer_info.host, peer_info.port)
                    async with self.lock:
                        if pair in self.neighbour_known_peers and relay_peer.host in self.neighbour_known_peers[pair]:
                            continue
                        if pair not in self.neighbour_known_peers:
                            self.neighbour_known_peers[pair] = set()
                        self.neighbour_known_peers[pair].add(relay_peer.host)
                    if connection.node_id is None:
                        continue
                    msg = OutboundMessage(
                        NodeType.FULL_NODE,
                        Message(
                            "respond_peers_full_node",
                            full_node_protocol.RespondPeers([relay_peer]),
                        ),
                        Delivery.SPECIFIC,
                        connection.node_id,
                    )
                    self.server.push_message(msg)
            except Exception as e:
                self.log.error(f"Exception in address relay: {e}")
                self.log.error(f"Traceback: {traceback.format_exc()}")
Example #4
    async def get_peer_info(self) -> Optional[PeerInfo]:
        ip = None
        port = self._port

        try:
            async with ClientSession() as session:
                async with session.get("https://checkip.amazonaws.com/") as resp:
                    if resp.status == 200:
                        ip = str(await resp.text())
                        ip = ip.rstrip()
        except Exception:
            ip = None
        if ip is None:
            return None
        peer = PeerInfo(ip, uint16(port))
        if not peer.is_valid():
            return None
        return peer
    def add_to_new_table_(self, addr: TimestampedPeerInfo, source: Optional[PeerInfo], penalty: int) -> bool:
        is_unique = False
        peer_info = PeerInfo(
            addr.host,
            addr.port,
        )
        if not peer_info.is_valid(self.allow_private_subnets):
            return False
        (info, node_id) = self.find_(peer_info)
        if info is not None and info.peer_info.host == addr.host and info.peer_info.port == addr.port:
            penalty = 0

        if info is not None:
            # periodically update timestamp
            currently_online = time.time() - addr.timestamp < 24 * 60 * 60
            update_interval = 60 * 60 if currently_online else 24 * 60 * 60
            if addr.timestamp > 0 and (
                info.timestamp == 0 or info.timestamp < addr.timestamp - update_interval - penalty
            ):
                info.timestamp = max(0, addr.timestamp - penalty)

            # do not update if no new information is present
            if addr.timestamp == 0 or (info.timestamp > 0 and addr.timestamp <= info.timestamp):
                return False

            # do not update if the entry was already in the "tried" table
            if info.is_tried:
                return False

            # do not update if the max reference count is reached
            if info.ref_count == NEW_BUCKETS_PER_ADDRESS:
                return False

            # stochastic test: previous ref_count == N: 2^N times harder to increase it
            factor = 1 << info.ref_count
            if factor > 1 and randrange(factor) != 0:
                return False
        else:
            (info, node_id) = self.create_(addr, source)
            info.timestamp = max(0, info.timestamp - penalty)
            self.new_count += 1
            is_unique = True

        new_bucket = info.get_new_bucket(self.key, source)
        new_bucket_pos = info.get_bucket_position(self.key, True, new_bucket)
        if self.new_matrix[new_bucket][new_bucket_pos] != node_id:
            add_to_new = self.new_matrix[new_bucket][new_bucket_pos] == -1
            if not add_to_new:
                info_existing = self.map_info[self.new_matrix[new_bucket][new_bucket_pos]]
                if info_existing.is_terrible() or (info_existing.ref_count > 1 and info.ref_count == 0):
                    add_to_new = True
            if add_to_new:
                self.clear_new_(new_bucket, new_bucket_pos)
                info.ref_count += 1
                if node_id is not None:
                    self._set_new_matrix(new_bucket, new_bucket_pos, node_id)
            else:
                if info.ref_count == 0:
                    if node_id is not None:
                        self.delete_new_entry_(node_id)
        return is_unique
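
The stochastic test near the end of add_to_new_table_ is worth spelling out: with a previous ref_count of n, the entry is admitted into one more "new" bucket with probability 1/2**n, so additional references become exponentially harder to earn. A tiny self-contained check of that rule (the function name is illustrative):

from random import randrange


def passes_ref_count_gate(ref_count: int) -> bool:
    # previous ref_count == n  ->  pass with probability 1 / 2**n
    factor = 1 << ref_count
    return factor <= 1 or randrange(factor) == 0


# Rough empirical check: with ref_count == 3 the pass rate should be near 1/8.
trials = 100_000
hits = sum(passes_ref_count_gate(3) for _ in range(trials))
print(hits / trials)  # ~0.125
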