Code example #1
    async def respond_header(self, response: wallet_protocol.RespondHeader):
        """
        The full node responds to our RequestHeader call. We cannot finish this block
        until we have the required additions / removals for our wallets.
        """
        while True:
            if self._shut_down:
                return
            # We loop to avoid infinite recursion. At the end of each iteration, we might want to
            # process the next block, if it exists.

            block = response.header_block

            # If we already have this block, return
            if block.header_hash in self.wallet_state_manager.block_records:
                return
            if block.height < 1:
                return

            block_record = BlockRecord(
                block.header_hash,
                block.prev_header_hash,
                block.height,
                block.weight,
                None,
                None,
                response.header_block.header.data.total_iters,
                response.header_block.challenge.get_hash(),
            )

            if self.wallet_state_manager.sync_mode:
                self.potential_blocks_received[uint32(block.height)].set()
                self.potential_header_hashes[block.height] = block.header_hash

            # Caches the block so we can finalize it when additions and removals arrive
            self.cached_blocks[block_record.header_hash] = (
                block_record,
                block,
                response.transactions_filter,
            )

            if block.prev_header_hash not in self.wallet_state_manager.block_records:
                # We do not have the previous block record, so wait for it. When the previous block
                # gets added to the chain, this method will be called again and we can continue.
                # During sync, the previous blocks are already requested. During normal operation,
                # this might not be the case.
                self.future_block_hashes[block.prev_header_hash] = block.header_hash

                lca = self.wallet_state_manager.block_records[
                    self.wallet_state_manager.lca
                ]
                if (
                    block_record.height - lca.height < self.short_sync_threshold
                    and not self.wallet_state_manager.sync_mode
                ):
                    # Only request the previous block if we are not in sync mode, are close to
                    # the new block, and do not have the previous block
                    header_request = wallet_protocol.RequestHeader(
                        uint32(block_record.height - 1), block_record.prev_header_hash,
                    )
                    yield OutboundMessage(
                        NodeType.FULL_NODE,
                        Message("request_header", header_request),
                        Delivery.RESPOND,
                    )
                return

            # If the block has transactions that we are interested in, fetch adds/deletes
            if response.transactions_filter is not None:
                (
                    additions,
                    removals,
                ) = await self.wallet_state_manager.get_filter_additions_removals(
                    block_record, response.transactions_filter
                )
                if len(additions) > 0 or len(removals) > 0:
                    request_a = wallet_protocol.RequestAdditions(
                        block.height, block.header_hash, additions
                    )
                    yield OutboundMessage(
                        NodeType.FULL_NODE,
                        Message("request_additions", request_a),
                        Delivery.RESPOND,
                    )
                    return

            # If we don't have any transactions in the filter, don't fetch them, and finish the block
            block_record = BlockRecord(
                block_record.header_hash,
                block_record.prev_header_hash,
                block_record.height,
                block_record.weight,
                [],
                [],
                block_record.total_iters,
                block_record.new_challenge_hash,
            )
            respond_header_msg: Optional[
                wallet_protocol.RespondHeader
            ] = await self._block_finished(
                block_record, block, response.transactions_filter
            )
            if respond_header_msg is None:
                return
            else:
                response = respond_header_msg
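
The while True loop, together with the final response = respond_header_msg assignment, is an iterative replacement for recursion: finalizing one block may surface a cached successor, which is then handled on the next pass. A stripped-down sketch of that control flow, with hypothetical names:

async def process_chain(first_response, finish):
    # finish() finalizes one block and returns the follow-up response for its
    # cached successor, or None when there is nothing left to process.
    response = first_response
    while response is not None:
        response = await finish(response)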
Code example #2
    async def respond_additions(self, response: wallet_protocol.RespondAdditions):
        """
        The full node has responded with the additions for a block. We will use this
        to try to finish the block, and add it to the state.
        """
        if self._shut_down:
            return
        if response.header_hash not in self.cached_blocks:
            self.log.warning("Do not have header for additions")
            return
        block_record, header_block, transaction_filter = self.cached_blocks[
            response.header_hash
        ]
        assert response.height == block_record.height

        additions: List[Coin]
        if response.proofs is None:
            # If there are no proofs, it means all additions were returned in the response,
            # and we must find the ones relevant to our wallets.
            all_coins: List[Coin] = []
            for puzzle_hash, coin_list_0 in response.coins:
                all_coins += coin_list_0
            additions = await self.wallet_state_manager.get_relevant_additions(
                all_coins
            )
            # Verify root
            additions_merkle_set = MerkleSet()

            # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
            for puzzle_hash, coins in response.coins:
                additions_merkle_set.add_already_hashed(puzzle_hash)
                additions_merkle_set.add_already_hashed(hash_coin_list(coins))

            additions_root = additions_merkle_set.get_root()
            if header_block.header.data.additions_root != additions_root:
                return
        else:
            # This means the full node has responded only with the relevant additions
            # for our wallet. Each merkle proof must be verified.
            additions = []
            assert len(response.coins) == len(response.proofs)
            for i in range(len(response.coins)):
                assert response.coins[i][0] == response.proofs[i][0]
                coin_list_1: List[Coin] = response.coins[i][1]
                puzzle_hash_proof: bytes32 = response.proofs[i][1]
                coin_list_proof: Optional[bytes32] = response.proofs[i][2]
                if len(coin_list_1) == 0:
                    # Verify exclusion proof for puzzle hash
                    assert confirm_not_included_already_hashed(
                        header_block.header.data.additions_root,
                        response.coins[i][0],
                        puzzle_hash_proof,
                    )
                else:
                    # Verify inclusion proof for puzzle hash
                    assert confirm_included_already_hashed(
                        header_block.header.data.additions_root,
                        response.coins[i][0],
                        puzzle_hash_proof,
                    )
                    # Verify inclusion proof for coin list
                    assert confirm_included_already_hashed(
                        header_block.header.data.additions_root,
                        hash_coin_list(coin_list_1),
                        coin_list_proof,
                    )
                    for coin in coin_list_1:
                        assert coin.puzzle_hash == response.coins[i][0]
                    additions += coin_list_1
        new_br = BlockRecord(
            block_record.header_hash,
            block_record.prev_header_hash,
            block_record.height,
            block_record.weight,
            additions,
            None,
            block_record.total_iters,
            header_block.challenge.get_hash(),
        )
        self.cached_blocks[response.header_hash] = (
            new_br,
            header_block,
            transaction_filter,
        )

        if transaction_filter is None:
            raise RuntimeError("Got additions for block with no transactions.")

        _, removals = await self.wallet_state_manager.get_filter_additions_removals(
            new_br, transaction_filter
        )
        request_all_removals = False
        for coin in additions:
            puzzle_store = self.wallet_state_manager.puzzle_store
            record_info: Optional[
                DerivationRecord
            ] = await puzzle_store.get_derivation_record_for_puzzle_hash(
                coin.puzzle_hash.hex()
            )
            if (
                record_info is not None
                and record_info.wallet_type == WalletType.COLOURED_COIN
            ):
                request_all_removals = True
                break

        if len(removals) > 0 or request_all_removals:
            if request_all_removals:
                request_r = wallet_protocol.RequestRemovals(
                    header_block.height, header_block.header_hash, None
                )
            else:
                request_r = wallet_protocol.RequestRemovals(
                    header_block.height, header_block.header_hash, removals
                )
            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_removals", request_r),
                Delivery.RESPOND,
            )
        else:
            # We have collected all three things: header, additions, and removals (since there are no
            # relevant removals for us). Can proceed. Otherwise, we wait for the removals to arrive.
            new_br = BlockRecord(
                new_br.header_hash,
                new_br.prev_header_hash,
                new_br.height,
                new_br.weight,
                new_br.additions,
                [],
                new_br.total_iters,
                new_br.new_challenge_hash,
            )
            respond_header_msg: Optional[
                wallet_protocol.RespondHeader
            ] = await self._block_finished(new_br, header_block, transaction_filter)
            if respond_header_msg is not None:
                async for msg in self.respond_header(respond_header_msg):
                    yield msg
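
Both verification branches check the same commitment: the header's additions root is the root of a MerkleSet built from each puzzle hash and the hash of its coin list. A short sketch of rebuilding that root for a proof-less response, using only the MerkleSet calls that appear above (the function name is hypothetical):

def rebuild_additions_root(coins_by_puzzle_hash):
    # coins_by_puzzle_hash has the same shape as response.coins:
    # a list of (puzzle_hash, coin_list) pairs.
    additions_merkle_set = MerkleSet()
    for puzzle_hash, coins in coins_by_puzzle_hash:
        additions_merkle_set.add_already_hashed(puzzle_hash)
        additions_merkle_set.add_already_hashed(hash_coin_list(coins))
    # The caller compares this against header_block.header.data.additions_root.
    return additions_merkle_set.get_root()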
Code example #3
async def on_connect() -> OutboundMessageGenerator:
    msg = Message("request_peers", full_node_protocol.RequestPeers())
    yield OutboundMessage(NodeType.INTRODUCER, msg, Delivery.RESPOND)
Code example #4
    async def _sync(self):
        """
        Wallet has fallen far behind (or is starting up for the first time), and must be synced
        up to the LCA of the blockchain.
        """
        # 1. Get all header hashes
        self.header_hashes = []
        self.header_hashes_error = False
        self.proof_hashes = []
        self.potential_header_hashes = {}
        genesis = FullBlock.from_bytes(self.constants["GENESIS_BLOCK"])
        genesis_challenge = genesis.proof_of_space.challenge_hash
        request_header_hashes = wallet_protocol.RequestAllHeaderHashesAfter(
            uint32(0), genesis_challenge
        )
        yield OutboundMessage(
            NodeType.FULL_NODE,
            Message("request_all_header_hashes_after", request_header_hashes),
            Delivery.RESPOND,
        )
        timeout = 100
        sleep_interval = 10
        sleep_interval_short = 1
        start_wait = time.time()
        while time.time() - start_wait < timeout:
            if self._shut_down:
                return
            if self.header_hashes_error:
                raise ValueError(
                    f"Received error from full node while fetching hashes from {request_header_hashes}."
                )
            if len(self.header_hashes) > 0:
                break
            await asyncio.sleep(0.5)
        if len(self.header_hashes) == 0:
            raise TimeoutError("Took too long to fetch header hashes.")

        # 2. Find fork point
        fork_point_height: uint32 = self.wallet_state_manager.find_fork_point_alternate_chain(
            self.header_hashes
        )
        fork_point_hash: bytes32 = self.header_hashes[fork_point_height]

        # Sync a little behind, in case there is a short reorg
        tip_height = (
            len(self.header_hashes) - 5
            if len(self.header_hashes) > 5
            else len(self.header_hashes)
        )
        self.log.info(
            f"Fork point: {fork_point_hash} at height {fork_point_height}. Will sync up to {tip_height}"
        )
        for height in range(0, tip_height + 1):
            self.potential_blocks_received[uint32(height)] = asyncio.Event()

        header_validate_start_height: uint32
        if self.config["starting_height"] == 0:
            header_validate_start_height = fork_point_height
        else:
            # Request all proof hashes
            request_proof_hashes = wallet_protocol.RequestAllProofHashes()
            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_all_proof_hashes", request_proof_hashes),
                Delivery.RESPOND,
            )
            start_wait = time.time()
            while time.time() - start_wait < timeout:
                if self._shut_down:
                    return
                if len(self.proof_hashes) > 0:
                    break
                await asyncio.sleep(0.5)
            if len(self.proof_hashes) == 0:
                raise TimeoutError("Took too long to fetch proof hashes.")
            if len(self.proof_hashes) < tip_height:
                raise ValueError("Not enough proof hashes fetched.")

            # Creates a map from height to difficulty
            heights: List[uint32] = []
            difficulty_weights: List[uint64] = []
            difficulty: uint64
            for i in range(tip_height):
                if self.proof_hashes[i][1] is not None:
                    difficulty = self.proof_hashes[i][1]
                if i > (fork_point_height + 1) and i % 2 == 1:  # Only add odd heights
                    heights.append(uint32(i))
                    difficulty_weights.append(difficulty)

            # Randomly sample based on difficulty
            query_heights_odd = sorted(
                list(
                    set(
                        random.choices(
                            heights, difficulty_weights, k=min(100, len(heights))
                        )
                    )
                )
            )
            query_heights: List[uint32] = []

            for odd_height in query_heights_odd:
                query_heights += [uint32(odd_height - 1), odd_height]

            # Send requests for these heights
            # Verify these proofs
            last_request_time = float(0)
            highest_height_requested = uint32(0)
            request_made = False

            for height_index in range(len(query_heights)):
                total_time_slept = 0
                while True:
                    if self._shut_down:
                        return
                    if total_time_slept > timeout:
                        raise TimeoutError("Took too long to fetch blocks")

                    # Request batches that we don't have yet
                    for batch_start_index in range(
                        height_index,
                        min(
                            height_index + self.config["num_sync_batches"],
                            len(query_heights),
                        ),
                    ):
                        blocks_missing = not self.potential_blocks_received[
                            uint32(query_heights[batch_start_index])
                        ].is_set()
                        if (
                            (
                                time.time() - last_request_time > sleep_interval
                                and blocks_missing
                            )
                            or (query_heights[batch_start_index])
                            > highest_height_requested
                        ):
                            self.log.info(
                                f"Requesting sync header {query_heights[batch_start_index]}"
                            )
                            if (
                                query_heights[batch_start_index]
                                > highest_height_requested
                            ):
                                highest_height_requested = uint32(
                                    query_heights[batch_start_index]
                                )
                            request_made = True
                            request_header = wallet_protocol.RequestHeader(
                                uint32(query_heights[batch_start_index]),
                                self.header_hashes[query_heights[batch_start_index]],
                            )
                            yield OutboundMessage(
                                NodeType.FULL_NODE,
                                Message("request_header", request_header),
                                Delivery.RANDOM,
                            )
                    if request_made:
                        last_request_time = time.time()
                        request_made = False
                    try:
                        aw = self.potential_blocks_received[
                            uint32(query_heights[height_index])
                        ].wait()
                        await asyncio.wait_for(aw, timeout=sleep_interval)
                        break
                    except concurrent.futures.TimeoutError:
                        total_time_slept += sleep_interval
                        self.log.info("Did not receive desired headers")

            self.log.info(
                f"Finished downloading sample of headers at heights: {query_heights}, validating."
            )
            # Validates the downloaded proofs
            assert self.wallet_state_manager.validate_select_proofs(
                self.proof_hashes,
                query_heights_odd,
                self.cached_blocks,
                self.potential_header_hashes,
            )
            self.log.info("All proofs validated successfuly.")

            # Add blockrecords one at a time, to catch up to starting height
            weight = self.wallet_state_manager.block_records[fork_point_hash].weight
            header_validate_start_height = min(
                max(fork_point_height, self.config["starting_height"] - 1),
                tip_height + 1,
            )
            if fork_point_height == 0:
                difficulty = self.constants["DIFFICULTY_STARTING"]
            else:
                fork_point_parent_hash = self.wallet_state_manager.block_records[
                    fork_point_hash
                ].prev_header_hash
                fork_point_parent_weight = self.wallet_state_manager.block_records[
                    fork_point_parent_hash
                ].weight
                difficulty = uint64(weight - fork_point_parent_weight)
            for height in range(fork_point_height + 1, header_validate_start_height):
                _, difficulty_change, total_iters = self.proof_hashes[height]
                weight += difficulty
                block_record = BlockRecord(
                    self.header_hashes[height],
                    self.header_hashes[height - 1],
                    uint32(height),
                    weight,
                    [],
                    [],
                    total_iters,
                    None,
                )
                res = await self.wallet_state_manager.receive_block(block_record, None)
                assert (
                    res == ReceiveBlockResult.ADDED_TO_HEAD
                    or res == ReceiveBlockResult.ADDED_AS_ORPHAN
                )
            self.log.info(
                f"Fast sync successful up to height {header_validate_start_height - 1}"
            )

        # Download headers in batches, and verify them as they come in. We download a few batches ahead,
        # in case there are delays. TODO(mariano): optimize sync by pipelining
        last_request_time = float(0)
        highest_height_requested = uint32(0)
        request_made = False

        for height_checkpoint in range(
            header_validate_start_height + 1, tip_height + 1
        ):
            total_time_slept = 0
            while True:
                if self._shut_down:
                    return
                if total_time_slept > timeout:
                    raise TimeoutError("Took too long to fetch blocks")

                # Request batches that we don't have yet
                for batch_start in range(
                    height_checkpoint,
                    min(
                        height_checkpoint + self.config["num_sync_batches"],
                        tip_height + 1,
                    ),
                ):
                    batch_end = min(batch_start + 1, tip_height + 1)
                    blocks_missing = any(
                        [
                            not (self.potential_blocks_received[uint32(h)]).is_set()
                            for h in range(batch_start, batch_end)
                        ]
                    )
                    if (
                        time.time() - last_request_time > sleep_interval
                        and blocks_missing
                    ) or (batch_end - 1) > highest_height_requested:
                        self.log.info(f"Requesting sync header {batch_start}")
                        if batch_end - 1 > highest_height_requested:
                            highest_height_requested = uint32(batch_end - 1)
                        request_made = True
                        request_header = wallet_protocol.RequestHeader(
                            uint32(batch_start), self.header_hashes[batch_start],
                        )
                        yield OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("request_header", request_header),
                            Delivery.RANDOM,
                        )
                if request_made:
                    last_request_time = time.time()
                    request_made = False

                awaitables = [
                    self.potential_blocks_received[uint32(height_checkpoint)].wait()
                ]
                future = asyncio.gather(*awaitables, return_exceptions=True)
                try:
                    await asyncio.wait_for(future, timeout=sleep_interval)
                except concurrent.futures.TimeoutError:
                    try:
                        await future
                    except asyncio.CancelledError:
                        pass
                    total_time_slept += sleep_interval
                    self.log.info("Did not receive desired headers")
                    continue

                # Successfully downloaded the header. Now confirm it was added to the chain.
                hh = self.potential_header_hashes[height_checkpoint]
                if hh in self.wallet_state_manager.block_records:
                    # Successfully added the block to chain
                    break
                else:
                    # Not added to chain yet. Try again soon.
                    await asyncio.sleep(sleep_interval_short)
                    total_time_slept += sleep_interval_short
                    if hh in self.wallet_state_manager.block_records:
                        break
                    else:
                        self.log.warning(
                            "Received header, but it has not been added to chain. Retrying."
                        )
                        _, hb, tfilter = self.cached_blocks[hh]
                        respond_header_msg = wallet_protocol.RespondHeader(hb, tfilter)
                        async for msg in self.respond_header(respond_header_msg):
                            yield msg

        self.log.info(
            f"Finished sync process up to height {max(self.wallet_state_manager.height_to_hash.keys())}"
        )
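
One detail of the sampling step above is worth calling out: heights are drawn with probability proportional to difficulty, so the blocks that contribute the most weight are the most likely to be spot-checked. A standard-library illustration with invented numbers:

import random

heights = [101, 103, 105, 107]           # odd heights past the fork point
difficulty_weights = [10, 10, 40, 80]    # hypothetical per-height difficulties

# random.choices samples with replacement and takes relative weights, so the
# result is deduplicated and sorted, exactly as the sync code does.
sample = sorted(set(random.choices(heights, difficulty_weights, k=3)))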
Code example #5
    async def unfinished_block(
        self, unfinished_block: peer_protocol.UnfinishedBlock
    ) -> AsyncGenerator[OutboundMessage, None]:
        """
        We have received an unfinished block, either created by us, or from another peer.
        We can validate it and if it's a good block, propagate it to other peers and
        timelords.
        """
        if not self.blockchain.is_child_of_head(unfinished_block.block):
            return

        if not await self.blockchain.validate_unfinished_block(
                unfinished_block.block):
            raise InvalidUnfinishedBlock()

        prev_block: Optional[
            HeaderBlock] = await self.blockchain.get_header_block(
                unfinished_block.block.prev_header_hash)
        assert prev_block
        assert prev_block.challenge

        challenge_hash: bytes32 = prev_block.challenge.get_hash()
        difficulty: uint64 = await self.blockchain.get_next_difficulty(
            unfinished_block.block.header_block.prev_header_hash)
        vdf_ips: uint64 = await self.blockchain.get_next_ips(
            unfinished_block.block.header_block.prev_header_hash)

        iterations_needed: uint64 = calculate_iterations(
            unfinished_block.block.header_block.proof_of_space,
            difficulty,
            vdf_ips,
            constants["MIN_BLOCK_TIME"],
        )

        if (await self.store.get_unfinished_block(
            (challenge_hash, iterations_needed)) is not None):
            return

        expected_time: uint64 = uint64(
            int(iterations_needed /
                (await self.store.get_proof_of_time_estimate_ips())))

        if expected_time > constants["PROPAGATION_DELAY_THRESHOLD"]:
            log.info(
                f"Block is slow, expected {expected_time} seconds, waiting")
            # If this block is slow, sleep to allow faster blocks to come out first
            await asyncio.sleep(5)

        async with self.store.lock:
            leader: Optional[Tuple[
                uint32, uint64]] = self.store.get_unfinished_block_leader()
            if leader is None or unfinished_block.block.height > leader[0]:
                log.info(
                    f"This is the first block at height {unfinished_block.block.height}, so propagate."
                )
                # If this is the first block we see at this height, propagate
                self.store.set_unfinished_block_leader(
                    (unfinished_block.block.height, expected_time))
            elif unfinished_block.block.height == leader[0]:
                if expected_time > leader[1] + constants[
                        "PROPAGATION_THRESHOLD"]:
                    # If VDF is expected to finish X seconds later than the best, don't propagate
                    log.info(
                        f"VDF will finish too late {expected_time} seconds, so don't propagate"
                    )
                    return
                elif expected_time < leader[1]:
                    log.info(
                        f"New best unfinished block at height {unfinished_block.block.height}"
                    )
                    # If this will be the first block to finalize, update our leader
                    self.store.set_unfinished_block_leader(
                        (leader[0], expected_time))
            else:
                # We have already seen an unfinished block at a greater height, so don't propagate
                log.info("Unfinished block at old height, so don't propagate")
                return

            await self.store.add_unfinished_block(
                (challenge_hash, iterations_needed), unfinished_block.block)

        timelord_request = timelord_protocol.ProofOfSpaceInfo(
            challenge_hash, iterations_needed)

        yield OutboundMessage(
            NodeType.TIMELORD,
            Message("proof_of_space_info", timelord_request),
            Delivery.BROADCAST,
        )
        yield OutboundMessage(
            NodeType.FULL_NODE,
            Message("unfinished_block", unfinished_block),
            Delivery.BROADCAST_TO_OTHERS,
        )
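
The branching under the store lock reduces to a small decision rule on (height, expected_time) against the current leader. A hypothetical restatement as a pure function, included only for readability:

from typing import Optional, Tuple


def should_propagate(height, expected_time, leader: Optional[Tuple[int, int]],
                     propagation_threshold: int) -> bool:
    # First unfinished block seen at a new greatest height: propagate it.
    if leader is None or height > leader[0]:
        return True
    # Same height: drop it only if its VDF finishes too far behind the best.
    if height == leader[0]:
        return expected_time <= leader[1] + propagation_threshold
    # We already saw an unfinished block at a greater height: stale, drop it.
    return False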
Code example #6
    async def _do_process_communication(
        self, challenge_hash, challenge_weight, ip, reader, writer
    ):
        disc: int = create_discriminant(challenge_hash, self.discriminant_size_bits)
        # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
        # the timelord tells the vdf_client what to execute.
        if not self.sanitizer_mode:
            if self.config["fast_algorithm"]:
                # Run n-wesolowski (fast) algorithm.
                writer.write(b"N")
            else:
                # Run two-wesolowski (slow) algorithm.
                writer.write(b"T")
        else:
            # Create compact proofs of time.
            writer.write(b"S")
        await writer.drain()

        prefix = str(len(str(disc)))
        if len(prefix) == 1:
            prefix = "00" + prefix
        writer.write((prefix + str(disc)).encode())
        await writer.drain()

        try:
            ok = await reader.readexactly(2)
        except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
            log.warning(f"{type(e)} {e}")
            async with self.lock:
                if challenge_hash not in self.done_discriminants:
                    self.done_discriminants.append(challenge_hash)
                if self.sanitizer_mode:
                    if challenge_hash in self.pending_iters:
                        del self.pending_iters[challenge_hash]
                    if challenge_hash in self.submitted_iters:
                        del self.submitted_iters[challenge_hash]
            return

        if ok.decode() != "OK":
            return

        log.info("Got handshake with VDF client.")

        async with self.lock:
            self.active_discriminants[challenge_hash] = (writer, challenge_weight, ip)
            self.active_discriminants_start_time[challenge_hash] = time.time()

        asyncio.create_task(self._send_iterations(challenge_hash, writer))

        # Listen to the client until "STOP" is received.
        while True:
            try:
                data = await reader.readexactly(4)
            except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    if challenge_hash in self.active_discriminants:
                        del self.active_discriminants[challenge_hash]
                    if challenge_hash in self.active_discriminants_start_time:
                        del self.active_discriminants_start_time[challenge_hash]
                    if challenge_hash not in self.done_discriminants:
                        self.done_discriminants.append(challenge_hash)
                    if self.sanitizer_mode:
                        if challenge_hash in self.pending_iters:
                            del self.pending_iters[challenge_hash]
                        if challenge_hash in self.submitted_iters:
                            del self.submitted_iters[challenge_hash]
                break

            msg = ""
            try:
                msg = data.decode()
            except Exception as e:
                log.error(f"Exception while decoding data {e}")

            if msg == "STOP":
                log.info(f"Stopped client running on ip {ip}.")
                async with self.lock:
                    writer.write(b"ACK")
                    await writer.drain()
                break
            else:
                try:
                    # This must be a proof; 4 bytes is the length prefix
                    length = int.from_bytes(data, "big")
                    proof = await reader.readexactly(length)
                    stdout_bytes_io: io.BytesIO = io.BytesIO(
                        bytes.fromhex(proof.decode())
                    )
                except (
                    asyncio.IncompleteReadError,
                    ConnectionResetError,
                    Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        if challenge_hash in self.active_discriminants:
                            del self.active_discriminants[challenge_hash]
                        if challenge_hash in self.active_discriminants_start_time:
                            del self.active_discriminants_start_time[challenge_hash]
                        if challenge_hash not in self.done_discriminants:
                            self.done_discriminants.append(challenge_hash)
                        if self.sanitizer_mode:
                            if challenge_hash in self.pending_iters:
                                del self.pending_iters[challenge_hash]
                            if challenge_hash in self.submitted_iters:
                                del self.submitted_iters[challenge_hash]
                    break

                iterations_needed = uint64(
                    int.from_bytes(stdout_bytes_io.read(8), "big", signed=True)
                )

                y_size_bytes = stdout_bytes_io.read(8)
                y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))

                y_bytes = stdout_bytes_io.read(y_size)
                witness_type = uint8(
                    int.from_bytes(stdout_bytes_io.read(1), "big", signed=True)
                )
                proof_bytes: bytes = stdout_bytes_io.read()

                # Verifies our own proof just in case
                a = int.from_bytes(y_bytes[:129], "big", signed=True)
                b = int.from_bytes(y_bytes[129:], "big", signed=True)

                output = ClassgroupElement(int512(a), int512(b))

                proof_of_time = ProofOfTime(
                    challenge_hash,
                    iterations_needed,
                    output,
                    witness_type,
                    proof_bytes,
                )

                if not proof_of_time.is_valid(self.discriminant_size_bits):
                    log.error("Invalid proof of time")

                response = timelord_protocol.ProofOfTimeFinished(proof_of_time)

                await self._update_avg_ips(challenge_hash, iterations_needed, ip)

                async with self.lock:
                    self.proofs_to_write.append(
                        OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("proof_of_time_finished", response),
                            Delivery.BROADCAST,
                        )
                    )

                if not self.sanitizer_mode:
                    await self._update_proofs_count(challenge_weight)
                else:
                    async with self.lock:
                        writer.write(b"010")
                        await writer.drain()
                        try:
                            del self.active_discriminants[challenge_hash]
                            del self.active_discriminants_start_time[challenge_hash]
                            del self.pending_iters[challenge_hash]
                            del self.submitted_iters[challenge_hash]
                        except KeyError:
                            log.error("Discriminant stopped anormally.")
Code example #7
    async def _do_process_communication(self, challenge_hash, challenge_weight,
                                        ip, port):
        disc: int = create_discriminant(challenge_hash,
                                        constants["DISCRIMINANT_SIZE_BITS"])

        log.info("Attempting SSH connection")
        proc = await asyncio.create_subprocess_shell(
            f"./lib/chiavdf/fast_vdf/vdf_server {port}")

        # TODO(Florin): Handle connection failure (attempt another server)
        writer: Optional[StreamWriter] = None
        reader: Optional[StreamReader] = None
        for _ in range(10):
            try:
                reader, writer = await asyncio.open_connection(ip, port)
                # socket = writer.get_extra_info("socket")
                # socket.settimeout(None)
                break
            except Exception as e:
                log.warning(f"Connection to VDF server failed: {e}, retrying")
            await asyncio.sleep(1)
        if not writer or not reader:
            raise Exception("Unable to connect to VDF server")

        writer.write((str(len(str(disc))) + str(disc)).encode())
        await writer.drain()

        ok = await reader.readexactly(2)
        assert ok.decode() == "OK"

        log.info("Got handshake with VDF server.")

        async with self.lock:
            self.active_discriminants[challenge_hash] = (writer,
                                                         challenge_weight, ip)
            self.active_discriminants_start_time[challenge_hash] = time.time()

        # Listen to the server until "STOP" is received.
        while True:
            async with self.lock:
                if (challenge_hash in self.active_discriminants) and (
                        challenge_hash in self.pending_iters):
                    if challenge_hash not in self.submitted_iters:
                        self.submitted_iters[challenge_hash] = []
                    log.info(
                        f"Pending: {self.pending_iters[challenge_hash]} "
                        f"Submitted: {self.submitted_iters[challenge_hash]} Hash: {challenge_hash}"
                    )
                    for iteration in sorted(self.pending_iters[challenge_hash]):
                        if iteration in self.submitted_iters[challenge_hash]:
                            continue
                        self.submitted_iters[challenge_hash].append(iteration)
                        if len(str(iteration)) < 10:
                            iter_size = "0" + str(len(str(iteration)))
                        else:
                            iter_size = str(len(str(iteration)))
                        writer.write((iter_size + str(iteration)).encode())
                        await writer.drain()

            try:
                data = await reader.readexactly(4)
            except (asyncio.IncompleteReadError, ConnectionResetError) as e:
                log.warn(f"{type(e)} {e}")
                break

            if data.decode() == "STOP":
                log.info("Stopped server")
                async with self.lock:
                    writer.write(b"ACK")
                    await writer.drain()
                    await proc.wait()
                    # Server is now available.
                    self.free_servers.append((ip, port))
                    len_server = len(self.free_servers)
                    log.info(f"Process ended... Server length {len_server}")
                break
            elif data.decode() == "POLL":
                async with self.lock:
                    # If we have a newer discriminant waiting, free up the VDF server
                    if (len(self.discriminant_queue) > 0 and challenge_weight <
                            max([h for _, h in self.discriminant_queue])
                            and challenge_hash in self.active_discriminants):
                        log.info("Got poll, stopping the challenge!")
                        writer.write(b"010")
                        await writer.drain()
                        del self.active_discriminants[challenge_hash]
                        del self.active_discriminants_start_time[
                            challenge_hash]
                        self.done_discriminants.append(challenge_hash)
            else:
                try:
                    # This must be a proof; read the continuation.
                    proof = await reader.readexactly(1860)
                    stdout_bytes_io: io.BytesIO = io.BytesIO(
                        bytes.fromhex(data.decode() + proof.decode()))
                except Exception as e:
                    log.error(f"Socket error: {e}")
                    break

                iterations_needed = uint64(
                    int.from_bytes(stdout_bytes_io.read(8), "big",
                                   signed=True))
                y = ClassgroupElement.parse(stdout_bytes_io)
                proof_bytes: bytes = stdout_bytes_io.read()

                # Verifies our own proof just in case
                proof_blob = (ClassGroup.from_ab_discriminant(
                    y.a, y.b, disc).serialize() + proof_bytes)
                x = ClassGroup.from_ab_discriminant(2, 1, disc)
                if not check_proof_of_time_nwesolowski(
                        disc,
                        x,
                        proof_blob,
                        iterations_needed,
                        constants["DISCRIMINANT_SIZE_BITS"],
                        self.config["n_wesolowski"],
                ):
                    log.error("My proof is incorrect!")

                output = ClassgroupElement(y.a, y.b)
                proof_of_time = ProofOfTime(
                    challenge_hash,
                    iterations_needed,
                    output,
                    self.config["n_wesolowski"],
                    [uint8(b) for b in proof_bytes],
                )
                response = timelord_protocol.ProofOfTimeFinished(proof_of_time)

                await self._update_avg_ips(challenge_hash, iterations_needed,
                                           ip)

                async with self.lock:
                    self.proofs_to_write.append(
                        OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("proof_of_time_finished", response),
                            Delivery.BROADCAST,
                        ))

                await self._update_proofs_count(challenge_weight)
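
The manual padding in the submission loop above (prepend "0" when the length is a single digit) amounts to a 2-character zero-padded decimal length prefix, the same framing idea as the discriminant handshake. As a sketch:

def frame_iterations(iterations: int) -> bytes:
    body = str(iterations)
    # 2-character decimal length prefix, then the iteration count digits.
    return (str(len(body)).zfill(2) + body).encode()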
Code example #8
    async def _sync(self):
        """
        Performs a full sync of the blockchain.
            - Check which are the heaviest tips
            - Request headers for the heaviest
            - Verify the weight of the tip, using the headers
            - Find the fork point to see where to start downloading blocks
            - Blacklist peers that provide invalid blocks
            - Sync blockchain up to heads (request blocks in batches)
        """
        log.info("Starting to perform sync with peers.")
        log.info("Waiting to receive tips from peers.")
        # TODO: better way to tell that we have finished receiving tips
        await asyncio.sleep(5)
        highest_weight: uint64 = uint64(0)
        tip_block: FullBlock
        tip_height = 0

        # Based on responses from peers about the current heads, see which head is the heaviest
        # (similar to longest chain rule).
        async with self.store.lock:
            potential_tips: List[Tuple[
                bytes32,
                FullBlock]] = await self.store.get_potential_tips_tuples()
            log.info(f"Have collected {len(potential_tips)} potential tips")
            for header_hash, block in potential_tips:
                if block.header_block.challenge is None:
                    raise ValueError(
                        f"Invalid tip block {block.header_hash} received")
                if block.header_block.challenge.total_weight > highest_weight:
                    highest_weight = block.header_block.challenge.total_weight
                    tip_block = block
                    tip_height = block.header_block.challenge.height
            if highest_weight <= max(
                [t.weight for t in self.blockchain.get_current_tips()]):
                log.info("Not performing sync, already caught up.")
                return

        assert tip_block
        log.info(
            f"Tip block {tip_block.header_hash} tip height {tip_block.height}")

        for height in range(0, tip_block.height + 1):
            self.store.set_potential_headers_received(uint32(height), Event())
            self.store.set_potential_blocks_received(uint32(height), Event())
            self.store.set_potential_hashes_received(Event())

        timeout = 200
        sleep_interval = 10
        total_time_slept = 0

        while True:
            if total_time_slept > timeout:
                raise TimeoutError("Took too long to fetch header hashes.")
            if self._shut_down:
                return
            # Download all the header hashes and find the fork point
            request = peer_protocol.RequestAllHeaderHashes(
                tip_block.header_hash)
            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_all_header_hashes", request),
                Delivery.RANDOM,
            )
            try:
                await asyncio.wait_for(
                    self.store.get_potential_hashes_received().wait(),
                    timeout=sleep_interval,
                )
                break
            except concurrent.futures.TimeoutError:
                total_time_slept += sleep_interval
                log.warning("Did not receive desired header hashes")

        # Finding the fork point allows us to only download headers and blocks from the fork point
        async with self.store.lock:
            header_hashes = self.store.get_potential_hashes()
            fork_point_height: uint32 = self.blockchain.find_fork_point(
                header_hashes)
            fork_point_hash: bytes32 = header_hashes[fork_point_height]
        log.info(
            f"Fork point: {fork_point_hash} at height {fork_point_height}")

        # Now, we download all of the headers in order to verify the weight, in batches
        headers: List[HeaderBlock] = []

        # Download headers in batches. We download a few batches ahead in case there are delays or peers
        # that don't have the headers that we need.
        last_request_time: float = 0
        highest_height_requested: uint32 = uint32(0)
        request_made: bool = False
        for height_checkpoint in range(fork_point_height + 1, tip_height + 1,
                                       self.config["max_headers_to_send"]):
            end_height = min(
                height_checkpoint + self.config["max_headers_to_send"],
                tip_height + 1)

            total_time_slept = 0
            while True:
                if self._shut_down:
                    return
                if total_time_slept > timeout:
                    raise TimeoutError("Took too long to fetch blocks")

                # Request batches that we don't have yet
                for batch in range(0, self.config["num_sync_batches"]):
                    batch_start = (height_checkpoint +
                                   batch * self.config["max_headers_to_send"])
                    batch_end = min(
                        batch_start + self.config["max_headers_to_send"],
                        tip_height + 1)

                    if batch_start > tip_height:
                        # We have asked for all blocks
                        break

                    blocks_missing = any([
                        not (self.store.get_potential_headers_received(
                            uint32(h))).is_set()
                        for h in range(batch_start, batch_end)
                    ])
                    if (time.time() - last_request_time > sleep_interval
                            and blocks_missing
                        ) or (batch_end - 1) > highest_height_requested:
                        # If we are missing header blocks in this batch and haven't made a
                        # request in a while, make a request for this batch. Also make the
                        # request if we have never requested this batch before.
                        if batch_end - 1 > highest_height_requested:
                            highest_height_requested = batch_end - 1

                        request_made = True
                        request_hb = peer_protocol.RequestHeaderBlocks(
                            tip_block.header_block.header.get_hash(),
                            [uint32(h) for h in range(batch_start, batch_end)],
                        )
                        log.info(
                            f"Requesting header blocks {batch_start, batch_end}."
                        )
                        yield OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("request_header_blocks", request_hb),
                            Delivery.RANDOM,
                        )
                if request_made:
                    # Reset the timer for requests, so we don't overload other peers with requests
                    last_request_time = time.time()
                    request_made = False

                # Wait for the first batch (the next "max_headers_to_send" header blocks) to arrive
                awaitables = [
                    (self.store.get_potential_headers_received(
                        uint32(height))).wait()
                    for height in range(height_checkpoint, end_height)
                ]
                future = asyncio.gather(*awaitables, return_exceptions=True)
                try:
                    await asyncio.wait_for(future, timeout=sleep_interval)
                    break
                except concurrent.futures.TimeoutError:
                    try:
                        await future
                    except asyncio.CancelledError:
                        pass
                    total_time_slept += sleep_interval
                    log.info(f"Did not receive desired header blocks")

        async with self.store.lock:
            for h in range(fork_point_height + 1, tip_height + 1):
                header = self.store.get_potential_header(uint32(h))
                assert header is not None
                headers.append(header)

        log.error(f"Downloaded headers up to tip height: {tip_height}")
        if not verify_weight(
                tip_block.header_block,
                headers,
                self.blockchain.header_blocks[fork_point_hash],
        ):
            raise errors.InvalidWeight(
                f"Weight of {tip_block.header_block.header.get_hash()} not valid."
            )

        log.info(
            f"Validated weight of headers. Downloaded {len(headers)} headers, tip height {tip_height}"
        )
        assert tip_height == fork_point_height + len(headers)

        # Download blocks in batches, and verify them as they come in. We download a few batches ahead,
        # in case there are delays.
        last_request_time = 0
        highest_height_requested = uint32(0)
        request_made = False
        for height_checkpoint in range(fork_point_height + 1, tip_height + 1,
                                       self.config["max_blocks_to_send"]):
            end_height = min(
                height_checkpoint + self.config["max_blocks_to_send"],
                tip_height + 1)

            total_time_slept = 0
            while True:
                if self._shut_down:
                    return
                if total_time_slept > timeout:
                    raise TimeoutError("Took too long to fetch blocks")

                # Request batches that we don't have yet
                for batch in range(0, self.config["num_sync_batches"]):
                    batch_start = (height_checkpoint +
                                   batch * self.config["max_blocks_to_send"])
                    batch_end = min(
                        batch_start + self.config["max_blocks_to_send"],
                        tip_height + 1)

                    if batch_start > tip_height:
                        # We have asked for all blocks
                        break

                    blocks_missing = any([
                        not (self.store.get_potential_blocks_received(
                            uint32(h))).is_set()
                        for h in range(batch_start, batch_end)
                    ])
                    if (time.time() - last_request_time > sleep_interval
                            and blocks_missing
                        ) or (batch_end - 1) > highest_height_requested:
                        # If we are missing blocks in this batch and haven't made a request
                        # in a while, make a request for this batch. Also make the request
                        # if we have never requested this batch before.
                        log.info(
                            f"Requesting sync blocks {[i for i in range(batch_start, batch_end)]}"
                        )
                        if batch_end - 1 > highest_height_requested:
                            highest_height_requested = batch_end - 1
                        request_made = True
                        request_sync = peer_protocol.RequestSyncBlocks(
                            tip_block.header_block.header.header_hash,
                            [
                                uint32(height)
                                for height in range(batch_start, batch_end)
                            ],
                        )
                        yield OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("request_sync_blocks", request_sync),
                            Delivery.RANDOM,
                        )
                if request_made:
                    # Reset the timer for requests, so we don't overload other peers with requests
                    last_request_time = time.time()
                    request_made = False

                # Wait for the first batch (the next "max_blocks_to_send" blocks) to arrive
                awaitables = [
                    (self.store.get_potential_blocks_received(
                        uint32(height))).wait()
                    for height in range(height_checkpoint, end_height)
                ]
                future = asyncio.gather(*awaitables, return_exceptions=True)
                try:
                    await asyncio.wait_for(future, timeout=sleep_interval)
                    break
                except concurrent.futures.TimeoutError:
                    try:
                        await future
                    except asyncio.CancelledError:
                        pass
                    total_time_slept += sleep_interval
                    log.info("Did not receive desired blocks")

            # Verifies this batch, which we are guaranteed to have (since we broke from the above loop)
            for height in range(height_checkpoint, end_height):
                if self._shut_down:
                    return
                block = await self.store.get_potential_block(uint32(height))
                assert block is not None
                start = time.time()
                async with self.store.lock:
                    # The block gets permanently added to the blockchain
                    result = await self.blockchain.receive_block(block)
                    if (result == ReceiveBlockResult.INVALID_BLOCK or result
                            == ReceiveBlockResult.DISCONNECTED_BLOCK):
                        raise RuntimeError(
                            f"Invalid block {block.header_hash}")
                    log.info(
                        f"Took {time.time() - start} seconds to validate and add block {block.height}."
                    )
                    assert (max([
                        h.height for h in self.blockchain.get_current_tips()
                    ]) >= height)
                    await self.store.set_proof_of_time_estimate_ips(
                        await self.blockchain.get_next_ips(block.header_hash))
        assert max([h.height
                    for h in self.blockchain.get_current_tips()]) == tip_height
        log.info(f"Finished sync up to height {tip_height}")
Code example #9
    async def respond_proof_of_space(
            self, response: harvester_protocol.RespondProofOfSpace):
        """
        This is a response from the harvester with a proof of space. We check its validity,
        and request a pool partial, a header signature, or both, if the proof is good enough.
        """

        if response.proof.pool_pubkey not in self.pool_public_keys:
            raise RuntimeError("Pool pubkey not in list of approved keys")

        challenge_hash: bytes32 = self.harvester_responses_challenge[
            response.quality_string]
        challenge_weight: uint128 = self.challenge_to_weight[challenge_hash]
        challenge_height: uint32 = self.challenge_to_height[challenge_hash]
        new_proof_height: uint32 = uint32(challenge_height + 1)
        difficulty: uint64 = uint64(0)
        for posf in self.challenges[challenge_weight]:
            if posf.challenge_hash == challenge_hash:
                difficulty = posf.difficulty
        if difficulty == 0:
            raise RuntimeError("Did not find challenge")

        computed_quality_string = response.proof.verify_and_get_quality_string()
        if response.quality_string != computed_quality_string:
            raise RuntimeError("Invalid quality for proof of space")

        self.harvester_responses_proofs[
            response.quality_string] = response.proof
        self.harvester_responses_proof_hash_to_qual[
            response.proof.get_hash()] = response.quality_string

        estimate_min = (self.proof_of_time_estimate_ips *
                        self.constants["BLOCK_TIME_TARGET"] /
                        self.constants["MIN_ITERS_PROPORTION"])
        number_iters: uint64 = calculate_iterations_quality(
            computed_quality_string,
            response.proof.size,
            difficulty,
            estimate_min,
        )
        estimate_secs: float = number_iters / self.proof_of_time_estimate_ips

        if estimate_secs < self.config["pool_share_threshold"]:
            request1 = harvester_protocol.RequestPartialProof(
                response.quality_string,
                self.wallet_target,
            )
            yield OutboundMessage(
                NodeType.HARVESTER,
                Message("request_partial_proof", request1),
                Delivery.RESPOND,
            )
        if estimate_secs < self.config["propagate_threshold"]:
            pool_pk = bytes(response.proof.pool_pubkey)
            if pool_pk not in self.pool_sks_map:
                log.error(
                    f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                )
                return
            sk = self.pool_sks_map[pool_pk]
            coinbase_reward = uint64(
                calculate_block_reward(uint32(new_proof_height)))

            coinbase, signature = create_coinbase_coin_and_signature(
                new_proof_height,
                self.pool_target,
                coinbase_reward,
                sk,
            )

            request2 = farmer_protocol.RequestHeaderHash(
                challenge_hash,
                coinbase,
                signature,
                self.wallet_target,
                response.proof,
            )

            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_header_hash", request2),
                Delivery.BROADCAST,
            )
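
The two threshold checks at the end are independent of the networking code: quality, plot size, and difficulty yield a number of VDF iterations, and dividing by the estimated iterations per second gives the expected time to finalize the proof. A minimal sketch of that decision with plain numbers (calculate_iterations_quality is the repo's function, so a precomputed iteration count stands in for it here):

def classify_proof(number_iters: int, ips: float,
                   pool_share_threshold: float,
                   propagate_threshold: float) -> list:
    # Expected seconds for a timelord to finish this proof's iterations.
    estimate_secs = number_iters / ips
    actions = []
    if estimate_secs < pool_share_threshold:
        actions.append("request_partial_proof")  # good enough for a pool share
    if estimate_secs < propagate_threshold:
        actions.append("request_header_hash")    # good enough to try for a block
    return actions

# 150_000 iterations at 4_000 ips -> 37.5 expected seconds
print(classify_proof(150_000, 4_000, 300.0, 50.0))
# ['request_partial_proof', 'request_header_hash']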
Code Example #10
 async def _on_connect(self):
     # Sends a handshake to the harvester
     msg = harvester_protocol.HarvesterHandshake(self.pool_public_keys)
     yield OutboundMessage(NodeType.HARVESTER,
                           Message("harvester_handshake", msg),
                           Delivery.RESPOND)
Code Example #11
async def handle_message(
    triple: Tuple[ChiaConnection, Message, PeerConnections], api: Any
) -> AsyncGenerator[Tuple[ChiaConnection, OutboundMessage, PeerConnections],
                    None]:
    """
    Async generator which takes messages, parses them, executes the right
    API function, and yields responses (to the same connection, propagated, etc.).
    """
    connection, full_message, global_connections = triple

    try:
        if len(full_message.function) == 0 or full_message.function.startswith(
                "_"):
            # This prevents remote calling of private methods that start with "_"
            raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE,
                                [full_message.function])

        connection.log.info(
            f"<- {full_message.function} from peer {connection.get_peername()}"
        )
        if full_message.function == "ping":
            ping_msg = Ping(full_message.data["nonce"])
            assert connection.connection_type
            outbound_message = OutboundMessage(
                connection.connection_type,
                Message("pong", Pong(ping_msg.nonce)),
                Delivery.RESPOND,
            )
            global_connections.update_connection_time(connection)
            yield connection, outbound_message, global_connections
            return
        elif full_message.function == "pong":
            global_connections.update_connection_time(connection)
            return

        f_with_peer_name = getattr(api,
                                   full_message.function + "_with_peer_name",
                                   None)
        f_with_peer_info = getattr(api,
                                   full_message.function + "_with_peer_info",
                                   None)
        if f_with_peer_name is not None:
            result = f_with_peer_name(full_message.data,
                                      connection.get_peername())
        elif f_with_peer_info is not None:
            result = f_with_peer_info(full_message.data,
                                      connection.get_peer_info())
        else:
            f = getattr(api, full_message.function, None)

            if f is None:
                raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE,
                                    [full_message.function])

            result = f(full_message.data)

        if isinstance(result, AsyncGenerator):
            async for outbound_message in result:
                yield connection, outbound_message, global_connections
        else:
            await result

        global_connections.update_connection_time(connection)
    except Exception:
        tb = traceback.format_exc()
        connection.log.error(f"Error, closing connection {connection}. {tb}")
        # TODO: Exception means peer gave us invalid information, so ban this peer.
        global_connections.close(connection)
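
The heart of the dispatcher is a getattr lookup guarded against empty and private names. A minimal sketch with a hypothetical Api class (not from the repo):

import asyncio
from typing import Any

class Api:
    async def ping(self, data: dict) -> dict:
        return {"pong": data["nonce"]}

async def dispatch(api: Any, function: str, data: dict):
    # Reject empty names and private methods, exactly as handle_message does.
    if not function or function.startswith("_"):
        raise ValueError(f"invalid protocol message: {function}")
    f = getattr(api, function, None)
    if f is None:
        raise ValueError(f"unknown function: {function}")
    return await f(data)

print(asyncio.run(dispatch(Api(), "ping", {"nonce": 42})))  # {'pong': 42}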
Code Example #12
    async def respond_proof_of_space(
        self, response: harvester_protocol.RespondProofOfSpace
    ):
        """
        This is a response from the harvester with a proof of space. We check its validity,
        and request a pool partial, a header signature, or both, if the proof is good enough.
        """

        challenge_hash: bytes32 = response.proof.challenge_hash
        challenge_weight: uint128 = self.challenge_to_weight[challenge_hash]
        difficulty: uint64 = uint64(0)
        for posf in self.challenges[challenge_weight]:
            if posf.challenge_hash == challenge_hash:
                difficulty = posf.difficulty
        if difficulty == 0:
            raise RuntimeError("Did not find challenge")

        computed_quality_string = response.proof.verify_and_get_quality_string(
            self.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG
        )
        if computed_quality_string is None:
            raise RuntimeError("Invalid proof of space")

        self.harvester_responses_proofs[
            (response.proof.challenge_hash, response.plot_id, response.response_number)
        ] = response.proof
        self.harvester_responses_proof_hash_to_info[response.proof.get_hash()] = (
            response.proof.challenge_hash,
            response.plot_id,
            response.response_number,
        )

        estimate_min = (
            self.proof_of_time_estimate_ips
            * self.constants.BLOCK_TIME_TARGET
            / self.constants.MIN_ITERS_PROPORTION
        )
        estimate_min = uint64(int(estimate_min))
        number_iters: uint64 = calculate_iterations_quality(
            computed_quality_string,
            response.proof.size,
            difficulty,
            estimate_min,
        )
        estimate_secs: float = number_iters / self.proof_of_time_estimate_ips

        if estimate_secs < self.config["pool_share_threshold"]:
            # TODO: implement pooling
            pass
        if estimate_secs < self.config["propagate_threshold"]:
            pool_pk = bytes(response.proof.pool_public_key)
            if pool_pk not in self.pool_sks_map:
                log.error(
                    f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                )
                return
            pool_target: PoolTarget = PoolTarget(self.pool_target, uint32(0))
            pool_target_signature: G2Element = AugSchemeMPL.sign(
                self.pool_sks_map[pool_pk], bytes(pool_target)
            )

            request2 = farmer_protocol.RequestHeaderHash(
                challenge_hash,
                response.proof,
                pool_target,
                pool_target_signature,
                self.wallet_target,
            )

            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_header_hash", request2),
                Delivery.BROADCAST,
            )
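
The pool-target signature above uses the BLS augmented scheme from the blspy package. A minimal sketch, assuming blspy is installed; the payload below is a placeholder for bytes(pool_target):

from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey

seed = bytes([7] * 32)                      # placeholder seed, never reuse in production
sk: PrivateKey = AugSchemeMPL.key_gen(seed)
payload = b"serialized PoolTarget"          # stand-in for bytes(pool_target)
sig: G2Element = AugSchemeMPL.sign(sk, payload)

# Anyone holding the pool public key can verify the signed target.
pk: G1Element = sk.get_g1()
assert AugSchemeMPL.verify(pk, payload, sig)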
Code Example #13
File: harvester.py Project: snikch/chia-blockchain
    async def new_challenge(self,
                            new_challenge: harvester_protocol.NewChallenge):
        """
        The harvester receives a new challenge from the farmer, and looks up the quality string
        for any proofs of space that are found in the plots. If proofs are found, a
        ChallengeResponse message is sent for each of the proofs found.
        """
        if len(self.pool_public_keys) == 0 or len(
                self.farmer_public_keys) == 0:
            self.cached_challenges = self.cached_challenges[:5]
            self.cached_challenges.insert(0, new_challenge)
            return

        start = time.time()
        assert len(new_challenge.challenge_hash) == 32

        # Refresh plots to see if there are any new ones
        await self._refresh_plots()

        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path,
                            prover: DiskProver) -> Optional[List]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a threadpool.
            try:
                quality_strings = prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash)
            except Exception:
                log.error(
                    "Error using prover object. Reinitializing prover object.")
                try:
                    self.prover = DiskProver(str(filename))
                    quality_strings = self.prover.get_qualities_for_challenge(
                        new_challenge.challenge_hash)
                except Exception:
                    log.error(
                        f"Retry-Error using prover object on {filename}. Giving up."
                    )
                    quality_strings = None
            return quality_strings

        async def lookup_challenge(
                filename: Path, prover: DiskProver
        ) -> List[harvester_protocol.ChallengeResponse]:
            # Executes a DiskProver lookup in a threadpool, and returns responses
            all_responses: List[harvester_protocol.ChallengeResponse] = []
            quality_strings = await loop.run_in_executor(
                self.executor, blocking_lookup, filename, prover)
            if quality_strings is not None:
                for index, quality_str in enumerate(quality_strings):
                    response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                        new_challenge.challenge_hash,
                        str(filename),
                        uint8(index),
                        quality_str,
                        prover.get_size(),
                    )
                    all_responses.append(response)
            return all_responses

        awaitables = []
        for filename, plot_info in self.provers.items():
            if filename.exists() and ProofOfSpace.can_create_proof(
                    plot_info.prover.get_id(),
                    new_challenge.challenge_hash,
                    self.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
            ):
                awaitables.append(lookup_challenge(filename, plot_info.prover))

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        total_proofs_found = 0
        for sublist_awaitable in asyncio.as_completed(awaitables):
            for response in await sublist_awaitable:
                total_proofs_found += 1
                yield OutboundMessage(
                    NodeType.FARMER,
                    Message("challenge_response", response),
                    Delivery.RESPOND,
                )
        log.info(
            f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
            f" Found {total_proofs_found} proofs. Time: {time.time() - start}. "
            f"Total {len(self.provers)} plots")
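
The lookup pattern above (blocking disk reads pushed to a thread pool, results consumed as they finish) is independent of the plot format. A minimal sketch with a stand-in blocking function:

import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

def blocking_lookup(name: str) -> str:
    time.sleep(0.05)  # stand-in for a DiskProver disk read
    return f"quality-for-{name}"

async def main():
    loop = asyncio.get_running_loop()
    executor = ThreadPoolExecutor()
    tasks = [loop.run_in_executor(executor, blocking_lookup, n)
             for n in ("plot-a", "plot-b", "plot-c")]
    # as_completed yields results in finish order, not submission order.
    for fut in asyncio.as_completed(tasks):
        print(await fut)

asyncio.run(main())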
Code Example #14
File: timelord.py Project: mariano54/chia-blockchain
    async def _do_process_communication(
        self, challenge_hash, challenge_weight, ip, reader, writer
    ):
        disc: int = create_discriminant(
            challenge_hash, self.constants["DISCRIMINANT_SIZE_BITS"]
        )

        # Send the discriminant with a zero-padded three-character length prefix
        prefix = str(len(str(disc))).zfill(3)
        writer.write((prefix + str(disc)).encode())
        await writer.drain()

        try:
            ok = await reader.readexactly(2)
        except Exception as e:  # includes IncompleteReadError and ConnectionResetError
            log.warning(f"{type(e)} {e}")
            async with self.lock:
                if challenge_hash not in self.done_discriminants:
                    self.done_discriminants.append(challenge_hash)
            return

        if ok.decode() != "OK":
            return

        log.info("Got handshake with VDF client.")

        async with self.lock:
            self.active_discriminants[challenge_hash] = (writer, challenge_weight, ip)
            self.active_discriminants_start_time[challenge_hash] = time.time()

        asyncio.create_task(self._send_iterations(challenge_hash, writer))

        # Listen to the client until "STOP" is received.
        while True:
            try:
                data = await reader.readexactly(4)
            except Exception as e:  # includes IncompleteReadError and ConnectionResetError
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    if challenge_hash in self.active_discriminants:
                        del self.active_discriminants[challenge_hash]
                    if challenge_hash in self.active_discriminants_start_time:
                        del self.active_discriminants_start_time[challenge_hash]
                    if challenge_hash not in self.done_discriminants:
                        self.done_discriminants.append(challenge_hash)
                break

            msg = ""
            try:
                msg = data.decode()
            except Exception as e:
                log.error(f"Exception while decoding data {e}")

            if msg == "STOP":
                log.info(f"Stopped client running on ip {ip}.")
                async with self.lock:
                    writer.write(b"ACK")
                    await writer.drain()
                break
            else:
                try:
                    # This must be a proof; the 4 bytes read above are a length prefix
                    length = int.from_bytes(data, "big")
                    proof = await reader.readexactly(length)
                    stdout_bytes_io: io.BytesIO = io.BytesIO(
                        bytes.fromhex(proof.decode())
                    )
                except Exception as e:  # includes IncompleteReadError and ConnectionResetError
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        if challenge_hash in self.active_discriminants:
                            del self.active_discriminants[challenge_hash]
                        if challenge_hash in self.active_discriminants_start_time:
                            del self.active_discriminants_start_time[challenge_hash]
                        if challenge_hash not in self.done_discriminants:
                            self.done_discriminants.append(challenge_hash)
                    break

                iterations_needed = uint64(
                    int.from_bytes(stdout_bytes_io.read(8), "big", signed=True)
                )

                y_size_bytes = stdout_bytes_io.read(8)
                y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))

                y_bytes = stdout_bytes_io.read(y_size)

                proof_bytes: bytes = stdout_bytes_io.read()

                # Verifies our own proof just in case
                a = int.from_bytes(y_bytes[:129], "big", signed=True)
                b = int.from_bytes(y_bytes[129:], "big", signed=True)

                output = ClassgroupElement(int512(a), int512(b))

                proof_of_time = ProofOfTime(
                    challenge_hash,
                    iterations_needed,
                    output,
                    self.config["n_wesolowski"],
                    proof_bytes,
                )

                if not proof_of_time.is_valid(self.constants["DISCRIMINANT_SIZE_BITS"]):
                    log.error("Invalid proof of time")

                response = timelord_protocol.ProofOfTimeFinished(proof_of_time)

                await self._update_avg_ips(challenge_hash, iterations_needed, ip)

                async with self.lock:
                    self.proofs_to_write.append(
                        OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("proof_of_time_finished", response),
                            Delivery.BROADCAST,
                        )
                    )

                await self._update_proofs_count(challenge_weight)
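
The proof payload framing used above is: a 4-byte big-endian length, then a hex string that decodes to 8 bytes of iterations, 8 bytes of y-size, y itself, and whatever remains as the proof. A minimal decoder for that layout (field names are descriptive, not from the repo):

import io

def parse_vdf_payload(payload: bytes):
    # payload is the already hex-decoded body, i.e. bytes.fromhex(proof.decode())
    stream = io.BytesIO(payload)
    iterations = int.from_bytes(stream.read(8), "big", signed=True)
    y_size = int.from_bytes(stream.read(8), "big", signed=True)
    y_bytes = stream.read(y_size)
    proof_bytes = stream.read()
    return iterations, y_bytes, proof_bytes

payload = (100).to_bytes(8, "big") + (2).to_bytes(8, "big") + b"yy" + b"proof"
print(parse_vdf_payload(payload))  # (100, b'yy', b'proof')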
Code Example #15
    async def new_challenge(self,
                            new_challenge: harvester_protocol.NewChallenge):
        """
        The harvester receives a new challenge from the farmer, and looks up the quality string
        for any proofs of space that are found in the plots. If proofs are found, a
        ChallengeResponse message is sent for each of the proofs found.
        """
        start = time.time()
        challenge_size = len(new_challenge.challenge_hash)
        if challenge_size != 32:
            raise ValueError(
                f"Invalid challenge size {challenge_size}, 32 was expected")

        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path,
                            prover: DiskProver) -> Optional[List]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a threadpool.
            try:
                quality_strings = prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash)
            except RuntimeError:
                log.error(
                    "Error using prover object. Reinitializing prover object.")
                try:
                    self.prover = DiskProver(str(filename))
                    quality_strings = self.prover.get_qualities_for_challenge(
                        new_challenge.challenge_hash)
                except RuntimeError:
                    log.error(
                        f"Retry-Error using prover object on {filename}. Giving up."
                    )
                    quality_strings = None
            return quality_strings

        async def lookup_challenge(
                filename: Path, prover: DiskProver
        ) -> List[harvester_protocol.ChallengeResponse]:
            # Executes a DiskProver lookup in a threadpool, and returns responses
            all_responses: List[harvester_protocol.ChallengeResponse] = []
            quality_strings = await loop.run_in_executor(
                self.executor, blocking_lookup, filename, prover)
            if quality_strings is not None:
                for index, quality_str in enumerate(quality_strings):
                    self.challenge_hashes[quality_str] = (
                        new_challenge.challenge_hash,
                        filename,
                        uint8(index),
                    )
                    response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                        new_challenge.challenge_hash, quality_str,
                        prover.get_size())
                    all_responses.append(response)
            return all_responses

        awaitables = [
            lookup_challenge(filename, prover)
            for filename, prover in self.provers.items()
        ]

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        for sublist_awaitable in asyncio.as_completed(awaitables):
            for response in await sublist_awaitable:
                yield OutboundMessage(
                    NodeType.FARMER,
                    Message("challenge_response", response),
                    Delivery.RESPOND,
                )
        log.info(
            f"Time taken to lookup qualities in {len(self.provers)} plots: {time.time() - start}"
        )
Code Example #16
 async def on_connect():
     msg = Message("request_peers", peer_protocol.RequestPeers())
     yield OutboundMessage(NodeType.INTRODUCER, msg,
                           Delivery.RESPOND)
Code Example #17
File: farmer.py Project: mariano54/chia-blockchain
    async def respond_proof_of_space(
            self, response: harvester_protocol.RespondProofOfSpace):
        """
        This is a response from the harvester with a proof of space. We check its validity,
        and request a pool partial, a header signature, or both, if the proof is good enough.
        """

        pool_sks: List[PrivateKey] = [
            PrivateKey.from_bytes(bytes.fromhex(ce))
            for ce in self.key_config["pool_sks"]
        ]
        if response.proof.pool_pubkey not in [
                sk.get_public_key() for sk in pool_sks
        ]:
            raise RuntimeError("Pool pubkey not in list of approved keys")

        challenge_hash: bytes32 = self.harvester_responses_challenge[
            response.quality_string]
        challenge_weight: uint128 = self.challenge_to_weight[challenge_hash]
        challenge_height: uint32 = self.challenge_to_height[challenge_hash]
        new_proof_height: uint32 = uint32(challenge_height + 1)
        difficulty: uint64 = uint64(0)
        for posf in self.challenges[challenge_weight]:
            if posf.challenge_hash == challenge_hash:
                difficulty = posf.difficulty
        if difficulty == 0:
            raise RuntimeError("Did not find challenge")

        computed_quality_string = response.proof.verify_and_get_quality_string()
        if response.quality_string != computed_quality_string:
            raise RuntimeError("Invalid quality for proof of space")

        self.harvester_responses_proofs[
            response.quality_string] = response.proof
        self.harvester_responses_proof_hash_to_qual[
            response.proof.get_hash()] = response.quality_string

        estimate_min = (self.proof_of_time_estimate_ips *
                        self.constants["BLOCK_TIME_TARGET"] /
                        self.constants["MIN_ITERS_PROPORTION"])
        number_iters: uint64 = calculate_iterations_quality(
            computed_quality_string,
            response.proof.size,
            difficulty,
            estimate_min,
        )
        estimate_secs: float = number_iters / self.proof_of_time_estimate_ips

        if estimate_secs < self.config["pool_share_threshold"]:
            request1 = harvester_protocol.RequestPartialProof(
                response.quality_string,
                bytes.fromhex(self.key_config["wallet_target"]),
            )
            yield OutboundMessage(
                NodeType.HARVESTER,
                Message("request_partial_proof", request1),
                Delivery.RESPOND,
            )
        if estimate_secs < self.config["propagate_threshold"]:
            if new_proof_height not in self.coinbase_rewards:
                log.error(
                    f"Don't have coinbase transaction for height {new_proof_height}, cannot submit PoS"
                )
                return

            coinbase, signature = self.coinbase_rewards[new_proof_height]
            request2 = farmer_protocol.RequestHeaderHash(
                challenge_hash,
                coinbase,
                signature,
                bytes.fromhex(self.key_config["wallet_target"]),
                response.proof,
            )

            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_header_hash", request2),
                Delivery.BROADCAST,
            )
Code Example #18
    async def request_header_hash(
        self, request: farmer_protocol.RequestHeaderHash
    ) -> AsyncGenerator[OutboundMessage, None]:
        """
        Creates a block body and header, with the proof of space, coinbase, and fee targets provided
        by the farmer, and sends the hash of the header data back to the farmer.
        """
        plot_seed: bytes32 = request.proof_of_space.get_plot_seed()

        # Checks that the proof of space is valid
        quality_string: bytes = Verifier().validate_proof(
            plot_seed,
            request.proof_of_space.size,
            request.challenge_hash,
            bytes(request.proof_of_space.proof),
        )
        assert quality_string

        async with self.store.lock:
            # Retrieves the correct head for the challenge
            heads: List[HeaderBlock] = self.blockchain.get_current_tips()
            target_head: Optional[HeaderBlock] = None
            for head in heads:
                assert head.challenge
                if head.challenge.get_hash() == request.challenge_hash:
                    target_head = head
            if target_head is None:
                # TODO: should we still allow the farmer to farm?
                log.warning(
                    f"Challenge hash: {request.challenge_hash} not in one of three heads"
                )
                return

            # TODO: use mempool to grab best transactions, for the selected head
            transactions_generator: bytes32 = sha256(b"").digest()
            # TODO: calculate the fees of these transactions
            fees: FeesTarget = FeesTarget(request.fees_target_puzzle_hash,
                                          uint64(0))
            aggregate_sig: Signature = PrivateKey.from_seed(b"12345").sign(
                b"anything")
            # TODO: calculate aggregate signature based on transactions
            # TODO: calculate cost of all transactions
            cost = uint64(0)

            # Creates a block with transactions, coinbase, and fees
            body: Body = Body(
                request.coinbase,
                request.coinbase_signature,
                fees,
                aggregate_sig,
                transactions_generator,
                cost,
            )

            # Creates the block header
            prev_header_hash: bytes32 = target_head.header.get_hash()
            timestamp: uint64 = uint64(int(time.time()))

            # TODO: use a real BIP158 filter based on transactions
            filter_hash: bytes32 = token_bytes(32)
            proof_of_space_hash: bytes32 = request.proof_of_space.get_hash()
            body_hash: bytes32 = body.get_hash()
            extension_data: bytes32 = bytes32([0] * 32)
            block_header_data: HeaderData = HeaderData(
                prev_header_hash,
                timestamp,
                filter_hash,
                proof_of_space_hash,
                body_hash,
                extension_data,
            )

            block_header_data_hash: bytes32 = block_header_data.get_hash()

            # Stores this block so we can submit it to the blockchain after it's signed by the harvester
            await self.store.add_candidate_block(proof_of_space_hash, body,
                                                 block_header_data,
                                                 request.proof_of_space)

        message = farmer_protocol.HeaderHash(proof_of_space_hash,
                                             block_header_data_hash)
        yield OutboundMessage(NodeType.FARMER, Message("header_hash", message),
                              Delivery.RESPOND)
Code Example #19
File: farmer.py Project: mariano54/chia-blockchain
    async def proof_of_space_finalized(
            self,
            proof_of_space_finalized: farmer_protocol.ProofOfSpaceFinalized):
        """
        Full node notifies the farmer that a proof of space has been completed. It gets added to the
        challenges list at that weight, and the current weight is updated if necessary.
        """
        get_proofs: bool = False
        if (proof_of_space_finalized.weight >= self.current_weight
                and proof_of_space_finalized.challenge_hash
                not in self.seen_challenges):
            # Only get proofs for new challenges, at a current or new weight
            get_proofs = True
            if proof_of_space_finalized.weight > self.current_weight:
                self.current_weight = proof_of_space_finalized.weight

            # TODO: ask the pool for this information

            pool_sks: List[PrivateKey] = [
                PrivateKey.from_bytes(bytes.fromhex(ce))  # type: ignore # noqa
                for ce in self.key_config["pool_sks"]
            ]

            coinbase_reward = uint64(
                calculate_block_reward(
                    uint32(proof_of_space_finalized.height + 1)))

            coinbase_coin, coinbase_signature = create_coinbase_coin_and_signature(
                proof_of_space_finalized.height + 1,
                bytes.fromhex(self.key_config["pool_target"]),
                coinbase_reward,
                pool_sks[0],
            )

            self.coinbase_rewards[uint32(proof_of_space_finalized.height + 1)] = (
                coinbase_coin,
                coinbase_signature,
            )

            log.info(f"\tCurrent weight set to {self.current_weight}")
        self.seen_challenges.add(proof_of_space_finalized.challenge_hash)
        if proof_of_space_finalized.weight not in self.challenges:
            self.challenges[proof_of_space_finalized.weight] = [
                proof_of_space_finalized
            ]
        else:
            self.challenges[proof_of_space_finalized.weight].append(
                proof_of_space_finalized)
        self.challenge_to_weight[
            proof_of_space_finalized.challenge_hash
        ] = proof_of_space_finalized.weight
        self.challenge_to_height[
            proof_of_space_finalized.challenge_hash
        ] = proof_of_space_finalized.height

        if get_proofs:
            message = harvester_protocol.NewChallenge(
                proof_of_space_finalized.challenge_hash)
            yield OutboundMessage(
                NodeType.HARVESTER,
                Message("new_challenge", message),
                Delivery.BROADCAST,
            )
Code Example #20
    async def block(
            self, block: peer_protocol.Block
    ) -> AsyncGenerator[OutboundMessage, None]:
        """
        Receive a full block from a peer full node (or ourselves).
        """
        header_hash = block.block.header_block.header.get_hash()

        async with self.store.lock:
            if await self.store.get_sync_mode():
                # Add the block to our potential tips list
                await self.store.add_potential_tip(block.block)
                return

            # Tries to add the block to the blockchain
            added: ReceiveBlockResult = await self.blockchain.receive_block(
                block.block)
        if added == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
            return
        elif added == ReceiveBlockResult.INVALID_BLOCK:
            log.warning(
                f"Block {header_hash} at height {block.block.height} is invalid."
            )
            return
        elif added == ReceiveBlockResult.DISCONNECTED_BLOCK:
            log.warning(f"Disconnected block {header_hash}")
            async with self.store.lock:
                tip_height = min([
                    head.height for head in self.blockchain.get_current_tips()
                ])

            if (block.block.height >
                    tip_height + self.config["sync_blocks_behind_threshold"]):
                async with self.store.lock:
                    if await self.store.get_sync_mode():
                        return
                    await self.store.clear_sync_info()
                    await self.store.add_potential_tip(block.block)
                    await self.store.set_sync_mode(True)
                log.info(
                    f"We are too far behind this block. Our height is {tip_height} and block is at "
                    f"{block.block.height}")
                try:
                    # Performs sync, and catch exceptions so we don't close the connection
                    async for msg in self._sync():
                        yield msg
                except asyncio.CancelledError:
                    log.warning("Syncing failed, CancelledError")
                except BaseException as e:
                    log.warning(f"Error {type(e)}{e} with syncing")
                finally:
                    async for msg in self._finish_sync():
                        yield msg

            elif block.block.height >= tip_height - 3:
                log.info(
                    f"We have received a disconnected block at height {block.block.height}, current tip is {tip_height}"
                )
                msg = Message(
                    "request_block",
                    peer_protocol.RequestBlock(block.block.prev_header_hash),
                )
                async with self.store.lock:
                    await self.store.add_disconnected_block(block.block)
                yield OutboundMessage(NodeType.FULL_NODE, msg,
                                      Delivery.RESPOND)
            return
        elif added == ReceiveBlockResult.ADDED_TO_HEAD:
            # Only propagate blocks which extend the blockchain (becomes one of the heads)
            ips_changed: bool = False
            async with self.store.lock:
                log.info(
                    f"Updated heads, new heights: {[b.height for b in self.blockchain.get_current_tips()]}"
                )

                difficulty = await self.blockchain.get_next_difficulty(
                    block.block.prev_header_hash)
                next_vdf_ips = await self.blockchain.get_next_ips(
                    block.block.header_hash)
                log.info(f"Difficulty {difficulty} IPS {next_vdf_ips}")
                if next_vdf_ips != await self.store.get_proof_of_time_estimate_ips():
                    await self.store.set_proof_of_time_estimate_ips(
                        next_vdf_ips)
                    ips_changed = True
            if ips_changed:
                rate_update = farmer_protocol.ProofOfTimeRate(next_vdf_ips)
                log.error(f"Sending proof of time rate {next_vdf_ips}")
                yield OutboundMessage(
                    NodeType.FARMER,
                    Message("proof_of_time_rate", rate_update),
                    Delivery.BROADCAST,
                )

            assert block.block.header_block.proof_of_time
            assert block.block.header_block.challenge
            pos_quality = (block.block.header_block.proof_of_space.
                           verify_and_get_quality())

            farmer_request = farmer_protocol.ProofOfSpaceFinalized(
                block.block.header_block.challenge.get_hash(),
                block.block.height,
                block.block.weight,
                pos_quality,
                difficulty,
            )
            timelord_request = timelord_protocol.ChallengeStart(
                block.block.header_block.challenge.get_hash(),
                block.block.header_block.challenge.total_weight,
            )
            # Tell timelord to stop previous challenge and start with new one
            yield OutboundMessage(
                NodeType.TIMELORD,
                Message("challenge_start", timelord_request),
                Delivery.BROADCAST,
            )

            # Tell full nodes about the new block
            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("block", block),
                Delivery.BROADCAST_TO_OTHERS,
            )

            # Tell farmer about the new block
            yield OutboundMessage(
                NodeType.FARMER,
                Message("proof_of_space_finalized", farmer_request),
                Delivery.BROADCAST,
            )

        elif added == ReceiveBlockResult.ADDED_AS_ORPHAN:
            assert block.block.header_block.proof_of_time
            assert block.block.header_block.challenge
            log.info(
                f"Received orphan block of height {block.block.header_block.challenge.height}"
            )
        else:
            # Should never reach here; all the cases are covered
            assert False

        # Recursively process the next block if we have it
        async with self.store.lock:
            next_block: Optional[
                FullBlock] = await self.store.get_disconnected_block_by_prev(
                    block.block.header_hash)
        if next_block is not None:
            async for msg in self.block(peer_protocol.Block(next_block)):
                yield msg

        async with self.store.lock:
            # Removes all temporary data for old blocks
            lowest_tip = min(tip.height
                             for tip in self.blockchain.get_current_tips())
            clear_height = uint32(max(0, lowest_tip - 30))
            await self.store.clear_candidate_blocks_below(clear_height)
            await self.store.clear_unfinished_blocks_below(clear_height)
            await self.store.clear_disconnected_blocks_below(clear_height)
Code Example #21
    async def _add_to_request_sets(self) -> OutboundMessageGenerator:
        """
        Refreshes the pointers for how far we have validated and how far we have downloaded. Then goes
        through all peers and requests the blocks we have not requested yet, or that were requested
        from a peer that did not respond in time or disconnected.
        """
        if not self.sync_store.get_sync_mode():
            return

        #     fork       fully validated                           MAX_GAP   target
        # $$$$$X$$$$$$$$$$$$$$$X================----==---=--====---=--X------->
        #      $
        #      $
        #      $$$$$$$$$$$$$$$$$$$$$$$$>
        #                         prev tip

        # Refresh the fully_validated_up_to pointer
        target_height = len(self.header_hashes) - 1
        for height in range(self.fully_validated_up_to + 1, target_height + 1):
            if self.header_hashes[height] in self.blockchain.headers:
                self.fully_validated_up_to = uint32(height)
            else:
                break

        # Number of request slots
        free_slots = 0
        for node_id, request_set in self.current_outbound_sets.items():
            free_slots += self.MAX_REQUESTS_PER_PEER - len(request_set)

        to_send: List[uint32] = []
        # Finds block heights to request, up to MAX_GAP past the fully validated pointer
        for height in range(
                self.fully_validated_up_to + 1,
                min(self.fully_validated_up_to + self.MAX_GAP + 1,
                    target_height + 1),
        ):
            if len(to_send) == free_slots:
                # No more slots to send to any peers
                break
            header_hash = self.header_hashes[uint32(height)]
            if header_hash in self.blockchain.headers:
                # Avoids downloading blocks and headers that we already have
                continue

            if self.potential_blocks_received[uint32(height)].is_set():
                continue
            already_requested = False
            # If we have asked for this block to some peer, we don't want to ask for it again yet.
            for node_id_2, request_set_2 in self.current_outbound_sets.items():
                if self.header_hashes[height] in request_set_2:
                    already_requested = True
                    break
            if already_requested:
                continue

            to_send.append(uint32(height))

        # Sort by the peers that have the least outgoing messages
        outbound_sets_list = list(self.current_outbound_sets.items())
        outbound_sets_list.sort(key=lambda x: len(x[1]))
        index = 0
        to_yield: List[Any] = []
        for height in to_send:
            # Find the next peer with an empty slot. There must be an empty slot: to_send
            # includes at most free_slots entries, and current_outbound_sets cannot change,
            # since there is no await after free_slots is computed (and thus no context switch).
            while (len(outbound_sets_list[index % len(outbound_sets_list)][1])
                   == self.MAX_REQUESTS_PER_PEER):
                index += 1

            # Add to peer request
            node_id, request_set = outbound_sets_list[index %
                                                      len(outbound_sets_list)]
            request_set[self.header_hashes[height]] = uint64(int(time.time()))

            to_yield.append(
                full_node_protocol.RequestBlock(height,
                                                self.header_hashes[height]))

        for request in to_yield:
            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_block", request),
                Delivery.SPECIFIC,
                node_id,
            )
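
The peer-selection loop at the end boils down to: sort peers by outstanding load, then round-robin each height into the next peer with a free slot. A standalone sketch (names are illustrative; like _add_to_request_sets, the caller must cap len(heights) at the number of free slots, or the while loop would spin forever):

from typing import Dict, List, Set, Tuple

MAX_REQUESTS_PER_PEER = 2

def assign_requests(heights: List[int],
                    outbound: Dict[str, Set[int]]) -> List[Tuple[str, int]]:
    peers = sorted(outbound.items(), key=lambda kv: len(kv[1]))
    assignments: List[Tuple[str, int]] = []
    index = 0
    for height in heights:
        # Guaranteed to terminate because the caller left enough free slots.
        while len(peers[index % len(peers)][1]) == MAX_REQUESTS_PER_PEER:
            index += 1
        node_id, request_set = peers[index % len(peers)]
        request_set.add(height)
        assignments.append((node_id, height))
    return assignments

print(assign_requests([10, 11], {"peer-a": {1, 2}, "peer-b": set()}))
# [('peer-b', 10), ('peer-b', 11)] -- peer-a is already full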