Example #1
async def validate_unfinished_block_header(
    constants: Dict,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block_header: Header,
    proof_of_space: ProofOfSpace,
    prev_header_block: Optional[HeaderBlock],
    pre_validated: bool = False,
    pos_quality_string: Optional[bytes32] = None,
) -> Tuple[Optional[Err], Optional[uint64]]:
    """
    Block validation algorithm. Returns the number of VDF iterations that this block's
    proof of time must have, if the candidate block is fully valid (except for proof of
    time). The same as validate_block, but without proof of time and challenge validation.
    If the block is invalid, an error code is returned.

    Does NOT validate transactions and fees.
    """
    if not pre_validated:
        # 1. The hash of the proof of space must match header_data.proof_of_space_hash
        if proof_of_space.get_hash() != block_header.data.proof_of_space_hash:
            return (Err.INVALID_POSPACE_HASH, None)

        # 2. The coinbase signature must be valid, according to the pool public key
        pair = block_header.data.coinbase_signature.PkMessagePair(
            proof_of_space.pool_pubkey,
            block_header.data.coinbase.name(),
        )

        if not block_header.data.coinbase_signature.validate([pair]):
            return (Err.INVALID_COINBASE_SIGNATURE, None)

        # 3. Check harvester signature of header data is valid based on harvester key
        if not block_header.harvester_signature.verify(
            [blspy.Util.hash256(block_header.data.get_hash())],
            [proof_of_space.plot_pubkey],
        ):
            return (Err.INVALID_HARVESTER_SIGNATURE, None)

    # 4. If not genesis, the previous block must exist
    if prev_header_block is not None and block_header.prev_header_hash not in headers:
        return (Err.DOES_NOT_EXTEND, None)

    # 5. If not genesis, the timestamp must be >= the average timestamp of the last 11 blocks,
    # and less than 2 hours in the future (if block height < 11, average all previous blocks).
    # The average is the sum, integer divided by the number of timestamps.
    if prev_header_block is not None:
        last_timestamps: List[uint64] = []
        curr = prev_header_block.header
        while len(last_timestamps) < constants["NUMBER_OF_TIMESTAMPS"]:
            last_timestamps.append(curr.data.timestamp)
            fetched = headers.get(curr.prev_header_hash, None)
            if not fetched:
                break
            curr = fetched
        if len(last_timestamps) != constants["NUMBER_OF_TIMESTAMPS"]:
            # For blocks 1 to 10, average timestamps of all previous blocks
            assert curr.height == 0
        prev_time: uint64 = uint64(
            int(sum(last_timestamps) // len(last_timestamps)))
        if block_header.data.timestamp < prev_time:
            return (Err.TIMESTAMP_TOO_FAR_IN_PAST, None)
        if block_header.data.timestamp > time.time() + constants["MAX_FUTURE_TIME"]:
            return (Err.TIMESTAMP_TOO_FAR_IN_FUTURE, None)

    # 7. Extension data must be valid, if any is present

    # Compute challenge of parent
    challenge_hash: bytes32
    if prev_header_block is not None:
        challenge: Challenge = prev_header_block.challenge
        challenge_hash = challenge.get_hash()
        # 8. Check challenge hash of prev is the same as in pos
        if challenge_hash != proof_of_space.challenge_hash:
            return (Err.INVALID_POSPACE_CHALLENGE, None)

    # 10. The proof of space must be valid on the challenge
    if pos_quality_string is None:
        pos_quality_string = proof_of_space.verify_and_get_quality_string()
        if not pos_quality_string:
            return (Err.INVALID_POSPACE, None)

    if prev_header_block is not None:
        # 11. If not genesis, the height on the previous block must be one less than on this block
        if block_header.height != prev_header_block.height + 1:
            return (Err.INVALID_HEIGHT, None)
    else:
        # 12. If genesis, the height must be 0
        if block_header.height != 0:
            return (Err.INVALID_HEIGHT, None)

    # 13. The coinbase reward must match the block schedule
    coinbase_reward = calculate_block_reward(block_header.height)
    if coinbase_reward != block_header.data.coinbase.amount:
        return (Err.INVALID_COINBASE_AMOUNT, None)

    # 13b. The coinbase parent id must be the block height, encoded as 32 big-endian bytes
    if block_header.data.coinbase.parent_coin_info != block_header.height.to_bytes(
            32, "big"):
        return (Err.INVALID_COINBASE_PARENT, None)

    # 13c. The fees coin parent id must be hash(hash(height))
    if block_header.data.fees_coin.parent_coin_info != std_hash(
            std_hash(uint32(block_header.height))):
        return (Err.INVALID_FEES_COIN_PARENT, None)

    difficulty: uint64
    if prev_header_block is not None:
        difficulty = get_next_difficulty(constants, headers, height_to_hash,
                                         prev_header_block.header)
        min_iters = get_next_min_iters(constants, headers, height_to_hash,
                                       prev_header_block)
    else:
        difficulty = uint64(constants["DIFFICULTY_STARTING"])
        min_iters = uint64(constants["MIN_ITERS_STARTING"])

    number_of_iters: uint64 = calculate_iterations_quality(
        pos_quality_string,
        proof_of_space.size,
        difficulty,
        min_iters,
    )

    assert count_significant_bits(difficulty) <= constants["SIGNIFICANT_BITS"]
    assert count_significant_bits(min_iters) <= constants["SIGNIFICANT_BITS"]

    if prev_header_block is not None:
        # 17. If not genesis, the total weight must be the parent weight + difficulty
        if block_header.weight != prev_header_block.weight + difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 18. If not genesis, the total iters must be parent iters + number_iters
        if (block_header.data.total_iters !=
                prev_header_block.header.data.total_iters + number_of_iters):
            return (Err.INVALID_TOTAL_ITERS, None)
    else:
        # 19. If genesis, the total weight must be starting difficulty
        if block_header.weight != difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 20. If genesis, the total iters must be number iters
        if block_header.data.total_iters != number_of_iters:
            return (Err.INVALID_TOTAL_ITERS, None)

    return (None, number_of_iters)
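
The timestamp rule in step 5 is easy to exercise in isolation. Below is a minimal, self-contained sketch of the same check; NUMBER_OF_TIMESTAMPS and MAX_FUTURE_TIME mirror the constants dict above, while the function itself is illustrative, not the real validator:

import time
from typing import List, Optional

NUMBER_OF_TIMESTAMPS = 11   # mirrors constants["NUMBER_OF_TIMESTAMPS"]
MAX_FUTURE_TIME = 2 * 3600  # mirrors constants["MAX_FUTURE_TIME"] (2 hours)

def check_timestamp(new_timestamp: int, last_timestamps: List[int]) -> Optional[str]:
    # A block's timestamp must be >= the integer average of the previous
    # (up to 11) timestamps, and at most 2 hours in the future.
    prev_time = sum(last_timestamps) // len(last_timestamps)
    if new_timestamp < prev_time:
        return "TIMESTAMP_TOO_FAR_IN_PAST"
    if new_timestamp > time.time() + MAX_FUTURE_TIME:
        return "TIMESTAMP_TOO_FAR_IN_FUTURE"
    return None

# Timestamps 100..110 average to 105, so 104 is rejected and 105 is accepted.
assert check_timestamp(104, list(range(100, 111))) == "TIMESTAMP_TOO_FAR_IN_PAST"
assert check_timestamp(105, list(range(100, 111))) is None
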
Example #2
    async def test_basic_database(self):
        blocks = bt.get_consecutive_blocks(test_constants, 9, [], 9, b"0")

        db = FullNodeStore("fndb_test")
        await db._clear_database()
        genesis = FullBlock.from_bytes(constants["GENESIS_BLOCK"])

        # Save/get block
        for block in blocks:
            await db.add_block(block)
            assert block == await db.get_block(block.header_hash)

        # Save/get sync
        for sync_mode in (False, True):
            await db.set_sync_mode(sync_mode)
            assert sync_mode == await db.get_sync_mode()

        # clear sync info
        await db.clear_sync_info()

        # add/get potential tip, get potential tips num
        await db.add_potential_tip(blocks[6])
        assert blocks[6] == await db.get_potential_tip(blocks[6].header_hash)

        # add/get potential trunk
        header = genesis.header_block
        db.add_potential_header(header)
        assert db.get_potential_header(genesis.height) == header

        # Add potential block
        await db.add_potential_block(genesis)
        assert genesis == await db.get_potential_block(uint32(0))

        # Add/get candidate block
        assert await db.get_candidate_block(0) is None
        partial = (
            blocks[5].body,
            blocks[5].header_block.header.data,
            blocks[5].header_block.proof_of_space,
        )
        await db.add_candidate_block(blocks[5].header_hash, *partial)
        assert await db.get_candidate_block(blocks[5].header_hash) == partial
        await db.clear_candidate_blocks_below(uint32(8))
        assert await db.get_candidate_block(blocks[5].header_hash) is None

        # Add/get unfinished block
        i = 1
        for block in blocks:
            key = (block.header_hash, uint64(1000))
            assert await db.get_unfinished_block(key) is None
            await db.add_unfinished_block(key, block)
            assert await db.get_unfinished_block(key) == block
            assert len(await db.get_unfinished_blocks()) == i
            i += 1
        await db.clear_unfinished_blocks_below(uint32(5))
        assert len(await db.get_unfinished_blocks()) == 5

        # Set/get unfinished block leader ("key" is the last key from the loop above)
        assert db.get_unfinished_block_leader() == (0, (1 << 64) - 1)
        db.set_unfinished_block_leader(key)
        assert db.get_unfinished_block_leader() == key

        assert await db.get_disconnected_block(blocks[0].prev_header_hash) is None
        # Disconnected blocks
        for block in blocks:
            await db.add_disconnected_block(block)
            assert await db.get_disconnected_block(block.prev_header_hash) == block

        await db.clear_disconnected_blocks_below(uint32(5))
        assert await db.get_disconnected_block(blocks[4].prev_header_hash) is None

        h_hash_1 = bytes32(token_bytes(32))
        assert not db.seen_unfinished_block(h_hash_1)
        assert db.seen_unfinished_block(h_hash_1)
        db.clear_seen_unfinished_blocks()
        assert not db.seen_unfinished_block(h_hash_1)

        assert not db.seen_block(h_hash_1)
        assert db.seen_block(h_hash_1)
        db.clear_seen_blocks()
        assert not db.seen_block(h_hash_1)
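
The seen_block / seen_unfinished_block calls above are check-and-mark operations: the first call for a given hash returns False and records it, later calls return True until cleared. A minimal sketch of that pattern, assuming a plain in-memory set (the real store may bound its size):

from typing import Set

class SeenSet:
    def __init__(self) -> None:
        self._seen: Set[bytes] = set()

    def seen(self, header_hash: bytes) -> bool:
        # Report whether the hash was seen before, and mark it as seen.
        if header_hash in self._seen:
            return True
        self._seen.add(header_hash)
        return False

    def clear(self) -> None:
        self._seen.clear()

s = SeenSet()
h = b"\x01" * 32
assert not s.seen(h)  # first sighting
assert s.seen(h)      # now a duplicate
s.clear()
assert not s.seen(h)
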
Example #3
def get_next_difficulty(
    constants: ConsensusConstants,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block: Header,
) -> uint64:
    """
    Returns the difficulty of the next block that extends onto block.
    Used to calculate the number of iterations. When changing this, also change the implementation
    in wallet_state_manager.py.
    """

    next_height: uint32 = uint32(block.height + 1)
    if next_height < constants.DIFFICULTY_EPOCH:
        # We are in the first epoch
        return uint64(constants.DIFFICULTY_STARTING)

    # Epochs are defined as intervals of DIFFICULTY_EPOCH blocks, inclusive and indexed at 0.
    # For example, [0-2047], [2048-4095], etc. The difficulty changes DIFFICULTY_DELAY into the
    # epoch, as opposed to the first block (as in Bitcoin).
    elif next_height % constants.DIFFICULTY_EPOCH != constants.DIFFICULTY_DELAY:
        # Not at a point where difficulty would change
        prev_block: Header = headers[block.prev_header_hash]
        return uint64(block.weight - prev_block.weight)

    #       old diff                  curr diff       new diff
    # ----------|-----|----------------------|-----|-----...
    #           h1    h2                     h3   i-1
    # Height1 is the last block 2 epochs ago, so we can include the time to mine the 1st block in the previous epoch
    height1 = uint32(next_height - constants.DIFFICULTY_EPOCH -
                     constants.DIFFICULTY_DELAY - 1)
    # Height2 is the DIFFICULTY_DELAY-th block in the previous epoch
    height2 = uint32(next_height - constants.DIFFICULTY_EPOCH - 1)
    # Height3 is the last block in the previous epoch
    height3 = uint32(next_height - constants.DIFFICULTY_DELAY - 1)

    # h1 to h2 timestamps are mined on the previous difficulty, while h2 to h3 timestamps
    # are mined on the current difficulty

    block1, block2, block3 = None, None, None

    # We need to backtrack until we merge with the LCA chain, so we can use the height_to_hash dict.
    # This is important if we are on a fork, or beyond the LCA.
    curr: Optional[Header] = block
    assert curr is not None
    while (curr.height not in height_to_hash
           or height_to_hash[curr.height] != curr.header_hash):
        if curr.height == height1:
            block1 = curr
        elif curr.height == height2:
            block2 = curr
        elif curr.height == height3:
            block3 = curr
        curr = headers.get(curr.prev_header_hash, None)
        assert curr is not None

    # Once we are before the fork point (and before the LCA), we can use the height_to_hash map
    if not block1 and height1 >= 0:
        # height1 could be -1, for the first difficulty calculation
        block1 = headers[height_to_hash[height1]]
    if not block2:
        block2 = headers[height_to_hash[height2]]
    if not block3:
        block3 = headers[height_to_hash[height3]]
    assert block2 is not None and block3 is not None

    # Current difficulty parameter (diff of block h = i - 1)
    Tc = get_next_difficulty(constants, headers, height_to_hash,
                             headers[block.prev_header_hash])

    # Previous difficulty parameter (diff of block h = i - 2048 - 1)
    Tp = get_next_difficulty(constants, headers, height_to_hash,
                             headers[block2.prev_header_hash])
    if block1:
        timestamp1 = int(block1.data.timestamp)  # i - 2048 - 512 - 1
    else:
        # In the case of height == -1, there is no timestamp here, so assume the genesis block
        # took constants.BLOCK_TIME_TARGET seconds to mine.
        genesis = headers[height_to_hash[uint32(0)]]
        timestamp1 = genesis.data.timestamp - constants.BLOCK_TIME_TARGET
    timestamp2 = block2.data.timestamp  # i - 2048 - 1
    timestamp3 = block3.data.timestamp  # i - 512 - 1

    # Numerator fits in 128 bits, so big int is not necessary
    # We multiply by the denominators here, so we only have one fraction in the end (avoiding floating point)
    term1 = (constants.DIFFICULTY_DELAY * Tp * (timestamp3 - timestamp2) *
             constants.BLOCK_TIME_TARGET)
    term2 = ((constants.DIFFICULTY_WARP_FACTOR - 1) *
             (constants.DIFFICULTY_EPOCH - constants.DIFFICULTY_DELAY) * Tc *
             (timestamp2 - timestamp1) * constants.BLOCK_TIME_TARGET)

    # Round down after the division
    new_difficulty_precise: uint64 = uint64(
        (term1 + term2) // (constants.DIFFICULTY_WARP_FACTOR *
                            (timestamp3 - timestamp2) *
                            (timestamp2 - timestamp1)))
    # Take only SIGNIFICANT_BITS significant bits
    new_difficulty = uint64(
        truncate_to_significant_bits(new_difficulty_precise,
                                     constants.SIGNIFICANT_BITS))
    assert count_significant_bits(new_difficulty) <= constants.SIGNIFICANT_BITS

    # Only change by a max factor, to prevent attacks, as in the Greenpaper; the result must be at least 1
    max_diff = uint64(
        truncate_to_significant_bits(
            constants.DIFFICULTY_FACTOR * Tc,
            constants.SIGNIFICANT_BITS,
        ))
    min_diff = uint64(
        truncate_to_significant_bits(
            Tc // constants.DIFFICULTY_FACTOR,
            constants.SIGNIFICANT_BITS,
        ))
    if new_difficulty >= Tc:
        return min(new_difficulty, max_diff)
    else:
        return max([uint64(1), new_difficulty, min_diff])
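
Stripped of the chain lookups, the adjustment is a single fraction: with D = DIFFICULTY_DELAY, E = DIFFICULTY_EPOCH, W = DIFFICULTY_WARP_FACTOR and B = BLOCK_TIME_TARGET, the code computes (D * Tp * (t3 - t2) * B + (W - 1) * (E - D) * Tc * (t2 - t1) * B) // (W * (t3 - t2) * (t2 - t1)). A small numeric sketch with hypothetical constants, chosen only to exercise the arithmetic; when blocks arrive exactly on target, the difficulty is a fixed point:

# Hypothetical constants, for illustration only.
DIFFICULTY_EPOCH = 2048
DIFFICULTY_DELAY = 512
DIFFICULTY_WARP_FACTOR = 4
BLOCK_TIME_TARGET = 300

def next_difficulty_precise(Tc: int, Tp: int, t1: int, t2: int, t3: int) -> int:
    # The same fraction as in get_next_difficulty, rounded down via integer division.
    term1 = DIFFICULTY_DELAY * Tp * (t3 - t2) * BLOCK_TIME_TARGET
    term2 = ((DIFFICULTY_WARP_FACTOR - 1)
             * (DIFFICULTY_EPOCH - DIFFICULTY_DELAY)
             * Tc * (t2 - t1) * BLOCK_TIME_TARGET)
    return (term1 + term2) // (DIFFICULTY_WARP_FACTOR * (t3 - t2) * (t2 - t1))

# On-target spacing: h1..h2 spans DELAY blocks, h2..h3 spans EPOCH - DELAY blocks.
t1 = 0
t2 = t1 + DIFFICULTY_DELAY * BLOCK_TIME_TARGET
t3 = t2 + (DIFFICULTY_EPOCH - DIFFICULTY_DELAY) * BLOCK_TIME_TARGET
assert next_difficulty_precise(Tc=1000, Tp=1000, t1=t1, t2=t2, t3=t3) == 1000
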
Example #4
    def _create_block(
            self,
            test_constants: ConsensusConstants,
            challenge_hash: bytes32,
            height: uint32,
            prev_header_hash: bytes32,
            prev_iters: uint64,
            prev_weight: uint128,
            timestamp: uint64,
            difficulty: uint64,
            min_iters: uint64,
            seed: bytes,
            genesis: bool = False,
            reward_puzzlehash: Optional[bytes32] = None,
            transactions: Optional[Program] = None,
            aggsig: Optional[G2Element] = None,
            fees: uint64 = uint64(0),
    ) -> FullBlock:
        """
        Creates a block with the specified details. Uses the stored plots to create a proof of space,
        and also evaluates the VDF for the proof of time.
        """
        selected_plot_info = None
        selected_proof_index = 0
        selected_quality: Optional[bytes] = None
        best_quality = 0
        plots = [
            pinfo for _, pinfo in sorted(list(self.plots.items()),
                                         key=lambda x: str(x[0]))
        ]
        if self.use_any_pos:
            random.seed(seed)
            for i in range(len(plots) * 3):
                # Allow passing in seed, to create reorgs and different chains
                seeded_pn = random.randint(0, len(plots) - 1)
                plot_info = plots[seeded_pn]
                plot_id = plot_info.prover.get_id()
                ccp = ProofOfSpace.can_create_proof(
                    plot_id,
                    challenge_hash,
                    test_constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"],
                )
                if not ccp:
                    continue
                qualities = plot_info.prover.get_qualities_for_challenge(
                    challenge_hash)
                if len(qualities) > 0:
                    selected_plot_info = plot_info
                    selected_quality = qualities[0]
                    break
        else:
            for i in range(len(plots)):
                plot_info = plots[i]
                j = 0
                plot_id = plot_info.prover.get_id()
                ccp = ProofOfSpace.can_create_proof(
                    plot_id,
                    challenge_hash,
                    test_constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"],
                )
                if not ccp:
                    continue
                qualities = plot_info.prover.get_qualities_for_challenge(
                    challenge_hash)
                for quality in qualities:
                    qual_int = int.from_bytes(quality, "big", signed=False)
                    if qual_int > best_quality:
                        best_quality = qual_int
                        selected_quality = quality
                        selected_plot_info = plot_info
                        selected_proof_index = j
                    j += 1

        assert selected_plot_info is not None
        if selected_quality is None:
            raise RuntimeError("No proofs for this challenge")

        proof_xs: bytes = selected_plot_info.prover.get_full_proof(
            challenge_hash, selected_proof_index)

        plot_pk = ProofOfSpace.generate_plot_public_key(
            selected_plot_info.local_sk.get_g1(),
            selected_plot_info.farmer_public_key,
        )
        proof_of_space: ProofOfSpace = ProofOfSpace(
            challenge_hash,
            selected_plot_info.pool_public_key,
            plot_pk,
            selected_plot_info.prover.get_size(),
            proof_xs,
        )

        number_iters: uint64 = pot_iterations.calculate_iterations(
            proof_of_space,
            difficulty,
            min_iters,
            test_constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"],
        )
        if self.real_plots:
            print(f"Performing {number_iters} VDF iterations")

        int_size = (test_constants["DISCRIMINANT_SIZE_BITS"] + 16) >> 4

        result = prove(challenge_hash,
                       test_constants["DISCRIMINANT_SIZE_BITS"], number_iters)

        output = ClassgroupElement(
            int512(int.from_bytes(
                result[0:int_size],
                "big",
                signed=True,
            )),
            int512(
                int.from_bytes(
                    result[int_size:2 * int_size],
                    "big",
                    signed=True,
                )),
        )
        proof_bytes = result[2 * int_size:4 * int_size]

        proof_of_time = ProofOfTime(
            challenge_hash,
            number_iters,
            output,
            uint8(0),
            proof_bytes,
        )

        # Use the extension data to create different blocks based on header hash
        extension_data: bytes32 = bytes32(
            [random.randint(0, 255) for _ in range(32)])
        cost = uint64(0)

        fee_reward = uint64(block_rewards.calculate_base_fee(height) + fees)

        # Create filter
        byte_array_tx: List[bytearray] = []
        tx_additions: List[Coin] = []
        tx_removals: List[bytes32] = []
        if transactions:
            error, npc_list, _ = get_name_puzzle_conditions(transactions)
            additions: List[Coin] = additions_for_npc(npc_list)
            for coin in additions:
                tx_additions.append(coin)
                byte_array_tx.append(bytearray(coin.puzzle_hash))
            for npc in npc_list:
                tx_removals.append(npc.coin_name)
                byte_array_tx.append(bytearray(npc.coin_name))
        farmer_ph = self.farmer_ph
        pool_ph = self.pool_ph
        if reward_puzzlehash is not None:
            farmer_ph = reward_puzzlehash
            pool_ph = reward_puzzlehash

        byte_array_tx.append(bytearray(farmer_ph))
        byte_array_tx.append(bytearray(pool_ph))
        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())

        removal_merkle_set = MerkleSet()
        addition_merkle_set = MerkleSet()

        # Create removal Merkle set
        for coin_name in tx_removals:
            removal_merkle_set.add_already_hashed(coin_name)

        # Create addition Merkle set
        puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
        cb_reward = calculate_block_reward(height)
        cb_coin = create_coinbase_coin(height, pool_ph, cb_reward)
        fees_coin = create_fees_coin(height, farmer_ph, fee_reward)
        for coin in tx_additions + [cb_coin, fees_coin]:
            if coin.puzzle_hash in puzzlehash_coin_map:
                puzzlehash_coin_map[coin.puzzle_hash].append(coin)
            else:
                puzzlehash_coin_map[coin.puzzle_hash] = [coin]

        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coin_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))

        additions_root = addition_merkle_set.get_root()
        removal_root = removal_merkle_set.get_root()

        generator_hash = (transactions.get_tree_hash()
                          if transactions is not None else bytes32([0] * 32))
        filter_hash = std_hash(encoded)

        pool_target = PoolTarget(pool_ph, uint32(height))
        pool_target_signature = self.get_pool_key_signature(
            pool_target, proof_of_space.pool_public_key)
        assert pool_target_signature is not None
        final_aggsig: G2Element = pool_target_signature
        if aggsig is not None:
            final_aggsig = AugSchemeMPL.aggregate([final_aggsig, aggsig])

        header_data: HeaderData = HeaderData(
            height,
            prev_header_hash,
            timestamp,
            filter_hash,
            proof_of_space.get_hash(),
            uint128(prev_weight + difficulty),
            uint64(prev_iters + number_iters),
            additions_root,
            removal_root,
            farmer_ph,
            fee_reward,
            pool_target,
            final_aggsig,
            cost,
            extension_data,
            generator_hash,
        )

        header_hash_sig: G2Element = self.get_plot_signature(
            header_data, plot_pk)

        header: Header = Header(header_data, header_hash_sig)

        full_block: FullBlock = FullBlock(proof_of_space, proof_of_time,
                                          header, transactions, encoded)

        return full_block
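
The byte layout of the prove result above follows directly from int_size. With a hypothetical DISCRIMINANT_SIZE_BITS of 1024, int_size = (1024 + 16) >> 4 = 65 bytes, so the first 2 * int_size bytes hold the two signed big-endian integers of the classgroup output and the next 2 * int_size bytes hold the proof. A sketch of the slicing, with dummy bytes standing in for a real prover result:

DISCRIMINANT_SIZE_BITS = 1024  # hypothetical value, for illustration
int_size = (DISCRIMINANT_SIZE_BITS + 16) >> 4  # 65 bytes per integer

# Dummy stand-in for the prover output: (a, b) of the output, then the proof.
result = bytes(range(256)) * 2  # 512 bytes >= 4 * int_size

a = int.from_bytes(result[0:int_size], "big", signed=True)
b = int.from_bytes(result[int_size:2 * int_size], "big", signed=True)
proof_bytes = result[2 * int_size:4 * int_size]
assert len(proof_bytes) == 2 * int_size
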
Example #5
 def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
     name = coin.name()
     assert name not in self._db
     self._db[name] = CoinRecord(coin, uint32(birthday.height), uint32(0),
                                 False, False, uint64(birthday.seconds))
     self._ph_index[coin.puzzle_hash].append(name)
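
_add_coin_entry assumes two in-memory indexes on the surrounding class: a dict from coin name to CoinRecord, and a puzzle-hash index from puzzle hash to the names of coins paying to it. A minimal sketch of that shape, with a hypothetical stand-in record type:

from collections import defaultdict
from dataclasses import dataclass
from typing import DefaultDict, Dict, List

@dataclass
class CoinRecordSketch:  # stand-in for the real CoinRecord
    coin_name: bytes
    puzzle_hash: bytes
    confirmed_height: int
    timestamp: int

class CoinStoreSketch:
    def __init__(self) -> None:
        self._db: Dict[bytes, CoinRecordSketch] = {}
        self._ph_index: DefaultDict[bytes, List[bytes]] = defaultdict(list)

    def add(self, rec: CoinRecordSketch) -> None:
        # Mirrors _add_coin_entry: a coin name may be added only once, and the
        # index answers "which coins pay to this puzzle hash" without a scan.
        assert rec.coin_name not in self._db
        self._db[rec.coin_name] = rec
        self._ph_index[rec.puzzle_hash].append(rec.coin_name)

    def coins_for_puzzle_hash(self, ph: bytes) -> List[CoinRecordSketch]:
        return [self._db[name] for name in self._ph_index[ph]]
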
Example #6
    async def test_puzzle_store(self):
        db_filename = Path("puzzle_store_test.db")

        if db_filename.exists():
            db_filename.unlink()

        con = await aiosqlite.connect(db_filename)
        db = await WalletPuzzleStore.create(con)
        try:
            derivation_recs = []

            for i in range(1000):
                derivation_recs.append(
                    DerivationRecord(
                        uint32(i),
                        token_bytes(32),
                        AugSchemeMPL.key_gen(token_bytes(32)).get_g1(),
                        WalletType.STANDARD_WALLET,
                        uint32(1),
                    ))
                derivation_recs.append(
                    DerivationRecord(
                        uint32(i),
                        token_bytes(32),
                        AugSchemeMPL.key_gen(token_bytes(32)).get_g1(),
                        WalletType.RATE_LIMITED,
                        uint32(2),
                    ))
            assert await db.puzzle_hash_exists(derivation_recs[0].puzzle_hash
                                               ) is False
            assert await db.index_for_pubkey(derivation_recs[0].pubkey) is None
            assert (await db.index_for_puzzle_hash(
                derivation_recs[2].puzzle_hash) is None)
            assert (await db.wallet_info_for_puzzle_hash(
                derivation_recs[2].puzzle_hash) is None)
            assert len((await db.get_all_puzzle_hashes())) == 0
            assert await db.get_last_derivation_path() is None
            assert await db.get_unused_derivation_path() is None
            assert await db.get_derivation_record(0, 2) is None

            await db.add_derivation_paths(derivation_recs)

            assert await db.puzzle_hash_exists(derivation_recs[0].puzzle_hash
                                               ) is True
            assert await db.index_for_pubkey(derivation_recs[4].pubkey) == 2
            assert await db.index_for_puzzle_hash(
                derivation_recs[2].puzzle_hash) == 1
            assert await db.wallet_info_for_puzzle_hash(
                derivation_recs[2].puzzle_hash
            ) == (derivation_recs[2].wallet_id, derivation_recs[2].wallet_type)
            assert len((await db.get_all_puzzle_hashes())) == 2000
            assert await db.get_last_derivation_path() == 999
            assert await db.get_unused_derivation_path() == 0
            assert await db.get_derivation_record(0, 2) == derivation_recs[1]

            # Mark indices up to 249 as used
            await db.set_used_up_to(249)

            assert await db.get_unused_derivation_path() == 250

        except Exception as e:
            print(e, type(e))
            await db._clear_database()
            await db.close()
            db_filename.unlink()
            raise e

        await db._clear_database()
        await db.close()
        db_filename.unlink()
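
The index assertions above follow from how the records are built: two DerivationRecord entries per derivation index, one for wallet id 1 and one for wallet id 2, so derivation_recs[2] is the wallet-1 record at index 1 and get_derivation_record(0, 2) is the wallet-2 record at index 0. A toy version of the pairing, with a hypothetical stand-in for DerivationRecord:

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class RecSketch:  # stand-in for DerivationRecord
    index: int
    wallet_id: int

recs: List[RecSketch] = []
for i in range(1000):
    recs.append(RecSketch(i, 1))  # even positions: wallet 1
    recs.append(RecSketch(i, 2))  # odd positions: wallet 2

def derivation_record(index: int, wallet_id: int) -> Optional[RecSketch]:
    return next((r for r in recs if r.index == index and r.wallet_id == wallet_id), None)

assert recs[2].index == 1                  # why index_for_puzzle_hash(...) == 1
assert derivation_record(0, 2) is recs[1]  # why get_derivation_record(0, 2) == recs[1]
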
Example #7
    async def test_wallet_make_transaction(self, two_wallet_nodes):
        test_rpc_port = uint16(21529)
        num_blocks = 5
        full_nodes, wallets = two_wallet_nodes
        full_node_1, server_1 = full_nodes[0]
        wallet_node, server_2 = wallets[0]
        wallet_node_2, server_3 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet
        wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()
        ph_2 = await wallet_2.get_new_puzzlehash()

        await server_2.start_client(
            PeerInfo("localhost", uint16(server_1._port)), None)

        for i in range(0, num_blocks):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        initial_funds = sum([
            calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
            for i in range(1, num_blocks - 1)
        ])
        initial_funds_eventually = sum([
            calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ])

        wallet_rpc_api = WalletRpcApi(wallet_node)

        config = load_config(bt.root_path, "config.yaml")
        hostname = config["self_hostname"]
        daemon_port = config["daemon_port"]

        def stop_node_cb():
            pass

        rpc_cleanup = await start_rpc_server(
            wallet_rpc_api,
            hostname,
            daemon_port,
            test_rpc_port,
            stop_node_cb,
            connect_to_daemon=False,
        )

        await time_out_assert(5, wallet.get_confirmed_balance, initial_funds)
        await time_out_assert(5, wallet.get_unconfirmed_balance, initial_funds)

        client = await WalletRpcClient.create("localhost", test_rpc_port)
        try:
            addr = encode_puzzle_hash(
                await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash()
            )
            tx_amount = 15600000
            try:
                await client.send_transaction("1", 100000000000000000, addr)
                raise Exception("Should not create high value tx")
            except ValueError:
                pass

            tx = await client.send_transaction("1", tx_amount, addr)
            transaction_id = tx.name()

            async def tx_in_mempool():
                tx = await client.get_transaction("1", transaction_id)
                return tx.is_in_mempool()

            await time_out_assert(5, tx_in_mempool, True)
            await time_out_assert(5, wallet.get_unconfirmed_balance,
                                  initial_funds - tx_amount)
            assert (
                await client.get_wallet_balance("1")
            )["unconfirmed_wallet_balance"] == initial_funds - tx_amount
            assert (await client.get_wallet_balance("1")
                    )["confirmed_wallet_balance"] == initial_funds

            for i in range(0, 5):
                await client.farm_block(encode_puzzle_hash(ph_2))
                await asyncio.sleep(1)

            async def eventual_balance():
                return (await client.get_wallet_balance("1"))["confirmed_wallet_balance"]

            await time_out_assert(5, eventual_balance,
                                  initial_funds_eventually - tx_amount)

            address = await client.get_next_address("1")
            assert len(address) > 10

            transactions = await client.get_transactions("1")
            assert len(transactions) > 1

            pks = await client.get_public_keys()
            assert len(pks) == 1

            assert (await client.get_height_info()) > 0

            sk_dict = await client.get_private_key(pks[0])
            assert sk_dict["fingerprint"] == pks[0]
            assert sk_dict["sk"] is not None
            assert sk_dict["pk"] is not None
            assert sk_dict["seed"] is not None

            mnemonic = await client.generate_mnemonic()
            assert len(mnemonic) == 24

            await client.add_key(mnemonic)

            pks = await client.get_public_keys()
            assert len(pks) == 2

            await client.log_in_and_skip(pks[1])
            sk_dict = await client.get_private_key(pks[1])
            assert sk_dict["fingerprint"] == pks[1]

            await client.delete_key(pks[0])
            await client.log_in_and_skip(pks[1])
            assert len(await client.get_public_keys()) == 1

            assert not (await client.get_sync_status())

            wallets = await client.get_wallets()
            assert len(wallets) == 1
            balance = await client.get_wallet_balance(wallets[0]["id"])
            assert balance["unconfirmed_wallet_balance"] == 0

            test_wallet_backup_path = Path("test_wallet_backup_file")
            await client.create_backup(test_wallet_backup_path)
            assert test_wallet_backup_path.exists()
            test_wallet_backup_path.unlink()

            try:
                await client.send_transaction(wallets[0]["id"], 100, addr)
                raise Exception("Should not create tx if no balance")
            except ValueError:
                pass

            await client.delete_all_keys()

            assert len(await client.get_public_keys()) == 0
        except Exception:
            # Checks that the RPC manages to stop the node
            client.close()
            await client.await_closed()
            await rpc_cleanup()
            raise

        client.close()
        await client.await_closed()
        await rpc_cleanup()
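
time_out_assert, used throughout this test, polls a (possibly async) function until it returns the expected value or a timeout expires. A minimal sketch of such a helper, assuming this shape; the real helper may differ in signature and polling interval:

import asyncio
import time

async def time_out_assert_sketch(timeout, function, value, *args):
    # Poll function(*args) until it equals value, or fail after timeout seconds.
    start = time.time()
    while time.time() - start < timeout:
        result = function(*args)
        if asyncio.iscoroutine(result):
            result = await result
        if result == value:
            return
        await asyncio.sleep(0.1)
    assert False, f"Timed out waiting for {function} to return {value}"

# Usage, mirroring the calls above:
#   await time_out_assert_sketch(5, wallet.get_confirmed_balance, initial_funds)
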
Example #8
    async def block(self,
                    block: peer_protocol.Block) -> OutboundMessageGenerator:
        """
        Receive a full block from a peer full node (or ourselves).
        """
        header_hash = block.block.header_block.header.get_hash()

        # Add the block to the seen set, and check whether it was seen before
        if self.store.seen_block(header_hash):
            return

        async with self.store.lock:
            if await self.store.get_sync_mode():
                # Add the block to our potential tips list
                await self.store.add_potential_tip(block.block)
                return

            # Tries to add the block to the blockchain
            added: ReceiveBlockResult = await self.blockchain.receive_block(
                block.block)

            # Always immediately add the block to the database, after updating blockchain state
            if (added == ReceiveBlockResult.ADDED_AS_ORPHAN
                    or added == ReceiveBlockResult.ADDED_TO_HEAD):
                await self.store.add_block(block.block)
        if added == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
            return
        elif added == ReceiveBlockResult.INVALID_BLOCK:
            log.warning(
                f"Block {header_hash} at height {block.block.height} is invalid."
            )
            return
        elif added == ReceiveBlockResult.DISCONNECTED_BLOCK:
            log.warning(f"Disconnected block {header_hash}")
            async with self.store.lock:
                tip_height = min([
                    head.height for head in self.blockchain.get_current_tips()
                ])

            if (block.block.height >
                    tip_height + self.config["sync_blocks_behind_threshold"]):
                async with self.store.lock:
                    if await self.store.get_sync_mode():
                        return
                    await self.store.clear_sync_info()
                    await self.store.add_potential_tip(block.block)
                    await self.store.set_sync_mode(True)
                log.info(
                    f"We are too far behind this block. Our height is {tip_height} and block is at "
                    f"{block.block.height}")
                try:
                    # Performs sync, and catch exceptions so we don't close the connection
                    async for ret_msg in self._sync():
                        yield ret_msg
                except asyncio.CancelledError:
                    log.warning("Syncing failed, CancelledError")
                except BaseException as e:
                    log.warning(f"Error {type(e)}{e} with syncing")
                finally:
                    async for ret_msg in self._finish_sync():
                        yield ret_msg

            elif block.block.height >= tip_height - 3:
                log.info(
                    f"We have received a disconnected block at height {block.block.height}, current tip is {tip_height}"
                )
                msg = Message(
                    "request_block",
                    peer_protocol.RequestBlock(block.block.prev_header_hash),
                )
                async with self.store.lock:
                    await self.store.add_disconnected_block(block.block)
                yield OutboundMessage(NodeType.FULL_NODE, msg,
                                      Delivery.RESPOND)
            return
        elif added == ReceiveBlockResult.ADDED_TO_HEAD:
            # Only propagate blocks which extend the blockchain (becomes one of the heads)
            ips_changed: bool = False
            async with self.store.lock:
                log.info(
                    f"Updated heads, new heights: {[b.height for b in self.blockchain.get_current_tips()]}"
                )

                difficulty = self.blockchain.get_next_difficulty(
                    block.block.prev_header_hash)
                next_vdf_ips = self.blockchain.get_next_ips(
                    block.block.header_hash)
                log.info(f"Difficulty {difficulty} IPS {next_vdf_ips}")
                if next_vdf_ips != await self.store.get_proof_of_time_estimate_ips():
                    await self.store.set_proof_of_time_estimate_ips(
                        next_vdf_ips)
                    ips_changed = True
            if ips_changed:
                rate_update = farmer_protocol.ProofOfTimeRate(next_vdf_ips)
                log.info(f"Sending proof of time rate {next_vdf_ips}")
                yield OutboundMessage(
                    NodeType.FARMER,
                    Message("proof_of_time_rate", rate_update),
                    Delivery.BROADCAST,
                )
                self.store.clear_seen_unfinished_blocks()
                self.store.clear_seen_blocks()

            assert block.block.header_block.proof_of_time
            assert block.block.header_block.challenge
            pos_quality = block.block.header_block.proof_of_space.verify_and_get_quality()

            farmer_request = farmer_protocol.ProofOfSpaceFinalized(
                block.block.header_block.challenge.get_hash(),
                block.block.height,
                block.block.weight,
                pos_quality,
                difficulty,
            )
            timelord_request = timelord_protocol.ChallengeStart(
                block.block.header_block.challenge.get_hash(),
                block.block.header_block.challenge.total_weight,
            )
            # Tell timelord to stop previous challenge and start with new one
            yield OutboundMessage(
                NodeType.TIMELORD,
                Message("challenge_start", timelord_request),
                Delivery.BROADCAST,
            )

            # Tell full nodes about the new block
            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("block", block),
                Delivery.BROADCAST_TO_OTHERS,
            )

            # Tell farmer about the new block
            yield OutboundMessage(
                NodeType.FARMER,
                Message("proof_of_space_finalized", farmer_request),
                Delivery.BROADCAST,
            )

        elif added == ReceiveBlockResult.ADDED_AS_ORPHAN:
            assert block.block.header_block.proof_of_time
            assert block.block.header_block.challenge
            log.info(
                f"Received orphan block of height {block.block.header_block.challenge.height}"
            )
        else:
            # Should never reach here; all the cases are covered
            assert False

        # Recursively process the next block if we have it.
        # This code path is reached if added == ADDED_AS_ORPHAN or ADDED_TO_HEAD
        async with self.store.lock:
            next_block: Optional[
                FullBlock] = await self.store.get_disconnected_block_by_prev(
                    block.block.header_hash)
        if next_block is not None:
            async for ret_msg in self.block(peer_protocol.Block(next_block)):
                yield ret_msg

        async with self.store.lock:
            # Removes all temporary data for old blocks
            lowest_tip = min(tip.height
                             for tip in self.blockchain.get_current_tips())
            clear_height = uint32(max(0, lowest_tip - 30))
            await self.store.clear_candidate_blocks_below(clear_height)
            await self.store.clear_unfinished_blocks_below(clear_height)
            await self.store.clear_disconnected_blocks_below(clear_height)
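
The branches above cover every value of ReceiveBlockResult. A sketch of the enum as this handler uses it; the member names are the ones referenced above, the numeric values are hypothetical:

from enum import Enum

class ReceiveBlockResultSketch(Enum):
    ADDED_TO_HEAD = 1        # extended one of the current tips: store and propagate
    ADDED_AS_ORPHAN = 2      # valid but not a new tip: store, do not propagate
    INVALID_BLOCK = 3        # failed validation: log a warning and drop
    ALREADY_HAVE_BLOCK = 4   # duplicate: ignore
    DISCONNECTED_BLOCK = 5   # parent unknown: request it, or trigger a full sync
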
Example #9
    async def new_signage_point_harvester(
        self, new_challenge: harvester_protocol.NewSignagePointHarvester, peer: WSChiaConnection
    ):
        """
        The harvester receives a new signage point from the farmer, this happens at the start of each slot.
        The harvester does a few things:
        1. The harvester applies the plot filter for each of the plots, to select the proportion which are eligible
        for this signage point and challenge.
        2. The harvester gets the qualities for each plot. This is approximately 7 reads per plot which qualifies.
        Note that each plot may have 0, 1, 2, etc. qualities for that challenge; on average it will have 1.
        3. Checks the required_iters for each quality and the given signage point, to see which are eligible for
        inclusion (required_iters < sp_interval_iters).
        4. Looks up the full proof of space in the plot for each quality, approximately 64 reads per quality
        5. Returns the proof of space to the farmer
        """
        if len(self.harvester.pool_public_keys) == 0 or len(self.harvester.farmer_public_keys) == 0:
            # This means that we have not received the handshake yet
            return

        start = time.time()
        assert len(new_challenge.challenge_hash) == 32

        # Refresh plots to see if there are any new ones
        if start - self.harvester.last_load_time > 120:
            await self.harvester.refresh_plots()
            self.harvester.last_load_time = time.time()

        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a thread pool.
            try:
                sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
                    plot_info.prover.get_id(),
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                )
                try:
                    quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
                except Exception as e:
                    self.harvester.log.error(f"Error using prover object {e}")
                    return []

                responses: List[Tuple[bytes32, ProofOfSpace]] = []
                if quality_strings is not None:
                    # Found proofs of space (on average 1 is expected per plot)
                    for index, quality_str in enumerate(quality_strings):
                        required_iters: uint64 = calculate_iterations_quality(
                            self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                            quality_str,
                            plot_info.prover.get_size(),
                            new_challenge.difficulty,
                            new_challenge.sp_hash,
                        )
                        sp_interval_iters = calculate_sp_interval_iters(
                            self.harvester.constants, new_challenge.sub_slot_iters
                        )
                        if required_iters < sp_interval_iters:
                            # Found a very good proof of space! Will fetch the whole proof
                            # from disk, then send it to the farmer
                            try:
                                proof_xs = plot_info.prover.get_full_proof(sp_challenge_hash, index)
                            except RuntimeError:
                                self.harvester.log.error(f"Exception fetching full proof for {filename}")
                                continue

                            # Look up local_sk from plot to save locked memory
                            (
                                pool_public_key_or_puzzle_hash,
                                farmer_public_key,
                                local_master_sk,
                            ) = parse_plot_info(plot_info.prover.get_memo())
                            local_sk = master_sk_to_local_sk(local_master_sk)
                            plot_public_key = ProofOfSpace.generate_plot_public_key(
                                local_sk.get_g1(), farmer_public_key
                            )
                            responses.append(
                                (
                                    quality_str,
                                    ProofOfSpace(
                                        sp_challenge_hash,
                                        plot_info.pool_public_key,
                                        plot_info.pool_contract_puzzle_hash,
                                        plot_public_key,
                                        uint8(plot_info.prover.get_size()),
                                        proof_xs,
                                    ),
                                )
                            )
                return responses
            except Exception as e:
                self.harvester.log.error(f"Unknown error: {e}")
                return []

        async def lookup_challenge(filename: Path, plot_info: PlotInfo) -> List[harvester_protocol.NewProofOfSpace]:
            # Executes a DiskProverLookup in a thread pool, and returns responses
            all_responses: List[harvester_protocol.NewProofOfSpace] = []
            if self.harvester._is_shutdown:
                return []
            proofs_of_space_and_q: List[Tuple[bytes32, ProofOfSpace]] = await loop.run_in_executor(
                self.harvester.executor, blocking_lookup, filename, plot_info
            )
            for quality_str, proof_of_space in proofs_of_space_and_q:
                all_responses.append(
                    harvester_protocol.NewProofOfSpace(
                        new_challenge.challenge_hash,
                        new_challenge.sp_hash,
                        quality_str.hex() + str(filename.resolve()),
                        proof_of_space,
                        new_challenge.signage_point_index,
                    )
                )
            return all_responses

        awaitables = []
        passed = 0
        total = 0
        for try_plot_filename, try_plot_info in self.harvester.provers.items():
            if try_plot_filename.exists():
                # Passes the plot filter (does not check sp filter yet though, since we have not reached sp)
                # This is being executed at the beginning of the slot
                total += 1
                if ProofOfSpace.passes_plot_filter(
                    self.harvester.constants,
                    try_plot_info.prover.get_id(),
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                ):
                    passed += 1
                    awaitables.append(lookup_challenge(try_plot_filename, try_plot_info))

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        total_proofs_found = 0
        for sublist_awaitable in asyncio.as_completed(awaitables):
            for response in await sublist_awaitable:
                total_proofs_found += 1
                msg = make_msg(ProtocolMessageTypes.new_proof_of_space, response)
                await peer.send_message(msg)

        now = uint64(int(time.time()))
        farming_info = FarmingInfo(
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
            now,
            uint32(passed),
            uint32(total_proofs_found),
            uint32(total),
        )
        pass_msg = make_msg(ProtocolMessageTypes.farming_info, farming_info)
        await peer.send_message(pass_msg)
        self.harvester.log.info(
            f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
            f" Found {total_proofs_found} proofs. Time: {time.time() - start:.5f} s. "
            f"Total {len(self.harvester.provers)} plots"
        )
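
The plot filter in step 1 is a cheap hash test: a plot is eligible for a signage point only when the top bits of a hash over (plot id, challenge, sp) are zero, so on average just 1 in 2**filter_bits plots does any disk I/O. A sketch of such a filter, assuming SHA-256 and a hypothetical 9-bit filter; the real ProofOfSpace.passes_plot_filter may differ in detail:

import hashlib

FILTER_BITS = 9  # hypothetical: pass rate of 1 / 2**9

def passes_plot_filter_sketch(plot_id: bytes, challenge_hash: bytes, sp_hash: bytes) -> bool:
    # Eligible iff the top FILTER_BITS bits of the digest are all zero.
    digest = hashlib.sha256(plot_id + challenge_hash + sp_hash).digest()
    top = int.from_bytes(digest[:4], "big")
    return top >> (32 - FILTER_BITS) == 0

# Roughly 1 in 512 random plot ids should pass (~10 of 5000 here).
passed = sum(
    passes_plot_filter_sketch(i.to_bytes(32, "big"), b"\x00" * 32, b"\x01" * 32)
    for i in range(5000)
)
assert passed < 50
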
Example #10
    async def _validate_transactions(self, block: FullBlock,
                                     fee_base: uint64) -> Optional[Err]:
        # TODO(straya): review, further test the code, and number all the validation steps

        # 1. Check that transactions generator is present
        if not block.transactions_generator:
            return Err.UNKNOWN
        # Get list of names removed, puzzle hashes for removed coins, and conditions created
        error, npc_list, cost = calculate_cost_of_program(
            block.transactions_generator,
            self.constants.CLVM_COST_RATIO_CONSTANT)

        # 2. Check that cost <= MAX_BLOCK_COST_CLVM
        if cost > self.constants.MAX_BLOCK_COST_CLVM:
            return Err.BLOCK_COST_EXCEEDS_MAX
        if error:
            return error

        prev_header: Header
        if block.prev_header_hash in self.headers:
            prev_header = self.headers[block.prev_header_hash]
        else:
            return Err.EXTENDS_UNKNOWN_BLOCK

        removals: List[bytes32] = []
        removals_puzzle_dic: Dict[bytes32, bytes32] = {}
        for npc in npc_list:
            removals.append(npc.coin_name)
            removals_puzzle_dic[npc.coin_name] = npc.puzzle_hash

        additions: List[Coin] = additions_for_npc(npc_list)
        additions_dic: Dict[bytes32, Coin] = {}
        # Check additions for max coin amount
        for coin in additions:
            additions_dic[coin.name()] = coin
            if coin.amount >= self.constants.MAX_COIN_AMOUNT:
                return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM

        # Validate addition and removal roots
        root_error = self._validate_merkle_root(block, additions, removals)
        if root_error:
            return root_error

        # Validate filter
        byte_array_tx: List[bytearray] = []

        for coin in additions:
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for coin_name in removals:
            byte_array_tx.append(bytearray(coin_name))

        byte_array_tx.append(
            bytearray(block.header.data.farmer_rewards_puzzle_hash))
        byte_array_tx.append(
            bytearray(block.header.data.pool_target.puzzle_hash))

        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded_filter = bytes(bip158.GetEncoded())
        filter_hash = std_hash(encoded_filter)

        if filter_hash != block.header.data.filter_hash:
            return Err.INVALID_TRANSACTIONS_FILTER_HASH

        # Watch out for duplicate outputs
        addition_counter = collections.Counter(_.name() for _ in additions)
        for k, v in addition_counter.items():
            if v > 1:
                return Err.DUPLICATE_OUTPUT

        # Check for duplicate spends inside block
        removal_counter = collections.Counter(removals)
        for k, v in removal_counter.items():
            if v > 1:
                return Err.DOUBLE_SPEND

        # Check that removals exist and were not previously spent (unspent_db + diff_store + this_block)
        fork_h = find_fork_point_in_chain(self.headers, self.lca_block,
                                          block.header)

        # Get additions and removals since (after) fork_h but not including this block
        additions_since_fork: Dict[bytes32, Tuple[Coin, uint32]] = {}
        removals_since_fork: Set[bytes32] = set()
        coinbases_since_fork: Dict[bytes32, uint32] = {}
        curr: Optional[FullBlock] = await self.block_store.get_block(
            block.prev_header_hash)
        assert curr is not None

        while curr.height > fork_h:
            removals_in_curr, additions_in_curr = await curr.tx_removals_and_additions()
            for c_name in removals_in_curr:
                removals_since_fork.add(c_name)
            for c in additions_in_curr:
                additions_since_fork[c.name()] = (c, curr.height)

            coinbase_coin = curr.get_coinbase()
            fees_coin = curr.get_fees_coin()
            additions_since_fork[coinbase_coin.name()] = (
                coinbase_coin,
                curr.height,
            )
            additions_since_fork[fees_coin.name()] = (
                fees_coin,
                curr.height,
            )
            coinbases_since_fork[coinbase_coin.name()] = curr.height
            coinbases_since_fork[fees_coin.name()] = curr.height
            curr = await self.block_store.get_block(curr.prev_header_hash)
            assert curr is not None

        removal_coin_records: Dict[bytes32, CoinRecord] = {}
        for rem in removals:
            if rem in additions_dic:
                # Ephemeral coin
                rem_coin: Coin = additions_dic[rem]
                new_unspent: CoinRecord = CoinRecord(rem_coin, block.height,
                                                     uint32(0), False, False)
                removal_coin_records[new_unspent.name] = new_unspent
            else:
                assert prev_header is not None
                unspent = await self.coin_store.get_coin_record(
                    rem, prev_header)
                if unspent is not None and unspent.confirmed_block_index <= fork_h:
                    # Spending something in the current chain, confirmed before fork
                    # (We ignore all coins confirmed after fork)
                    if unspent.spent == 1 and unspent.spent_block_index <= fork_h:
                        # Spend in an ancestor block, so this is a double spend
                        return Err.DOUBLE_SPEND
                    # If it's a coinbase, check that it's not frozen
                    if unspent.coinbase == 1:
                        if (block.height < unspent.confirmed_block_index +
                                self.coinbase_freeze):
                            return Err.COINBASE_NOT_YET_SPENDABLE
                    removal_coin_records[unspent.name] = unspent
                else:
                    # This coin is not in the current heaviest chain, so it must be in the fork
                    if rem not in additions_since_fork:
                        # This coin does not exist in the fork
                        # TODO: fix this, there is a consensus bug here
                        return Err.UNKNOWN_UNSPENT
                    if rem in coinbases_since_fork:
                        # This coin is a coinbase coin
                        if (block.height < coinbases_since_fork[rem] +
                                self.coinbase_freeze):
                            return Err.COINBASE_NOT_YET_SPENDABLE
                    new_coin, confirmed_height = additions_since_fork[rem]
                    new_coin_record: CoinRecord = CoinRecord(
                        new_coin,
                        confirmed_height,
                        uint32(0),
                        False,
                        (rem in coinbases_since_fork),
                    )
                    removal_coin_records[
                        new_coin_record.name] = new_coin_record

                # This check applies both to coins created before the fork (pulled from coin_store)
                # and to coins created after the fork (additions_since_fork).
                if rem in removals_since_fork:
                    # This coin was spent in the fork
                    return Err.DOUBLE_SPEND

        # Check fees
        removed = 0
        for unspent in removal_coin_records.values():
            removed += unspent.coin.amount

        added = 0
        for coin in additions:
            added += coin.amount

        if removed < added:
            return Err.MINTING_COIN

        fees = removed - added
        assert_fee_sum: uint64 = uint64(0)

        for npc in npc_list:
            if ConditionOpcode.ASSERT_FEE in npc.condition_dict:
                fee_list: List[ConditionVarPair] = npc.condition_dict[
                    ConditionOpcode.ASSERT_FEE]
                for cvp in fee_list:
                    fee = int_from_bytes(cvp.var1)
                    assert_fee_sum = assert_fee_sum + fee

        if fees < assert_fee_sum:
            return Err.ASSERT_FEE_CONDITION_FAILED

        # Check coinbase reward
        if fees + fee_base != block.header.data.total_transaction_fees:
            return Err.BAD_COINBASE_REWARD

        # Verify that removed coin puzzle_hashes match with calculated puzzle_hashes
        for unspent in removal_coin_records.values():
            if unspent.coin.puzzle_hash != removals_puzzle_dic[unspent.name]:
                return Err.WRONG_PUZZLE_HASH

        # Verify conditions, create hash_key list for aggsig check
        pool_target_m = bytes(block.header.data.pool_target)

        # The pool signature on the pool target is checked here as well, since the pool signature is
        # aggregated along with the transaction signatures
        pairs_pks = [block.proof_of_space.pool_public_key]
        pairs_msgs = [pool_target_m]
        for npc in npc_list:
            unspent = removal_coin_records[npc.coin_name]
            error = blockchain_check_conditions_dict(
                unspent,
                removal_coin_records,
                npc.condition_dict,
                block.header,
            )
            if error:
                return error
            for pk, m in pkm_pairs_for_conditions_dict(npc.condition_dict,
                                                       npc.coin_name):
                pairs_pks.append(pk)
                pairs_msgs.append(m)

        # Verify aggregated signature
        # TODO: move this to pre_validate_blocks_multiprocessing so we can sync faster
        if not block.header.data.aggregated_signature:
            return Err.BAD_AGGREGATE_SIGNATURE

        validates = AugSchemeMPL.aggregate_verify(
            pairs_pks, pairs_msgs, block.header.data.aggregated_signature)
        if not validates:
            return Err.BAD_AGGREGATE_SIGNATURE

        return None
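The fee logic above is plain conservation arithmetic: a block must not create value, and the fees left over (removed - added) must cover every declared ASSERT_FEE condition. A minimal standalone sketch of that invariant, with illustrative names rather than the node's own API:

from typing import List, Optional

def check_fee_invariants(removed: int, added: int,
                         assert_fees: List[int]) -> Optional[str]:
    # Inputs must cover outputs; otherwise the block mints value.
    if removed < added:
        return "MINTING_COIN"
    fees = removed - added
    # The actual fees must cover the sum of all ASSERT_FEE conditions.
    if fees < sum(assert_fees):
        return "ASSERT_FEE_CONDITION_FAILED"
    return None

assert check_fee_invariants(100, 90, [5]) is None
assert check_fee_invariants(100, 110, []) == "MINTING_COIN"
assert check_fee_invariants(100, 99, [2]) == "ASSERT_FEE_CONDITION_FAILED"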
Example #11
    async def _sync(self) -> OutboundMessageGenerator:
        """
        Performs a full sync of the blockchain.
            - Check which are the heaviest tips
            - Request headers for the heaviest
            - Verify the weight of the tip, using the headers
            - Find the fork point to see where to start downloading blocks
            - Blacklist peers that provide invalid blocks
            - Sync blockchain up to heads (request blocks in batches)
        """
        log.info("Starting to perform sync with peers.")
        log.info("Waiting to receive tips from peers.")
        # TODO: better way to tell that we have finished receiving tips
        await asyncio.sleep(5)
        highest_weight: uint64 = uint64(0)
        tip_block: FullBlock
        tip_height = 0

        # Based on responses from peers about the current heads, see which head is the heaviest
        # (similar to longest chain rule).
        async with self.store.lock:
            potential_tips: List[Tuple[
                bytes32,
                FullBlock]] = await self.store.get_potential_tips_tuples()
            log.info(f"Have collected {len(potential_tips)} potential tips")
            for header_hash, potential_tip_block in potential_tips:
                if potential_tip_block.header_block.challenge is None:
                    raise ValueError(
                        f"Invalid tip block {potential_tip_block.header_hash} received"
                    )
                if (potential_tip_block.header_block.challenge.total_weight >
                        highest_weight):
                    highest_weight = (potential_tip_block.header_block.
                                      challenge.total_weight)
                    tip_block = potential_tip_block
                    tip_height = potential_tip_block.header_block.challenge.height
            if highest_weight <= max(
                [t.weight for t in self.blockchain.get_current_tips()]):
                log.info("Not performing sync, already caught up.")
                return

        assert tip_block
        log.info(
            f"Tip block {tip_block.header_hash} tip height {tip_block.height}")

        for height in range(0, tip_block.height + 1):
            self.store.set_potential_headers_received(uint32(height), Event())
            self.store.set_potential_blocks_received(uint32(height), Event())
        self.store.set_potential_hashes_received(Event())

        timeout = 200
        sleep_interval = 10
        total_time_slept = 0

        while True:
            if total_time_slept > timeout:
                raise TimeoutError("Took too long to fetch header hashes.")
            if self._shut_down:
                return
            # Download all the header hashes and find the fork point
            request = peer_protocol.RequestAllHeaderHashes(
                tip_block.header_hash)
            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_all_header_hashes", request),
                Delivery.RANDOM,
            )
            try:
                await asyncio.wait_for(
                    self.store.get_potential_hashes_received().wait(),
                    timeout=sleep_interval,
                )
                break
            except concurrent.futures.TimeoutError:
                total_time_slept += sleep_interval
                log.warning("Did not receive desired header hashes")

        # Finding the fork point allows us to only download headers and blocks from the fork point
        async with self.store.lock:
            header_hashes = self.store.get_potential_hashes()
            fork_point_height: uint32 = self.blockchain.find_fork_point(
                header_hashes)
            fork_point_hash: bytes32 = header_hashes[fork_point_height]
        log.info(
            f"Fork point: {fork_point_hash} at height {fork_point_height}")

        # Now, we download all of the headers in order to verify the weight, in batches
        headers: List[HeaderBlock] = []

        # Download headers in batches. We download a few batches ahead in case there are delays or peers
        # that don't have the headers that we need.
        last_request_time: float = 0
        highest_height_requested: uint32 = uint32(0)
        request_made: bool = False
        for height_checkpoint in range(fork_point_height + 1, tip_height + 1,
                                       self.config["max_headers_to_send"]):
            end_height = min(
                height_checkpoint + self.config["max_headers_to_send"],
                tip_height + 1)

            total_time_slept = 0
            while True:
                if self._shut_down:
                    return
                if total_time_slept > timeout:
                    raise TimeoutError("Took too long to fetch blocks")

                # Request batches that we don't have yet
                for batch in range(0, self.config["num_sync_batches"]):
                    batch_start = (height_checkpoint +
                                   batch * self.config["max_headers_to_send"])
                    batch_end = min(
                        batch_start + self.config["max_headers_to_send"],
                        tip_height + 1)

                    if batch_start > tip_height:
                        # We have asked for all blocks
                        break

                    blocks_missing = any([
                        not (self.store.get_potential_headers_received(
                            uint32(h))).is_set()
                        for h in range(batch_start, batch_end)
                    ])
                    if (time.time() - last_request_time > sleep_interval
                            and blocks_missing
                        ) or (batch_end - 1) > highest_height_requested:
                        # If we are missing header blocks in this batch, and we haven't made a request in a while,
                        # Make a request for this batch. Also, if we have never requested this batch, make
                        # the request
                        if batch_end - 1 > highest_height_requested:
                            highest_height_requested = batch_end - 1

                        request_made = True
                        request_hb = peer_protocol.RequestHeaderBlocks(
                            tip_block.header_block.header.get_hash(),
                            [uint32(h) for h in range(batch_start, batch_end)],
                        )
                        log.info(
                            f"Requesting header blocks {batch_start, batch_end}."
                        )
                        yield OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("request_header_blocks", request_hb),
                            Delivery.RANDOM,
                        )
                if request_made:
                    # Reset the timer for requests, so we don't overload other peers with requests
                    last_request_time = time.time()
                    request_made = False

                # Wait for the first batch (the next "max_blocks_to_send" blocks to arrive)
                awaitables = [
                    (self.store.get_potential_headers_received(
                        uint32(height))).wait()
                    for height in range(height_checkpoint, end_height)
                ]
                future = asyncio.gather(*awaitables, return_exceptions=True)
                try:
                    await asyncio.wait_for(future, timeout=sleep_interval)
                    break
                except concurrent.futures.TimeoutError:
                    try:
                        await future
                    except asyncio.CancelledError:
                        pass
                    total_time_slept += sleep_interval
                    log.info(f"Did not receive desired header blocks")

        async with self.store.lock:
            for h in range(fork_point_height + 1, tip_height + 1):
                header = self.store.get_potential_header(uint32(h))
                assert header is not None
                headers.append(header)

        log.error(f"Downloaded headers up to tip height: {tip_height}")
        if not verify_weight(
                tip_block.header_block,
                headers,
                self.blockchain.header_blocks[fork_point_hash],
        ):
            raise errors.InvalidWeight(
                f"Weight of {tip_block.header_block.header.get_hash()} not valid."
            )

        log.info(
            f"Validated weight of headers. Downloaded {len(headers)} headers, tip height {tip_height}"
        )
        assert tip_height == fork_point_height + len(headers)

        # Download blocks in batches, and verify them as they come in. We download a few batches ahead,
        # in case there are delays.
        last_request_time = 0
        highest_height_requested = uint32(0)
        request_made = False
        for height_checkpoint in range(fork_point_height + 1, tip_height + 1,
                                       self.config["max_blocks_to_send"]):
            end_height = min(
                height_checkpoint + self.config["max_blocks_to_send"],
                tip_height + 1)

            total_time_slept = 0
            while True:
                if self._shut_down:
                    return
                if total_time_slept > timeout:
                    raise TimeoutError("Took too long to fetch blocks")

                # Request batches that we don't have yet
                for batch in range(0, self.config["num_sync_batches"]):
                    batch_start = (height_checkpoint +
                                   batch * self.config["max_blocks_to_send"])
                    batch_end = min(
                        batch_start + self.config["max_blocks_to_send"],
                        tip_height + 1)

                    if batch_start > tip_height:
                        # We have asked for all blocks
                        break

                    blocks_missing = any([
                        not (self.store.get_potential_blocks_received(
                            uint32(h))).is_set()
                        for h in range(batch_start, batch_end)
                    ])
                    if (time.time() - last_request_time > sleep_interval
                            and blocks_missing
                        ) or (batch_end - 1) > highest_height_requested:
                        # If we are missing blocks in this batch, and we haven't made a request in a while,
                        # Make a request for this batch. Also, if we have never requested this batch, make
                        # the request
                        log.info(
                            f"Requesting sync blocks {[i for i in range(batch_start, batch_end)]}"
                        )
                        if batch_end - 1 > highest_height_requested:
                            highest_height_requested = batch_end - 1
                        request_made = True
                        request_sync = peer_protocol.RequestSyncBlocks(
                            tip_block.header_block.header.header_hash,
                            [
                                uint32(height)
                                for height in range(batch_start, batch_end)
                            ],
                        )
                        yield OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("request_sync_blocks", request_sync),
                            Delivery.RANDOM,
                        )
                if request_made:
                    # Reset the timer for requests, so we don't overload other peers with requests
                    last_request_time = time.time()
                    request_made = False

                # Wait for the first batch (the next "max_blocks_to_send" blocks to arrive)
                awaitables = [
                    (self.store.get_potential_blocks_received(
                        uint32(height))).wait()
                    for height in range(height_checkpoint, end_height)
                ]
                future = asyncio.gather(*awaitables, return_exceptions=True)
                try:
                    await asyncio.wait_for(future, timeout=sleep_interval)
                    break
                except concurrent.futures.TimeoutError:
                    try:
                        await future
                    except asyncio.CancelledError:
                        pass
                    total_time_slept += sleep_interval
                    log.info("Did not receive desired blocks")

            # Verifies this batch, which we are guaranteed to have (since we broke from the above loop)
            for height in range(height_checkpoint, end_height):
                if self._shut_down:
                    return
                block: Optional[
                    FullBlock] = await self.store.get_potential_block(
                        uint32(height))
                assert block is not None
                start = time.time()
                async with self.store.lock:
                    # The block gets permanently added to the blockchain
                    result = await self.blockchain.receive_block(block)
                    if (result == ReceiveBlockResult.INVALID_BLOCK or result
                            == ReceiveBlockResult.DISCONNECTED_BLOCK):
                        raise RuntimeError(
                            f"Invalid block {block.header_hash}")
                    log.info(
                        f"Took {time.time() - start} seconds to validate and add block {block.height}."
                    )
                    # Always immediately add the block to the database, after updating blockchain state
                    await self.store.add_block(block)
                    assert (max([
                        h.height for h in self.blockchain.get_current_tips()
                    ]) >= height)
                    await self.store.set_proof_of_time_estimate_ips(
                        self.blockchain.get_next_ips(block.header_hash))
        assert max([h.height
                    for h in self.blockchain.get_current_tips()]) == tip_height
        log.info(f"Finished sync up to height {tip_height}")
Example #12
    async def create_new_cc(
        wallet_state_manager: Any,
        wallet: Wallet,
        amount: uint64,
    ):
        self = CCWallet()
        self.cost_of_single_tx = None
        self.base_puzzle_program = None
        self.base_inner_puzzle_hash = None
        self.standard_wallet = wallet
        self.log = logging.getLogger(__name__)

        self.wallet_state_manager = wallet_state_manager

        self.cc_info = CCInfo(None, [])
        info_as_string = bytes(self.cc_info).hex()
        self.wallet_info = await wallet_state_manager.user_store.create_wallet(
            "CC Wallet", WalletType.COLOURED_COIN, info_as_string)
        if self.wallet_info is None:
            raise ValueError("Internal Error")

        try:
            spend_bundle = await self.generate_new_coloured_coin(amount)
        except Exception:
            await wallet_state_manager.user_store.delete_wallet(self.id())
            raise

        await self.wallet_state_manager.add_new_wallet(self, self.id())

        # Change and actual coloured coin
        non_ephemeral_spends: List[
            Coin] = spend_bundle.not_ephemeral_additions()
        cc_coin = None
        puzzle_store = self.wallet_state_manager.puzzle_store

        for c in non_ephemeral_spends:
            info = await puzzle_store.wallet_info_for_puzzle_hash(c.puzzle_hash)
            if info is None:
                raise ValueError("Internal Error")
            id, wallet_type = info
            if id == self.id():
                cc_coin = c

        if cc_coin is None:
            raise ValueError(
                "Internal Error, unable to generate new coloured coin")

        regular_record = TransactionRecord(
            confirmed_at_height=uint32(0),
            created_at_time=uint64(int(time.time())),
            to_puzzle_hash=cc_coin.puzzle_hash,
            amount=uint64(cc_coin.amount),
            fee_amount=uint64(0),
            confirmed=False,
            sent=uint32(0),
            spend_bundle=spend_bundle,
            additions=spend_bundle.additions(),
            removals=spend_bundle.removals(),
            wallet_id=self.wallet_state_manager.main_wallet.id(),
            sent_to=[],
            trade_id=None,
            type=uint32(TransactionType.OUTGOING_TX.value),
            name=token_bytes(),
        )
        cc_record = TransactionRecord(
            confirmed_at_height=uint32(0),
            created_at_time=uint64(int(time.time())),
            to_puzzle_hash=cc_coin.puzzle_hash,
            amount=uint64(cc_coin.amount),
            fee_amount=uint64(0),
            confirmed=False,
            sent=uint32(10),
            spend_bundle=None,
            additions=spend_bundle.additions(),
            removals=spend_bundle.removals(),
            wallet_id=self.id(),
            sent_to=[],
            trade_id=None,
            type=uint32(TransactionType.INCOMING_TX.value),
            name=token_bytes(),
        )
        await self.standard_wallet.push_transaction(regular_record)
        await self.standard_wallet.push_transaction(cc_record)
        return self
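A typical call site for the factory above, assuming an initialized wallet_state_manager and a funded main wallet, inside an async context (a hedged usage sketch, not taken from the source):

# wallet_state_manager and main_wallet are assumed to already exist.
cc_wallet = await CCWallet.create_new_cc(
    wallet_state_manager, main_wallet, uint64(100))
# Two mirrored records were pushed: an outgoing one on the standard wallet
# and an incoming one on the new CC wallet.
colour = cc_wallet.get_colour()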
Example #13
    async def generate_signed_transaction(
        self,
        amounts: List[uint64],
        puzzle_hashes: List[bytes32],
        fee: uint64 = uint64(0),
        origin_id: Optional[bytes32] = None,
        coins: Optional[Set[Coin]] = None,
        ignore_max_send_amount: bool = False,
    ) -> TransactionRecord:
        # Get coins and calculate amount of change required
        outgoing_amount = uint64(sum(amounts))
        total_outgoing = outgoing_amount + fee

        if not ignore_max_send_amount:
            max_send = await self.get_max_send_amount()
            if total_outgoing > max_send:
                raise ValueError(
                    f"Can't send more than {max_send} in a single transaction")

        if coins is None:
            selected_coins: Set[Coin] = await self.select_coins(
                uint64(total_outgoing))
        else:
            selected_coins = coins

        total_amount = sum([x.amount for x in selected_coins])
        change = total_amount - total_outgoing
        primaries = []
        for amount, puzzle_hash in zip(amounts, puzzle_hashes):
            primaries.append({"puzzlehash": puzzle_hash, "amount": amount})

        if change > 0:
            changepuzzlehash = await self.get_new_inner_hash()
            primaries.append({
                "puzzlehash": changepuzzlehash,
                "amount": change
            })

        coin = list(selected_coins)[0]
        inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)

        if self.cc_info.my_genesis_checker is None:
            raise ValueError("My genesis checker is None")

        genesis_id = genesis_coin_id_for_genesis_coin_checker(
            self.cc_info.my_genesis_checker)

        spendable_cc_list = []
        innersol_list = []
        sigs: List[G2Element] = []
        first = True
        for coin in selected_coins:
            coin_inner_puzzle = await self.inner_puzzle_for_cc_puzhash(
                coin.puzzle_hash)
            if first:
                first = False
                if fee > 0:
                    innersol = self.standard_wallet.make_solution(
                        primaries=primaries, fee=fee)
                else:
                    innersol = self.standard_wallet.make_solution(
                        primaries=primaries)
            else:
                innersol = self.standard_wallet.make_solution()
            innersol_list.append(innersol)
            lineage_proof = await self.get_lineage_proof_for_coin(coin)
            assert lineage_proof is not None
            spendable_cc_list.append(
                SpendableCC(coin, genesis_id, inner_puzzle, lineage_proof))
            sigs = sigs + await self.get_sigs(coin_inner_puzzle, innersol,
                                              coin.name())

        spend_bundle = spend_bundle_for_spendable_ccs(
            CC_MOD,
            self.cc_info.my_genesis_checker,
            spendable_cc_list,
            innersol_list,
            sigs,
        )
        # TODO add support for array in stored records
        return TransactionRecord(
            confirmed_at_height=uint32(0),
            created_at_time=uint64(int(time.time())),
            to_puzzle_hash=puzzle_hashes[0],
            amount=uint64(outgoing_amount),
            fee_amount=uint64(0),
            confirmed=False,
            sent=uint32(0),
            spend_bundle=spend_bundle,
            additions=spend_bundle.additions(),
            removals=spend_bundle.removals(),
            wallet_id=self.id(),
            sent_to=[],
            trade_id=None,
            type=uint32(TransactionType.OUTGOING_TX.value),
            name=spend_bundle.name(),
        )
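Coin selection and change above follow the usual wallet rule: the selected coins must cover sum(amounts) + fee, and any surplus goes back to a fresh inner puzzle hash. The arithmetic in isolation (an illustrative helper, not the wallet's API):

from typing import Dict, List

def build_primaries(amounts: List[int], puzzle_hashes: List[bytes],
                    selected_total: int, fee: int,
                    change_puzzle_hash: bytes) -> List[Dict]:
    primaries = [{"puzzlehash": ph, "amount": amt}
                 for amt, ph in zip(amounts, puzzle_hashes)]
    change = selected_total - (sum(amounts) + fee)
    assert change >= 0, "selected coins do not cover outputs plus fee"
    if change > 0:
        primaries.append({"puzzlehash": change_puzzle_hash, "amount": change})
    return primaries

outs = build_primaries([60], [b"\x01" * 32], selected_total=100, fee=0,
                       change_puzzle_hash=b"\x02" * 32)
assert [o["amount"] for o in outs] == [60, 40]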
Example #14
    async def generate_zero_val_coin(
            self,
            send: bool = True,
            exclude: Optional[List[Coin]] = None) -> SpendBundle:
        if self.cc_info.my_genesis_checker is None:
            raise ValueError("My genesis checker is None")
        if exclude is None:
            exclude = []
        coins = await self.standard_wallet.select_coins(0, exclude)

        assert coins != set()

        origin = coins.copy().pop()
        origin_id = origin.name()

        cc_inner = await self.get_new_inner_hash()
        cc_puzzle_hash: bytes32 = cc_puzzle_hash_for_inner_puzzle_hash(
            CC_MOD, self.cc_info.my_genesis_checker, cc_inner)

        tx: TransactionRecord = await self.standard_wallet.generate_signed_transaction(
            uint64(0), cc_puzzle_hash, uint64(0), origin_id, coins)
        assert tx.spend_bundle is not None
        full_spend: SpendBundle = tx.spend_bundle
        self.log.info(
            f"Generate zero val coin: cc_puzzle_hash is {cc_puzzle_hash}")

        # generate the eve coin so we can add future lineage_proofs even if we don't spend it yet
        eve_coin = Coin(origin_id, cc_puzzle_hash, uint64(0))

        await self.add_lineage(
            eve_coin.name(),
            Program.to((
                1,
                [eve_coin.parent_coin_info, cc_inner, eve_coin.amount],
            )),
        )
        await self.add_lineage(eve_coin.parent_coin_info,
                               Program.to((0, [origin.as_list(), 1])))

        if send:
            regular_record = TransactionRecord(
                confirmed_at_height=uint32(0),
                created_at_time=uint64(int(time.time())),
                to_puzzle_hash=cc_puzzle_hash,
                amount=uint64(0),
                fee_amount=uint64(0),
                confirmed=False,
                sent=uint32(10),
                spend_bundle=full_spend,
                additions=full_spend.additions(),
                removals=full_spend.removals(),
                wallet_id=uint32(1),
                sent_to=[],
                trade_id=None,
                type=uint32(TransactionType.INCOMING_TX.value),
                name=token_bytes(),
            )
            cc_record = TransactionRecord(
                confirmed_at_height=uint32(0),
                created_at_time=uint64(int(time.time())),
                to_puzzle_hash=cc_puzzle_hash,
                amount=uint64(0),
                fee_amount=uint64(0),
                confirmed=False,
                sent=uint32(0),
                spend_bundle=full_spend,
                additions=full_spend.additions(),
                removals=full_spend.removals(),
                wallet_id=self.id(),
                sent_to=[],
                trade_id=None,
                type=uint32(TransactionType.INCOMING_TX.value),
                name=full_spend.name(),
            )
            await self.wallet_state_manager.add_transaction(regular_record)
            await self.wallet_state_manager.add_pending_transaction(cc_record)

        return full_spend
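The eve coin above can be named before the spend is broadcast, because a chia coin id depends only on its parent id, puzzle hash, and amount. A self-contained sketch of that derivation (the encoding below assumes CLVM's minimal signed big-endian integers, with zero encoding as empty bytes):

import hashlib

def int_to_bytes(v: int) -> bytes:
    # CLVM-style minimal signed big-endian encoding; zero encodes as b"".
    if v == 0:
        return b""
    return v.to_bytes((v.bit_length() + 8) // 8, "big", signed=True)

def coin_id(parent_id: bytes, puzzle_hash: bytes, amount: int) -> bytes:
    # Coin id = sha256(parent_id || puzzle_hash || encoded amount)
    return hashlib.sha256(parent_id + puzzle_hash + int_to_bytes(amount)).digest()

origin_id = b"\x11" * 32       # placeholder values for illustration
cc_puzzle_hash = b"\x22" * 32
eve_id = coin_id(origin_id, cc_puzzle_hash, 0)
assert len(eve_id) == 32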
Example #15
    async def test_cc_max_amount_send(self, two_wallet_nodes):
        num_blocks = 3
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet_node_2, server_3 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)

        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        funds = sum(
            [
                calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
                for i in range(1, num_blocks - 1)
            ]
        )

        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100000))
        tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.get_send_queue()
        tx_record = tx_queue[0]
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 100000)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100000)

        assert cc_wallet.cc_info.my_genesis_checker is not None

        cc_2_hash = await cc_wallet.get_new_inner_hash()
        amounts = []
        puzzle_hashes = []
        for i in range(1, 50):
            amounts.append(uint64(i))
            puzzle_hashes.append(cc_2_hash)
        spent_coin = (await cc_wallet.get_cc_spendable_coins())[0].coin
        tx_record = await cc_wallet.generate_signed_transaction(amounts, puzzle_hashes, coins={spent_coin})
        await wallet.wallet_state_manager.add_pending_transaction(tx_record)

        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )

        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        await asyncio.sleep(2)

        async def check_all_there():
            spendable = await cc_wallet.get_cc_spendable_coins()
            spendable_name_set = set()
            for record in spendable:
                spendable_name_set.add(record.coin.name())
            puzzle_hash = cc_puzzle_hash_for_inner_puzzle_hash(CC_MOD, cc_wallet.cc_info.my_genesis_checker, cc_2_hash)
            for i in range(1, 50):
                coin = Coin(spent_coin.name(), puzzle_hash, i)
                if coin.name() not in spendable_name_set:
                    return False
            return True

        await time_out_assert(15, check_all_there, True)
        await asyncio.sleep(5)
        max_sent_amount = await cc_wallet.get_max_send_amount()

        # 1) Generate transaction that is under the limit
        under_limit_tx = None
        try:
            under_limit_tx = await cc_wallet.generate_signed_transaction(
                [max_sent_amount - 1],
                [ph],
            )
        except ValueError:
            # An unexpected ValueError leaves under_limit_tx as None; the assert below fails.
            pass

        assert under_limit_tx is not None

        # 2) Generate transaction that is equal to limit
        at_limit_tx = None
        try:
            at_limit_tx = await cc_wallet.generate_signed_transaction(
                [max_sent_amount],
                [ph],
            )
        except ValueError:
            # An unexpected ValueError leaves at_limit_tx as None; the assert below fails.
            pass

        assert at_limit_tx is not None

        # 3) Generate transaction that is greater than limit
        above_limit_tx = None
        try:
            above_limit_tx = await cc_wallet.generate_signed_transaction(
                [max_sent_amount + 1],
                [ph],
            )
        except ValueError:
            pass

        assert above_limit_tx is None
Example #16
    async def respond_proof_of_space(
            self, response: harvester_protocol.RespondProofOfSpace):
        """
        This is a response from the harvester with a proof of space. We check its validity,
        and request a pool partial, a header signature, or both, if the proof is good enough.
        """

        pool_sks: List[PrivateKey] = [
            PrivateKey.from_bytes(bytes.fromhex(ce))
            for ce in self.key_config["pool_sks"]
        ]
        if response.proof.pool_pubkey not in [
                sk.get_public_key() for sk in pool_sks
        ]:
            raise RuntimeError("Pool pubkey not in list of approved keys")

        challenge_hash: bytes32 = self.harvester_responses_challenge[
            response.quality_string]
        challenge_weight: uint128 = self.challenge_to_weight[challenge_hash]
        challenge_height: uint32 = self.challenge_to_height[challenge_hash]
        new_proof_height: uint32 = uint32(challenge_height + 1)
        difficulty: uint64 = uint64(0)
        for posf in self.challenges[challenge_weight]:
            if posf.challenge_hash == challenge_hash:
                difficulty = posf.difficulty
        if difficulty == 0:
            raise RuntimeError("Did not find challenge")

        computed_quality_string = response.proof.verify_and_get_quality_string()
        if response.quality_string != computed_quality_string:
            raise RuntimeError("Invalid quality for proof of space")

        self.harvester_responses_proofs[response.quality_string] = response.proof
        self.harvester_responses_proof_hash_to_qual[
            response.proof.get_hash()] = response.quality_string

        estimate_min = (self.proof_of_time_estimate_ips *
                        self.constants["BLOCK_TIME_TARGET"] /
                        self.constants["MIN_ITERS_PROPORTION"])
        number_iters: uint64 = calculate_iterations_quality(
            computed_quality_string,
            response.proof.size,
            difficulty,
            estimate_min,
        )
        estimate_secs: float = number_iters / self.proof_of_time_estimate_ips

        if estimate_secs < self.config["pool_share_threshold"]:
            request1 = harvester_protocol.RequestPartialProof(
                response.quality_string,
                bytes.fromhex(self.key_config["wallet_target"]),
            )
            yield OutboundMessage(
                NodeType.HARVESTER,
                Message("request_partial_proof", request1),
                Delivery.RESPOND,
            )
        if estimate_secs < self.config["propagate_threshold"]:
            if new_proof_height not in self.coinbase_rewards:
                log.error(
                    f"Don't have coinbase transaction for height {new_proof_height}, cannot submit PoS"
                )
                return

            coinbase, signature = self.coinbase_rewards[new_proof_height]
            request2 = farmer_protocol.RequestHeaderHash(
                challenge_hash,
                coinbase,
                signature,
                bytes.fromhex(self.key_config["wallet_target"]),
                response.proof,
            )

            yield OutboundMessage(
                NodeType.FULL_NODE,
                Message("request_header_hash", request2),
                Delivery.BROADCAST,
            )
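Both thresholds above compare the same quantity: the estimated seconds to finish the proof of time, i.e. the required iterations divided by the current iterations-per-second estimate. The decision in isolation, with illustrative names and threshold values:

from typing import List

def proof_actions(number_iters: int, ips: float,
                  pool_share_threshold: float,
                  propagate_threshold: float) -> List[str]:
    # Mirrors the farmer's two independent checks above.
    estimate_secs = number_iters / ips
    actions = []
    if estimate_secs < pool_share_threshold:
        actions.append("request_partial_proof")
    if estimate_secs < propagate_threshold:
        actions.append("request_header_hash")
    return actions

assert proof_actions(10_000, 1_000, 300, 30) == ["request_partial_proof", "request_header_hash"]
assert proof_actions(100_000, 1_000, 300, 30) == ["request_partial_proof"]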
Example #17
    async def test_cc_spend(self, two_wallet_nodes):
        num_blocks = 3
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet_node_2, server_3 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet
        wallet2 = wallet_node_2.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)

        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        funds = sum(
            [
                calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
                for i in range(1, num_blocks - 1)
            ]
        )

        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
        tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.get_send_queue()
        tx_record = tx_queue[0]
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)

        assert cc_wallet.cc_info.my_genesis_checker is not None
        colour = cc_wallet.get_colour()

        cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)

        assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker

        cc_2_hash = await cc_wallet_2.get_new_inner_hash()
        tx_record = await cc_wallet.generate_signed_transaction([uint64(60)], [cc_2_hash])
        await wallet.wallet_state_manager.add_pending_transaction(tx_record)

        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )

        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 40)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 40)

        await time_out_assert(30, cc_wallet_2.get_confirmed_balance, 60)
        await time_out_assert(30, cc_wallet_2.get_unconfirmed_balance, 60)

        cc_hash = await cc_wallet.get_new_inner_hash()
        tx_record = await cc_wallet_2.generate_signed_transaction([uint64(15)], [cc_hash])
        await wallet.wallet_state_manager.add_pending_transaction(tx_record)

        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )

        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 55)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 55)
Example #18
    async def proof_of_space_finalized(
            self,
            proof_of_space_finalized: farmer_protocol.ProofOfSpaceFinalized):
        """
        Full node notifies the farmer that a proof of space has been completed. The proof is added
        to the challenges list at that weight, and the current weight is updated if necessary.
        """
        get_proofs: bool = False
        if (proof_of_space_finalized.weight >= self.current_weight
                and proof_of_space_finalized.challenge_hash
                not in self.seen_challenges):
            # Only get proofs for new challenges, at a current or new weight
            get_proofs = True
            if proof_of_space_finalized.weight > self.current_weight:
                self.current_weight = proof_of_space_finalized.weight

            # TODO: ask the pool for this information

            pool_sks: List[PrivateKey] = [
                PrivateKey.from_bytes(bytes.fromhex(ce))  # type: ignore # noqa
                for ce in self.key_config["pool_sks"]
            ]

            coinbase_reward = uint64(
                calculate_block_reward(proof_of_space_finalized.height))

            coinbase_coin, coinbase_signature = create_coinbase_coin_and_signature(
                proof_of_space_finalized.height + 1,
                bytes.fromhex(self.key_config["pool_target"]),
                coinbase_reward,
                pool_sks[0],
            )

            self.coinbase_rewards[uint32(proof_of_space_finalized.height + 1)] = (
                coinbase_coin,
                coinbase_signature,
            )

            log.info(f"\tCurrent weight set to {self.current_weight}")
        self.seen_challenges.add(proof_of_space_finalized.challenge_hash)
        if proof_of_space_finalized.weight not in self.challenges:
            self.challenges[proof_of_space_finalized.weight] = [
                proof_of_space_finalized
            ]
        else:
            self.challenges[proof_of_space_finalized.weight].append(
                proof_of_space_finalized)
        self.challenge_to_weight[
            proof_of_space_finalized.challenge_hash] = proof_of_space_finalized.weight
        self.challenge_to_height[
            proof_of_space_finalized.challenge_hash] = proof_of_space_finalized.height

        if get_proofs:
            message = harvester_protocol.NewChallenge(
                proof_of_space_finalized.challenge_hash)
            yield OutboundMessage(
                NodeType.HARVESTER,
                Message("new_challenge", message),
                Delivery.BROADCAST,
            )
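The handler above keeps three indexes so that later harvester responses can be resolved: challenges grouped by weight, plus challenge-hash-to-weight and challenge-hash-to-height maps. A minimal sketch of that bookkeeping, with illustrative names:

from collections import defaultdict
from typing import Dict, List, Tuple

class ChallengeIndex:
    # Minimal sketch of the farmer's challenge bookkeeping.
    def __init__(self) -> None:
        self.challenges: Dict[int, List[Tuple[bytes, int]]] = defaultdict(list)
        self.challenge_to_weight: Dict[bytes, int] = {}
        self.challenge_to_height: Dict[bytes, int] = {}

    def add(self, challenge_hash: bytes, weight: int, height: int) -> None:
        self.challenges[weight].append((challenge_hash, height))
        self.challenge_to_weight[challenge_hash] = weight
        self.challenge_to_height[challenge_hash] = height

idx = ChallengeIndex()
idx.add(b"\xaa" * 32, weight=1000, height=42)
assert idx.challenge_to_height[b"\xaa" * 32] == 42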
Example #19
    async def _create_proof_of_weight(self,
                                      tip: bytes32) -> Optional[WeightProof]:
        """
        Creates a weight proof object
        """
        assert self.block_cache is not None
        sub_epoch_data: List[SubEpochData] = []
        sub_epoch_segments: List[SubEpochChallengeSegment] = []
        tip_rec = self.block_cache.sub_block_record(tip)
        if tip_rec is None:
            self.log.error("failed not tip in cache")
            return None
        self.log.info(
            f"create weight proof peak {tip} {tip_rec.sub_block_height}")
        recent_chain = await self._get_recent_chain(tip_rec.sub_block_height)
        if recent_chain is None:
            return None

        weight_to_check = self._get_weights_for_sampling(
            random.Random(tip), tip_rec.weight, recent_chain)
        if weight_to_check is None:
            self.log.warning("math error while sampling sub epochs")

        prev_ses_block = self.block_cache.height_to_sub_block_record(uint32(0))
        if prev_ses_block is None:
            return None

        sub_epoch_n = uint32(0)
        summary_heights = self.block_cache.get_ses_heights()
        for idx, ses_height in enumerate(summary_heights):
            # next sub-epoch summary block
            ses_block = self.block_cache.height_to_sub_block_record(ses_height)
            if ses_block is None or ses_block.sub_epoch_summary_included is None:
                self.log.error("error while building proof")
                return None

            self.log.debug(
                f"handle sub epoch summary {idx} at height: {ses_height} weight: {ses_block.weight}"
            )
            sub_epoch_data.append(
                _make_sub_epoch_data(ses_block.sub_epoch_summary_included))

            # if we have enough sub_epoch samples, don't sample more
            if sub_epoch_n >= self.MAX_SAMPLES:
                self.log.debug("reached sampled sub epoch cap")
                continue

            # sample sub epoch
            if self._sample_sub_epoch(prev_ses_block, ses_block,
                                      weight_to_check):  # type: ignore
                segments = await self.__create_sub_epoch_segments(
                    ses_block, prev_ses_block, uint32(idx))
                if segments is None:
                    self.log.error(
                        f"failed while building segments for sub epoch {idx}, ses height {ses_height} "
                    )
                    return None
                self.log.debug(
                    f"sub epoch {sub_epoch_n} has {len(segments)} segments")
                sub_epoch_segments.extend(segments)
                sub_epoch_n = uint32(sub_epoch_n + 1)
            prev_ses_block = ses_block
        self.log.info(f"sub_epochs: {len(sub_epoch_data)}")
        return WeightProof(sub_epoch_data, sub_epoch_segments, recent_chain)
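The loop above records every sub-epoch summary but builds challenge segments only for sampled sub-epochs, and stops sampling once MAX_SAMPLES is reached. The control flow in isolation (illustrative names, not the node's API):

from typing import Callable, List, Tuple

def collect_sub_epochs(summaries: List[str], max_samples: int,
                       should_sample: Callable[[int], bool]) -> Tuple[List[str], List[int]]:
    # Every summary is recorded; only sampled indices (capped) get segments.
    data: List[str] = []
    sampled: List[int] = []
    for idx, summary in enumerate(summaries):
        data.append(summary)
        if len(sampled) >= max_samples:
            continue  # summary still recorded, segment building skipped
        if should_sample(idx):
            sampled.append(idx)
    return data, sampled

data, sampled = collect_sub_epochs(["s0", "s1", "s2", "s3"], 2, lambda i: i % 2 == 0)
assert data == ["s0", "s1", "s2", "s3"] and sampled == [0, 2]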
Example #20
def validate_unfinished_header_block(
    constants: ConsensusConstants,
    sub_blocks: BlockchainInterface,
    header_block: UnfinishedHeaderBlock,
    check_filter: bool,
    expected_difficulty: uint64,
    expected_sub_slot_iters: uint64,
    skip_overflow_last_ss_validation: bool = False,
    skip_vdf_is_valid: bool = False,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
    """
    Validates an unfinished header block. This is a block without the infusion VDFs (unfinished)
    and without transactions and transaction info (header). Returns (required_iters, error).

    This method is meant to validate only the unfinished part of the sub-block. However, finished_sub_slots
    refers to all sub-slots that were finished from the previous sub-block's infusion point up to this
    sub-block's infusion point. Therefore, in the case where this is an overflow sub-block and the last
    sub-slot is not yet released, header_block.finished_sub_slots will be missing one sub-slot. In this
    case, skip_overflow_last_ss_validation must be set to True. This skips validation of end of slots and
    sub-epochs, and leads to other small tweaks in validation.
    """
    # 1. Check that the previous block exists in the blockchain, or that it is correct

    prev_sb = sub_blocks.try_sub_block(header_block.prev_header_hash)
    genesis_block = prev_sb is None
    if genesis_block and header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
        return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)

    overflow = is_overflow_sub_block(
        constants, header_block.reward_chain_sub_block.signage_point_index)
    if skip_overflow_last_ss_validation and overflow:
        finished_sub_slots_since_prev = len(
            header_block.finished_sub_slots) + 1
    else:
        finished_sub_slots_since_prev = len(header_block.finished_sub_slots)

    new_sub_slot: bool = finished_sub_slots_since_prev > 0

    can_finish_se: bool = False
    can_finish_epoch: bool = False
    if genesis_block:
        height: uint32 = uint32(0)
        assert expected_difficulty == constants.DIFFICULTY_STARTING
        assert expected_sub_slot_iters == constants.SUB_SLOT_ITERS_STARTING
    else:
        assert prev_sb is not None
        height = uint32(prev_sb.height + 1)
        if prev_sb.sub_epoch_summary_included is not None:
            can_finish_se, can_finish_epoch = False, False
        else:
            if new_sub_slot:
                can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(
                    constants,
                    prev_sb.height,
                    prev_sb.deficit,
                    sub_blocks,
                    prev_sb.prev_hash,
                    False,
                )
            else:
                can_finish_se = False
                can_finish_epoch = False

    # 2. Check finished slots that have been crossed since prev_sb
    ses_hash: Optional[bytes32] = None
    if new_sub_slot and not skip_overflow_last_ss_validation:
        # Finished a slot(s) since previous block. The first sub-slot must have at least one sub-block, and all
        # subsequent sub-slots must be empty
        for finished_sub_slot_n, sub_slot in enumerate(
                header_block.finished_sub_slots):
            # Start of slot challenge is fetched from SP
            challenge_hash: bytes32 = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge

            if finished_sub_slot_n == 0:
                if genesis_block:
                    # 2a. check sub-slot challenge hash for genesis block
                    if challenge_hash != constants.GENESIS_CHALLENGE:
                        return None, ValidationError(
                            Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
                else:
                    assert prev_sb is not None
                    curr: SubBlockRecord = prev_sb
                    while not curr.first_in_sub_slot:
                        curr = sub_blocks.sub_block_record(curr.prev_hash)
                    assert curr.finished_challenge_slot_hashes is not None

                    # 2b. check sub-slot challenge hash for non-genesis block
                    if curr.finished_challenge_slot_hashes[-1] != challenge_hash:
                        return None, ValidationError(
                            Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
            else:
                # 2c. check sub-slot challenge hash for empty slot
                if (header_block.finished_sub_slots[
                        finished_sub_slot_n - 1].challenge_chain.get_hash()
                        != challenge_hash):
                    return None, ValidationError(
                        Err.INVALID_PREV_CHALLENGE_SLOT_HASH)

            if genesis_block:
                # 2d. Validate that genesis block has no ICC
                if sub_slot.infused_challenge_chain is not None:
                    return None, ValidationError(Err.SHOULD_NOT_HAVE_ICC)
            else:
                assert prev_sb is not None
                icc_iters_committed: Optional[uint64] = None
                icc_iters_proof: Optional[uint64] = None
                icc_challenge_hash: Optional[bytes32] = None
                icc_vdf_input = None
                if prev_sb.deficit < constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK:
                    # There should be no ICC chain if the last sub block's deficit equals
                    # MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK; here prev sb's deficit is below that
                    if finished_sub_slot_n == 0:
                        # This is the first sub slot after the last sb, whose deficit is below
                        # MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK, and which thus has an ICC
                        curr = prev_sb
                        while not curr.is_challenge_sub_block(
                                constants) and not curr.first_in_sub_slot:
                            curr = sub_blocks.sub_block_record(curr.prev_hash)
                        if curr.is_challenge_sub_block(constants):
                            icc_challenge_hash = curr.challenge_block_info_hash
                            icc_iters_committed = uint64(
                                prev_sb.sub_slot_iters -
                                curr.ip_iters(constants))
                        else:
                            assert curr.finished_infused_challenge_slot_hashes is not None
                            icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[
                                -1]
                            icc_iters_committed = prev_sb.sub_slot_iters
                        icc_iters_proof = uint64(prev_sb.sub_slot_iters -
                                                 prev_sb.ip_iters(constants))
                        if prev_sb.is_challenge_sub_block(constants):
                            icc_vdf_input = ClassgroupElement.get_default_element()
                        else:
                            icc_vdf_input = prev_sb.infused_challenge_vdf_output
                    else:
                        # This is not the first sub slot after the last sub block, so we might not have an ICC
                        if (header_block.finished_sub_slots[
                                finished_sub_slot_n - 1].reward_chain.deficit <
                                constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK):
                            finished_ss = header_block.finished_sub_slots[
                                finished_sub_slot_n - 1]
                            assert finished_ss.infused_challenge_chain is not None

                            # Only set the ICC if the previous sub-slot's deficit is below
                            # MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK
                            icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash()
                            icc_iters_committed = prev_sb.sub_slot_iters
                            icc_iters_proof = icc_iters_committed
                            icc_vdf_input = ClassgroupElement.get_default_element()

                # 2e. Validate that there is an ICC iff icc_challenge_hash is not None
                assert (sub_slot.infused_challenge_chain is
                        None) == (icc_challenge_hash is None)
                if sub_slot.infused_challenge_chain is not None:
                    assert icc_vdf_input is not None
                    assert icc_iters_proof is not None
                    assert icc_challenge_hash is not None
                    assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
                    # 2f. Check infused challenge chain sub-slot VDF
                    # Only validate from prev_sb to optimize
                    target_vdf_info = VDFInfo(
                        icc_challenge_hash,
                        icc_iters_proof,
                        sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
                    )
                    if sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
                            target_vdf_info,
                            number_of_iterations=icc_iters_committed,
                    ):
                        return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
                    if not skip_vdf_is_valid and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
                            constants, icc_vdf_input, target_vdf_info, None):
                        return None, ValidationError(Err.INVALID_ICC_EOS_VDF)

                    if sub_slot.reward_chain.deficit == constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK:
                        # 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16
                        if (sub_slot.infused_challenge_chain.get_hash() !=
                                sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash):
                            return None, ValidationError(
                                Err.INVALID_ICC_HASH_CC)
                    else:
                        # 2h. Check infused challenge sub-slot hash not included for other deficits
                        if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
                            return None, ValidationError(
                                Err.INVALID_ICC_HASH_CC)

                    # 2i. Check infused challenge sub-slot hash in reward sub-slot
                    if (sub_slot.infused_challenge_chain.get_hash() !=
                            sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash):
                        return None, ValidationError(Err.INVALID_ICC_HASH_RC)
                else:
                    # 2j. If no icc, check that the cc doesn't include it
                    if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
                        return None, ValidationError(Err.INVALID_ICC_HASH_CC)

                    # 2k. If no icc, check that the rc doesn't include it
                    if sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash is not None:
                        return None, ValidationError(Err.INVALID_ICC_HASH_RC)

            if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                assert ses_hash is None  # Only one of the slots can have it
                ses_hash = sub_slot.challenge_chain.subepoch_summary_hash

            # 2l. check sub-epoch summary hash is None for empty slots
            if finished_sub_slot_n != 0:
                if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                    return None, ValidationError(
                        Err.INVALID_SUB_EPOCH_SUMMARY_HASH)

            if can_finish_epoch and sub_slot.challenge_chain.subepoch_summary_hash is not None:
                # 2m. Check new difficulty and ssi
                if sub_slot.challenge_chain.new_sub_slot_iters != expected_sub_slot_iters:
                    return None, ValidationError(
                        Err.INVALID_NEW_SUB_SLOT_ITERS)
                if sub_slot.challenge_chain.new_difficulty != expected_difficulty:
                    return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
            else:
                # 2n. Check new difficulty and ssi are None if we don't finish epoch
                if sub_slot.challenge_chain.new_sub_slot_iters is not None:
                    return None, ValidationError(
                        Err.INVALID_NEW_SUB_SLOT_ITERS)
                if sub_slot.challenge_chain.new_difficulty is not None:
                    return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)

            # 2o. Check challenge sub-slot hash in reward sub-slot
            if (sub_slot.challenge_chain.get_hash() !=
                    sub_slot.reward_chain.challenge_chain_sub_slot_hash):
                return (
                    None,
                    ValidationError(
                        Err.INVALID_CHALLENGE_SLOT_HASH_RC,
                        "sub-slot hash in reward sub-slot mismatch",
                    ),
                )

            eos_vdf_iters: uint64 = expected_sub_slot_iters
            cc_start_element: ClassgroupElement = ClassgroupElement.get_default_element()
            cc_eos_vdf_challenge: bytes32 = challenge_hash
            if genesis_block:
                if finished_sub_slot_n == 0:
                    # First block, one empty slot. prior_point is the initial challenge
                    rc_eos_vdf_challenge: bytes32 = constants.GENESIS_CHALLENGE
                    cc_eos_vdf_challenge = constants.GENESIS_CHALLENGE
                else:
                    # First block, but have at least two empty slots
                    rc_eos_vdf_challenge = header_block.finished_sub_slots[
                        finished_sub_slot_n - 1].reward_chain.get_hash()
            else:
                assert prev_sb is not None
                if finished_sub_slot_n == 0:
                    # No empty slots, so the starting point of VDF is the last reward block. Uses
                    # the same IPS as the previous block, since it's the same slot
                    rc_eos_vdf_challenge = prev_sb.reward_infusion_new_challenge
                    eos_vdf_iters = uint64(prev_sb.sub_slot_iters -
                                           prev_sb.ip_iters(constants))
                    cc_start_element = prev_sb.challenge_vdf_output
                else:
                    # At least one empty slot, so use previous slot hash. IPS might change because it's a new slot
                    rc_eos_vdf_challenge = header_block.finished_sub_slots[
                        finished_sub_slot_n - 1].reward_chain.get_hash()

            # 2p. Check end of reward slot VDF
            target_vdf_info = VDFInfo(
                rc_eos_vdf_challenge,
                eos_vdf_iters,
                sub_slot.reward_chain.end_of_slot_vdf.output,
            )
            if not skip_vdf_is_valid and not sub_slot.proofs.reward_chain_slot_proof.is_valid(
                    constants,
                    ClassgroupElement.get_default_element(),
                    sub_slot.reward_chain.end_of_slot_vdf,
                    target_vdf_info,
            ):
                return None, ValidationError(Err.INVALID_RC_EOS_VDF)

            # 2q. Check challenge chain sub-slot VDF
            partial_cc_vdf_info = VDFInfo(
                cc_eos_vdf_challenge,
                eos_vdf_iters,
                sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.
                output,
            )
            if genesis_block:
                cc_eos_vdf_info_iters = constants.SUB_SLOT_ITERS_STARTING
            else:
                assert prev_sb is not None
                if finished_sub_slot_n == 0:
                    cc_eos_vdf_info_iters = prev_sb.sub_slot_iters
                else:
                    cc_eos_vdf_info_iters = expected_sub_slot_iters
            # Check that the modified data is correct
            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
                    partial_cc_vdf_info,
                    number_of_iterations=cc_eos_vdf_info_iters,
            ):
                return None, ValidationError(
                    Err.INVALID_CC_EOS_VDF,
                    "wrong challenge chain end of slot vdf")

            # Pass in None for target info since we are only checking the proof from the temporary point,
            # but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)
            if not skip_vdf_is_valid and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
                    constants, cc_start_element, partial_cc_vdf_info, None):
                return None, ValidationError(Err.INVALID_CC_EOS_VDF)

            if genesis_block:
                # 2r. Check deficit (MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK edge case for genesis block)
                if sub_slot.reward_chain.deficit != constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK:
                    return (
                        None,
                        ValidationError(
                            Err.INVALID_DEFICIT,
                            f"genesis, expected deficit {constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK}",
                        ),
                    )
            else:
                assert prev_sb is not None
                if prev_sb.deficit == 0:
                    # 2s. If prev sb had deficit 0, resets deficit to MIN_SUB_BLOCK_PER_CHALLENGE_BLOCK
                    if sub_slot.reward_chain.deficit != constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK:
                        log.error(
                            f"expected deficit: {constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK}"
                        )
                        return (
                            None,
                            ValidationError(
                                Err.INVALID_DEFICIT,
                                f"expected deficit {constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK}, saw "
                                f"{sub_slot.reward_chain.deficit}",
                            ),
                        )
                else:
                    # 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0
                    if sub_slot.reward_chain.deficit != prev_sb.deficit:
                        return None, ValidationError(
                            Err.INVALID_DEFICIT,
                            "deficit is wrong at slot end")

        # 3. Check sub-epoch summary
        # Note that the subepoch summary is the summary of the previous subepoch (not the one that just finished)
        if not skip_overflow_last_ss_validation:
            if ses_hash is not None:
                # 3a. Check that genesis block does not have sub-epoch summary
                if genesis_block:
                    return (
                        None,
                        ValidationError(
                            Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
                            "genesis with sub-epoch-summary hash",
                        ),
                    )
                assert prev_sb is not None

                # 3b. Check that we finished a slot and we finished a sub-epoch
                if not new_sub_slot or not can_finish_se:
                    return (
                        None,
                        ValidationError(
                            Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
                            f"new sub-slot: {new_sub_slot} finishes sub-epoch {can_finish_se}",
                        ),
                    )

                # 3c. Check the actual sub-epoch is correct
                expected_sub_epoch_summary = make_sub_epoch_summary(
                    constants,
                    sub_blocks,
                    height,
                    sub_blocks.sub_block_record(prev_sb.prev_hash),
                    expected_difficulty if can_finish_epoch else None,
                    expected_sub_slot_iters if can_finish_epoch else None,
                )
                expected_hash = expected_sub_epoch_summary.get_hash()
                if expected_hash != ses_hash:
                    log.error(f"{expected_sub_epoch_summary}")
                    return (
                        None,
                        ValidationError(
                            Err.INVALID_SUB_EPOCH_SUMMARY,
                            f"expected ses hash: {expected_hash} got {ses_hash} ",
                        ),
                    )
            elif new_sub_slot and not genesis_block:
                # 3d. Check that we don't have to include a sub-epoch summary
                if can_finish_se or can_finish_epoch:
                    return (
                        None,
                        ValidationError(
                            Err.INVALID_SUB_EPOCH_SUMMARY,
                            "block finishes sub-epoch but ses-hash is None",
                        ),
                    )

    # 4. Check if the number of sub-blocks is less than the max
    if not new_sub_slot and not genesis_block:
        assert prev_sb is not None
        num_sub_blocks = 2  # This includes the current sub-block and the prev sub-block
        curr = prev_sb
        while not curr.first_in_sub_slot:
            num_sub_blocks += 1
            curr = sub_blocks.sub_block_record(curr.prev_hash)
        if num_sub_blocks > constants.MAX_SUB_SLOT_SUB_BLOCKS:
            return None, ValidationError(Err.TOO_MANY_SUB_BLOCKS)

    # If sub_block state is correct, we should always find a challenge here
    # This computes what the challenge should be for this sub-block

    challenge = get_block_challenge(
        constants,
        header_block,
        sub_blocks,
        genesis_block,
        overflow,
        skip_overflow_last_ss_validation,
    )

    # 5a. Check that the challenge matches the proof of space challenge
    if challenge != header_block.reward_chain_sub_block.pos_ss_cc_challenge_hash:
        log.error(f"Finished slots: {header_block.finished_sub_slots}")
        log.error(
            f"Data: {genesis_block} {overflow} {skip_overflow_last_ss_validation} {header_block.total_iters} "
            f"{header_block.reward_chain_sub_block.signage_point_index}"
            f"Prev: {prev_sb}")
        log.error(
            f"Challenge {challenge} provided {header_block.reward_chain_sub_block.pos_ss_cc_challenge_hash}"
        )
        return None, ValidationError(Err.INVALID_CC_CHALLENGE)

    # 5b. Check proof of space
    if header_block.reward_chain_sub_block.challenge_chain_sp_vdf is None:
        # Edge case of first sp (start of slot), where sp_iters == 0
        cc_sp_hash: bytes32 = challenge
    else:
        cc_sp_hash = header_block.reward_chain_sub_block.challenge_chain_sp_vdf.output.get_hash(
        )

    q_str: Optional[
        bytes32] = header_block.reward_chain_sub_block.proof_of_space.verify_and_get_quality_string(
            constants, challenge, cc_sp_hash)
    if q_str is None:
        return None, ValidationError(Err.INVALID_POSPACE)

    # 6. Check signage point index
    # no need to check negative values as this is uint8 (assumes types are checked)
    if header_block.reward_chain_sub_block.signage_point_index >= constants.NUM_SPS_SUB_SLOT:
        return None, ValidationError(Err.INVALID_SP_INDEX)

    # Note that required iters might be from the previous slot (if we are in an overflow sub-block)
    required_iters: uint64 = calculate_iterations_quality(
        q_str,
        header_block.reward_chain_sub_block.proof_of_space.size,
        expected_difficulty,
        cc_sp_hash,
    )

    # 8a. check signage point index 0 has no cc sp
    if (header_block.reward_chain_sub_block.signage_point_index == 0) != (
            header_block.reward_chain_sub_block.challenge_chain_sp_vdf is
            None):
        return None, ValidationError(Err.INVALID_SP_INDEX)

    # 8b. check signage point index 0 has no rc sp
    if (header_block.reward_chain_sub_block.signage_point_index == 0) != (
            header_block.reward_chain_sub_block.reward_chain_sp_vdf is None):
        return None, ValidationError(Err.INVALID_SP_INDEX)

    sp_iters: uint64 = calculate_sp_iters(
        constants,
        expected_sub_slot_iters,
        header_block.reward_chain_sub_block.signage_point_index,
    )

    ip_iters: uint64 = calculate_ip_iters(
        constants,
        expected_sub_slot_iters,
        header_block.reward_chain_sub_block.signage_point_index,
        required_iters,
    )
    if header_block.reward_chain_sub_block.challenge_chain_sp_vdf is None:
        # Blocks with very low required iters are not overflow blocks
        assert not overflow

    # 9. Check no overflows in the first sub-slot of a new epoch
    # (although they are OK in the second sub-slot), this is important
    if overflow and can_finish_epoch:
        if finished_sub_slots_since_prev < 2:
            return None, ValidationError(
                Err.NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH)

    # 10. Check total iters
    if genesis_block:
        total_iters: uint128 = uint128(expected_sub_slot_iters *
                                       finished_sub_slots_since_prev)
    else:
        assert prev_sb is not None
        if new_sub_slot:
            total_iters = prev_sb.total_iters
            # Add the rest of the slot of prev_sb
            total_iters = uint128(total_iters + prev_sb.sub_slot_iters -
                                  prev_sb.ip_iters(constants))
            # Add other empty slots
            total_iters = uint128(total_iters +
                                  (expected_sub_slot_iters *
                                   (finished_sub_slots_since_prev - 1)))
        else:
            # Slot iters is guaranteed to be the same for header_block and prev_sb
            # This takes the beginning of the slot, and adds ip_iters
            total_iters = uint128(prev_sb.total_iters -
                                  prev_sb.ip_iters(constants))
    total_iters = uint128(total_iters + ip_iters)
    if total_iters != header_block.reward_chain_sub_block.total_iters:
        return (
            None,
            ValidationError(
                Err.INVALID_TOTAL_ITERS,
                f"expected {total_iters} got {header_block.reward_chain_sub_block.total_iters}",
            ),
        )
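
    # Worked example with made-up numbers: suppose prev_sb.total_iters = 1_000_000,
    # prev_sb.sub_slot_iters = 512_000, prev_sb.ip_iters(constants) = 200_000, and
    # finished_sub_slots_since_prev = 2 with expected_sub_slot_iters = 512_000. A
    # new-sub-slot block then starts from 1_000_000 + (512_000 - 200_000) + 512_000
    # = 1_824_000, and its own ip_iters are added before comparing with the header.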

    sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - (
        expected_sub_slot_iters if overflow else 0))
    if overflow and skip_overflow_last_ss_validation:
        dummy_vdf_info = VDFInfo(
            bytes32([0] * 32),
            uint64(1),
            ClassgroupElement.get_default_element(),
        )
        dummy_sub_slot = EndOfSubSlotBundle(
            ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),
            None,
            RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None,
                               uint8(0)),
            SubSlotProofs(VDFProof(uint8(0), b""), None,
                          VDFProof(uint8(0), b"")),
        )
        sub_slots_to_pass_in = header_block.finished_sub_slots + [
            dummy_sub_slot
        ]
    else:
        sub_slots_to_pass_in = header_block.finished_sub_slots
    (
        cc_vdf_challenge,
        rc_vdf_challenge,
        cc_vdf_input,
        rc_vdf_input,
        cc_vdf_iters,
        rc_vdf_iters,
    ) = get_signage_point_vdf_info(
        constants,
        sub_slots_to_pass_in,
        overflow,
        prev_sb,
        sub_blocks,
        sp_total_iters,
        sp_iters,
    )

    # 11. Check reward chain sp proof
    if sp_iters != 0:
        assert (header_block.reward_chain_sub_block.reward_chain_sp_vdf
                is not None and header_block.reward_chain_sp_proof is not None)
        target_vdf_info = VDFInfo(
            rc_vdf_challenge,
            rc_vdf_iters,
            header_block.reward_chain_sub_block.reward_chain_sp_vdf.output,
        )
        if not skip_vdf_is_valid and not header_block.reward_chain_sp_proof.is_valid(
                constants,
                rc_vdf_input,
                header_block.reward_chain_sub_block.reward_chain_sp_vdf,
                target_vdf_info,
        ):
            return None, ValidationError(Err.INVALID_RC_SP_VDF)
        rc_sp_hash = header_block.reward_chain_sub_block.reward_chain_sp_vdf.output.get_hash(
        )
    else:
        # Edge case of first sp (start of slot), where sp_iters == 0
        assert overflow is not None
        if header_block.reward_chain_sub_block.reward_chain_sp_vdf is not None:
            return None, ValidationError(Err.INVALID_RC_SP_VDF)
        if new_sub_slot:
            rc_sp_hash = header_block.finished_sub_slots[
                -1].reward_chain.get_hash()
        else:
            if genesis_block:
                rc_sp_hash = constants.GENESIS_CHALLENGE
            else:
                assert prev_sb is not None
                curr = prev_sb
                while not curr.first_in_sub_slot:
                    curr = sub_blocks.sub_block_record(curr.prev_hash)
                assert curr.finished_reward_slot_hashes is not None
                rc_sp_hash = curr.finished_reward_slot_hashes[-1]

    # 12. Check reward chain sp signature
    if not AugSchemeMPL.verify(
            header_block.reward_chain_sub_block.proof_of_space.plot_public_key,
            rc_sp_hash,
            header_block.reward_chain_sub_block.reward_chain_sp_signature,
    ):
        return None, ValidationError(Err.INVALID_RC_SIGNATURE)

    # 13. Check cc sp vdf
    if sp_iters != 0:
        assert header_block.reward_chain_sub_block.challenge_chain_sp_vdf is not None
        assert header_block.challenge_chain_sp_proof is not None
        target_vdf_info = VDFInfo(
            cc_vdf_challenge,
            cc_vdf_iters,
            header_block.reward_chain_sub_block.challenge_chain_sp_vdf.output,
        )

        if header_block.reward_chain_sub_block.challenge_chain_sp_vdf != dataclasses.replace(
                target_vdf_info,
                number_of_iterations=sp_iters,
        ):
            return None, ValidationError(Err.INVALID_CC_SP_VDF)
        if not skip_vdf_is_valid and not header_block.challenge_chain_sp_proof.is_valid(
                constants, cc_vdf_input, target_vdf_info, None):
            return None, ValidationError(Err.INVALID_CC_SP_VDF)
    else:
        assert overflow is not None
        if header_block.reward_chain_sub_block.challenge_chain_sp_vdf is not None:
            return None, ValidationError(Err.INVALID_CC_SP_VDF)

    # 14. Check cc sp sig
    if not AugSchemeMPL.verify(
            header_block.reward_chain_sub_block.proof_of_space.plot_public_key,
            cc_sp_hash,
            header_block.reward_chain_sub_block.challenge_chain_sp_signature,
    ):
        return None, ValidationError(Err.INVALID_CC_SIGNATURE,
                                     "invalid cc sp sig")

    # 15. Check is_block
    if genesis_block:
        if header_block.foliage_sub_block.foliage_block_hash is None:
            return None, ValidationError(Err.INVALID_IS_BLOCK,
                                         "invalid genesis")
    else:
        assert prev_sb is not None
        # Finds the previous block
        curr = prev_sb
        while not curr.is_block:
            curr = sub_blocks.sub_block_record(curr.prev_hash)

        # The first sub-block to have an sp > the last block's infusion iters, is a block
        if overflow:
            our_sp_total_iters: uint128 = uint128(total_iters - ip_iters +
                                                  sp_iters -
                                                  expected_sub_slot_iters)
        else:
            our_sp_total_iters = uint128(total_iters - ip_iters + sp_iters)
        if (our_sp_total_iters > curr.total_iters) != (
                header_block.foliage_sub_block.foliage_block_hash is not None):
            return None, ValidationError(Err.INVALID_IS_BLOCK)
        if (our_sp_total_iters > curr.total_iters) != (
                header_block.foliage_sub_block.foliage_block_signature
                is not None):
            return None, ValidationError(Err.INVALID_IS_BLOCK)

    # 16. Check foliage sub block signature by plot key
    if not AugSchemeMPL.verify(
            header_block.reward_chain_sub_block.proof_of_space.plot_public_key,
            header_block.foliage_sub_block.foliage_sub_block_data.get_hash(),
            header_block.foliage_sub_block.foliage_sub_block_signature,
    ):
        return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)

    # 17. Check foliage block signature by plot key
    if header_block.foliage_sub_block.foliage_block_hash is not None:
        if not AugSchemeMPL.verify(
                header_block.reward_chain_sub_block.proof_of_space.
                plot_public_key,
                header_block.foliage_sub_block.foliage_block_hash,
                header_block.foliage_sub_block.foliage_block_signature,
        ):
            return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)

    # 18. Check unfinished reward chain sub block hash
    if (header_block.reward_chain_sub_block.get_hash() !=
            header_block.foliage_sub_block.foliage_sub_block_data.
            unfinished_reward_block_hash):
        return None, ValidationError(Err.INVALID_URSB_HASH)

    # 19. Check pool target max height
    if (header_block.foliage_sub_block.foliage_sub_block_data.pool_target.
            max_height != 0 and header_block.foliage_sub_block.
            foliage_sub_block_data.pool_target.max_height < height):
        return None, ValidationError(Err.OLD_POOL_TARGET)

    # 20a. Check pre-farm puzzle hashes for genesis sub-block.
    if genesis_block:
        if (header_block.foliage_sub_block.foliage_sub_block_data.pool_target.
                puzzle_hash != constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH):
            log.error(
                f"Pool target {header_block.foliage_sub_block.foliage_sub_block_data.pool_target} hb {header_block}"
            )
            return None, ValidationError(Err.INVALID_PREFARM)
        if (header_block.foliage_sub_block.foliage_sub_block_data.
                farmer_reward_puzzle_hash !=
                constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH):
            return None, ValidationError(Err.INVALID_PREFARM)
    else:
        # 20b. Check pool target signature. Should not check this for genesis sub-block.
        if not AugSchemeMPL.verify(
                header_block.reward_chain_sub_block.proof_of_space.
                pool_public_key,
                bytes(header_block.foliage_sub_block.foliage_sub_block_data.
                      pool_target),
                header_block.foliage_sub_block.foliage_sub_block_data.
                pool_signature,
        ):
            return None, ValidationError(Err.INVALID_POOL_SIGNATURE)

    # 21. Check extension data if applicable. None for mainnet.
    # 22. Check if foliage block is present
    if (header_block.foliage_sub_block.foliage_block_hash
            is not None) != (header_block.foliage_block is not None):
        return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)

    if (header_block.foliage_sub_block.foliage_block_signature
            is not None) != (header_block.foliage_block is not None):
        return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)

    if header_block.foliage_block is not None:
        # 23. Check foliage block hash
        if header_block.foliage_block.get_hash(
        ) != header_block.foliage_sub_block.foliage_block_hash:
            return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_HASH)

        if genesis_block:
            # 24a. Check prev block hash for genesis
            if header_block.foliage_block.prev_block_hash != constants.GENESIS_CHALLENGE:
                return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
        else:
            assert prev_sb is not None
            # 24b. Check prev block hash for non-genesis
            curr_sb: SubBlockRecord = prev_sb
            while not curr_sb.is_block:
                curr_sb = sub_blocks.sub_block_record(curr_sb.prev_hash)
            if header_block.foliage_block.prev_block_hash != curr_sb.header_hash:
                log.error(
                    f"Prev BH: {header_block.foliage_block.prev_block_hash} {curr_sb.header_hash} curr sb: {curr_sb}"
                )
                return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)

        # 25. The filter hash in the Foliage Block must be the hash of the filter
        if check_filter:
            if header_block.foliage_block.filter_hash != std_hash(
                    header_block.transactions_filter):
                return None, ValidationError(
                    Err.INVALID_TRANSACTIONS_FILTER_HASH)

        # 26. The timestamp in Foliage Block must comply with the timestamp rules
        if prev_sb is not None:
            last_timestamps: List[uint64] = []
            curr_sb = sub_blocks.sub_block_record(
                header_block.foliage_block.prev_block_hash)
            assert curr_sb.timestamp is not None
            while len(last_timestamps) < constants.NUMBER_OF_TIMESTAMPS:
                last_timestamps.append(curr_sb.timestamp)
                fetched: Optional[SubBlockRecord] = sub_blocks.try_sub_block(
                    curr_sb.prev_block_hash)
                if not fetched:
                    break
                curr_sb = fetched
            if len(last_timestamps) != constants.NUMBER_OF_TIMESTAMPS:
                # For blocks 1 to 10, average timestamps of all previous blocks
                assert curr_sb.height == 0
            prev_time: uint64 = uint64(
                int(sum(last_timestamps) // len(last_timestamps)))
            if header_block.foliage_block.timestamp <= prev_time:
                return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_PAST)
            if header_block.foliage_block.timestamp > int(
                    time.time() + constants.MAX_FUTURE_TIME):
                return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_FUTURE)

    return required_iters, None  # Valid unfinished header block
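
The timestamp rule in step 26 averages the previous NUMBER_OF_TIMESTAMPS timestamps (or all of them near genesis) and requires the new timestamp to be strictly later than that average, but not more than MAX_FUTURE_TIME ahead of the local clock. A minimal standalone sketch of that rule, with assumed values for both constants:

import time
from typing import List, Optional

NUMBER_OF_TIMESTAMPS = 11      # assumed value, stands in for constants.NUMBER_OF_TIMESTAMPS
MAX_FUTURE_TIME = 2 * 60 * 60  # assumed value, stands in for constants.MAX_FUTURE_TIME


def check_timestamp(new_timestamp: int, prev_timestamps: List[int]) -> Optional[str]:
    """Return an error name, or None if the timestamp is acceptable."""
    # Average over up to the last NUMBER_OF_TIMESTAMPS previous timestamps
    window = prev_timestamps[-NUMBER_OF_TIMESTAMPS:]
    prev_time = sum(window) // len(window)
    if new_timestamp <= prev_time:
        return "TIMESTAMP_TOO_FAR_IN_PAST"
    if new_timestamp > int(time.time()) + MAX_FUTURE_TIME:
        return "TIMESTAMP_TOO_FAR_IN_FUTURE"
    return None


# A timestamp later than the window average and close to now passes both checks
now = int(time.time())
assert check_timestamp(now, [now - i for i in range(20, 0, -1)]) is None
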
Example #21
    async def get_blockchain_state(self, _request: Dict):
        """
        Returns a summary of the node's view of the blockchain.
        """
        full_peak: Optional[
            FullBlock] = await self.service.blockchain.get_block_peak()

        if full_peak is not None and full_peak.height > 0:
            if full_peak.header_hash in self.service.blockchain.sub_blocks:
                sub_block: SubBlockRecord = self.service.blockchain.sub_blocks[
                    full_peak.header_hash]
                sub_slot_iters = sub_block.sub_slot_iters
            else:
                sub_slot_iters = self.service.constants.SUB_SLOT_ITERS_STARTING
            difficulty = uint64(full_peak.weight -
                                self.service.blockchain.sub_blocks[
                                    full_peak.prev_header_hash].weight)
        else:
            difficulty = self.service.constants.DIFFICULTY_STARTING
            sub_slot_iters = self.service.constants.SUB_SLOT_ITERS_STARTING

        sync_mode: bool = self.service.sync_store.get_sync_mode()

        if sync_mode:
            max_pp = 0
            for _, potential_peak_tuple in self.service.sync_store.potential_peaks.items(
            ):
                peak_h, peak_w = potential_peak_tuple
                if peak_h > max_pp:
                    max_pp = peak_h
            sync_tip_height = max_pp
            sync_tip_sub_height = max_pp
            if full_peak is not None:
                sync_progress_sub_height = full_peak.sub_block_height
                sync_progress_height = full_peak.height
            else:
                sync_progress_sub_height = 0
                sync_progress_height = 0
        else:
            sync_tip_height = 0
            sync_tip_sub_height = 0
            sync_progress_sub_height = 0
            sync_progress_height = uint32(0)

        if full_peak is not None and full_peak.height > 1:
            newer_block_hex = full_peak.header_hash.hex()
            older_block_hex = self.service.blockchain.sub_height_to_hash[
                uint32(max(1, full_peak.sub_block_height - 1000))].hex()
            space = await self.get_network_space({
                "newer_block_header_hash":
                newer_block_hex,
                "older_block_header_hash":
                older_block_hex
            })
        else:
            space = {"space": uint128(0)}

        synced = await self.service.synced()

        assert space is not None
        response: Dict = {
            "blockchain_state": {
                "peak": full_peak,
                "sync": {
                    "sync_mode": sync_mode,
                    "synced": synced,
                    "sync_tip_height": sync_tip_height,
                    "sync_tip_sub_height": sync_tip_sub_height,
                    "sync_progress_height": sync_progress_height,
                    "sync_progress_sub_height": sync_progress_sub_height,
                },
                "difficulty": difficulty,
                "sub_slot_iters": sub_slot_iters,
                "space": space["space"],
            },
        }
        self.cached_blockchain_state = dict(response["blockchain_state"])
        return response
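
get_blockchain_state recovers the current difficulty from cumulative weight: a sub-block's weight is its parent's weight plus the difficulty it was created at, so the peak's difficulty is just the weight delta to its parent. A toy illustration of that relationship (the numbers are made up):

# weight is cumulative difficulty, so per-block difficulty is the
# delta between consecutive weights
weights = {0: 0, 1: 16, 2: 32, 3: 50}  # height -> cumulative weight (made-up)
peak_height = 3
difficulty = weights[peak_height] - weights[peak_height - 1]
assert difficulty == 18
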
Example #22
def validate_finished_header_block(
    constants: ConsensusConstants,
    sub_blocks: BlockchainInterface,
    header_block: HeaderBlock,
    check_filter: bool,
    expected_difficulty: uint64,
    expected_sub_slot_iters: uint64,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
    """
    Fully validates the header of a sub-block. A header block is the same as a full block, but
    without transactions and transaction info. Returns (required_iters, error).
    """
    unfinished_header_block = UnfinishedHeaderBlock(
        header_block.finished_sub_slots,
        header_block.reward_chain_sub_block.get_unfinished(),
        header_block.challenge_chain_sp_proof,
        header_block.reward_chain_sp_proof,
        header_block.foliage_sub_block,
        header_block.foliage_block,
        header_block.transactions_filter,
    )

    required_iters, validate_unfinished_err = validate_unfinished_header_block(
        constants,
        sub_blocks,
        unfinished_header_block,
        check_filter,
        expected_difficulty,
        expected_sub_slot_iters,
        False,
    )

    genesis_block = False
    if validate_unfinished_err is not None:
        return None, validate_unfinished_err

    assert required_iters is not None

    if header_block.height == 0:
        prev_sb: Optional[SubBlockRecord] = None
        genesis_block = True
    else:
        prev_sb = sub_blocks.sub_block_record(header_block.prev_header_hash)
    new_sub_slot: bool = len(header_block.finished_sub_slots) > 0

    ip_iters: uint64 = calculate_ip_iters(
        constants,
        expected_sub_slot_iters,
        header_block.reward_chain_sub_block.signage_point_index,
        required_iters,
    )
    if not genesis_block:
        assert prev_sb is not None
        # 27. Check sub-block height
        if header_block.height != prev_sb.height + 1:
            return None, ValidationError(Err.INVALID_HEIGHT)

        # 28. Check weight
        if header_block.weight != prev_sb.weight + expected_difficulty:
            log.error(
                f"INVALID WEIGHT: {header_block} {prev_sb} {expected_difficulty}"
            )
            return None, ValidationError(Err.INVALID_WEIGHT)
    else:
        if header_block.height != uint32(0):
            return None, ValidationError(Err.INVALID_HEIGHT)
        if header_block.weight != constants.DIFFICULTY_STARTING:
            return None, ValidationError(Err.INVALID_WEIGHT)

    # RC vdf challenge is taken from more recent of (slot start, prev_block)
    if genesis_block:
        cc_vdf_output = ClassgroupElement.get_default_element()
        ip_vdf_iters = ip_iters
        if new_sub_slot:
            rc_vdf_challenge = header_block.finished_sub_slots[
                -1].reward_chain.get_hash()
        else:
            rc_vdf_challenge = constants.GENESIS_CHALLENGE
    else:
        assert prev_sb is not None
        if new_sub_slot:
            # slot start is more recent
            rc_vdf_challenge = header_block.finished_sub_slots[
                -1].reward_chain.get_hash()
            ip_vdf_iters = ip_iters
            cc_vdf_output = ClassgroupElement.get_default_element()

        else:
            # Prev sb is more recent
            rc_vdf_challenge = prev_sb.reward_infusion_new_challenge
            ip_vdf_iters = uint64(
                header_block.reward_chain_sub_block.total_iters -
                prev_sb.total_iters)
            cc_vdf_output = prev_sb.challenge_vdf_output

    # 29. Check challenge chain infusion point VDF
    if new_sub_slot:
        cc_vdf_challenge = header_block.finished_sub_slots[
            -1].challenge_chain.get_hash()
    else:
        # Not first sub-block in slot
        if genesis_block:
            # genesis block
            cc_vdf_challenge = constants.GENESIS_CHALLENGE
        else:
            assert prev_sb is not None
            # Not genesis block, go back to first sub-block in slot
            curr = prev_sb
            while curr.finished_challenge_slot_hashes is None:
                curr = sub_blocks.sub_block_record(curr.prev_hash)
            cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]

    cc_target_vdf_info = VDFInfo(
        cc_vdf_challenge,
        ip_vdf_iters,
        header_block.reward_chain_sub_block.challenge_chain_ip_vdf.output,
    )
    if header_block.reward_chain_sub_block.challenge_chain_ip_vdf != dataclasses.replace(
            cc_target_vdf_info,
            number_of_iterations=ip_iters,
    ):
        expected = dataclasses.replace(
            cc_target_vdf_info,
            number_of_iterations=ip_iters,
        )
        log.error(
            f"{header_block.reward_chain_sub_block.challenge_chain_ip_vdf}. expected {expected}"
        )
        log.error(f"Block: {header_block}")
        return None, ValidationError(Err.INVALID_CC_IP_VDF)
    if not header_block.challenge_chain_ip_proof.is_valid(
            constants,
            cc_vdf_output,
            cc_target_vdf_info,
            None,
    ):
        log.error(f"Did not validate, output {cc_vdf_output}")
        log.error(f"Block: {header_block}")
        return None, ValidationError(Err.INVALID_CC_IP_VDF)

    # 30. Check reward chain infusion point VDF
    rc_target_vdf_info = VDFInfo(
        rc_vdf_challenge,
        ip_vdf_iters,
        header_block.reward_chain_sub_block.reward_chain_ip_vdf.output,
    )
    if not header_block.reward_chain_ip_proof.is_valid(
            constants,
            ClassgroupElement.get_default_element(),
            header_block.reward_chain_sub_block.reward_chain_ip_vdf,
            rc_target_vdf_info,
    ):
        return None, ValidationError(Err.INVALID_RC_IP_VDF)

    # 31. Check infused challenge chain infusion point VDF
    if not genesis_block:
        overflow = is_overflow_sub_block(
            constants, header_block.reward_chain_sub_block.signage_point_index)
        deficit = calculate_deficit(
            constants,
            header_block.height,
            prev_sb,
            overflow,
            len(header_block.finished_sub_slots),
        )

        if header_block.reward_chain_sub_block.infused_challenge_chain_ip_vdf is None:
            # If we don't have an ICC chain, deficit must be 4 or 5
            if deficit < constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                return None, ValidationError(Err.INVALID_ICC_VDF)
        else:
            assert header_block.infused_challenge_chain_ip_proof is not None
            # If we have an ICC chain, deficit must be 0, 1, 2 or 3
            if deficit >= constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                return (
                    None,
                    ValidationError(
                        Err.INVALID_ICC_VDF,
                        f"icc vdf and deficit is bigger or equal to {constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK - 1}",
                    ),
                )
            if new_sub_slot:
                last_ss = header_block.finished_sub_slots[-1]
                assert last_ss.infused_challenge_chain is not None
                icc_vdf_challenge: bytes32 = last_ss.infused_challenge_chain.get_hash(
                )
                icc_vdf_input = ClassgroupElement.get_default_element()
            else:
                assert prev_sb is not None
                if prev_sb.is_challenge_sub_block(constants):
                    icc_vdf_input = ClassgroupElement.get_default_element()
                else:
                    icc_vdf_input = prev_sb.infused_challenge_vdf_output
                curr = prev_sb
                while curr.finished_infused_challenge_slot_hashes is None and not curr.is_challenge_sub_block(
                        constants):
                    curr = sub_blocks.sub_block_record(curr.prev_hash)

                if curr.is_challenge_sub_block(constants):
                    icc_vdf_challenge = curr.challenge_block_info_hash
                else:
                    assert curr.finished_infused_challenge_slot_hashes is not None
                    icc_vdf_challenge = curr.finished_infused_challenge_slot_hashes[
                        -1]

            icc_target_vdf_info = VDFInfo(
                icc_vdf_challenge,
                ip_vdf_iters,
                header_block.reward_chain_sub_block.
                infused_challenge_chain_ip_vdf.output,
            )
            if not header_block.infused_challenge_chain_ip_proof.is_valid(
                    constants,
                    icc_vdf_input,
                    header_block.reward_chain_sub_block.
                    infused_challenge_chain_ip_vdf,
                    icc_target_vdf_info,
            ):
                return None, ValidationError(Err.INVALID_ICC_VDF,
                                             "invalid icc proof")
    else:
        if header_block.infused_challenge_chain_ip_proof is not None:
            return None, ValidationError(Err.INVALID_ICC_VDF)

    # 32. Check reward block hash
    if header_block.foliage_sub_block.reward_block_hash != header_block.reward_chain_sub_block.get_hash(
    ):
        return None, ValidationError(Err.INVALID_REWARD_BLOCK_HASH)

    # 33. Check reward block is_block
    if (header_block.foliage_sub_block.foliage_block_hash
            is not None) != header_block.reward_chain_sub_block.is_block:
        return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)

    return required_iters, None
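
validate_finished_header_block reuses the unfinished validator for everything up to the infusion point, then layers on the checks that only exist once the block is infused (height, weight, and the infusion-point VDFs). A schematic sketch of that two-phase pattern, with hypothetical callables standing in for the real validators:

from typing import Any, Callable, Optional, Tuple


def two_phase_validate(
    block: Any,
    strip: Callable[[Any], Any],  # builds the unfinished view of the block
    validate_unfinished: Callable[[Any], Tuple[Optional[int], Optional[str]]],
    validate_infusion: Callable[[Any, int], Optional[str]],
) -> Tuple[Optional[int], Optional[str]]:
    # Phase 1: shared checks on the stripped-down block, failing fast on error
    required_iters, err = validate_unfinished(strip(block))
    if err is not None:
        return None, err
    assert required_iters is not None
    # Phase 2: finished-only checks that depend on the infusion point
    err = validate_infusion(block, required_iters)
    return (required_iters, None) if err is None else (None, err)
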
Example #23
    def __init__(
        self,
        root_path: Optional[Path] = None,
        real_plots: bool = False,
    ):
        self._tempdir = None
        if root_path is None:
            self._tempdir = tempfile.TemporaryDirectory()
            root_path = Path(self._tempdir.name)
        self.root_path = root_path
        self.real_plots = real_plots

        if not real_plots:
            create_default_chia_config(root_path)
            initialize_ssl(root_path)
            # No real plots supplied, so we will use the small test plots
            self.use_any_pos = True
            self.keychain = Keychain("testing-1.8.0", True)
            self.keychain.delete_all_keys()
            self.farmer_master_sk = self.keychain.add_private_key(
                bytes_to_mnemonic(std_hash(b"block_tools farmer key")), "")
            self.pool_master_sk = self.keychain.add_private_key(
                bytes_to_mnemonic(std_hash(b"block_tools pool key")), "")
            self.farmer_pk = master_sk_to_farmer_sk(
                self.farmer_master_sk).get_g1()
            self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()

            plot_dir = get_plot_dir()
            mkdir(plot_dir)
            temp_dir = plot_dir / "tmp"
            mkdir(temp_dir)
            args = Namespace()
            args.sk_seed = std_hash(b"").hex()
            # Can't go much lower than 18, since plots start having no solutions
            args.size = 18
            # Uses many plots for testing, in order to guarantee proofs of space at every height
            args.num = 40
            args.index = 0
            args.buffer = 32
            args.farmer_public_key = bytes(self.farmer_pk).hex()
            args.pool_public_key = bytes(self.pool_pk).hex()
            args.tmp_dir = temp_dir
            args.tmp2_dir = plot_dir
            args.final_dir = plot_dir
            test_private_keys = [
                PrivateKey.from_seed(std_hash(bytes([i])))
                for i in range(args.num)
            ]
            try:
                # No datetime in the filename, to get deterministic filenames and not replot
                create_plots(
                    args,
                    root_path,
                    use_datetime=False,
                    test_private_keys=test_private_keys,
                )
            except KeyboardInterrupt:
                shutil.rmtree(plot_dir, ignore_errors=True)
                sys.exit(1)
        else:
            initialize_ssl(root_path)
            self.keychain = Keychain()
            self.use_any_pos = False
            sk_and_ent = self.keychain.get_first_private_key()
            assert sk_and_ent is not None
            self.farmer_master_sk = sk_and_ent[0]
            self.pool_master_sk = sk_and_ent[0]

        self.farmer_ph = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1())
        self.pool_ph = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1())

        self.all_sks = self.keychain.get_all_private_keys()
        self.pool_pubkeys: List[G1Element] = [
            master_sk_to_pool_sk(sk).get_g1() for sk, _ in self.all_sks
        ]
        farmer_pubkeys: List[G1Element] = [
            master_sk_to_farmer_sk(sk).get_g1() for sk, _ in self.all_sks
        ]
        if len(self.pool_pubkeys) == 0 or len(farmer_pubkeys) == 0:
            raise RuntimeError("Keys not generated. Run `chia generate keys`")
        _, self.plots, _, _ = load_plots({}, set(), farmer_pubkeys,
                                         self.pool_pubkeys, root_path)
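
The test harness above derives its farmer and pool keys from fixed strings via std_hash, so every run regenerates the same keys and the pre-made test plots remain usable. A minimal standard-library sketch of that idea (the real code additionally goes through bytes_to_mnemonic and the keychain):

import hashlib


def deterministic_seed(tag: bytes) -> bytes:
    # Same tag in, same 32-byte seed out: reruns reproduce identical test keys
    return hashlib.sha256(tag).digest()


farmer_seed = deterministic_seed(b"block_tools farmer key")
pool_seed = deterministic_seed(b"block_tools pool key")
assert farmer_seed == deterministic_seed(b"block_tools farmer key")  # stable across runs
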
Example #24
    async def test_store(self):
        db_filename = Path("blockchain_wallet_store_test.db")

        if db_filename.exists():
            db_filename.unlink()

        db_connection = await aiosqlite.connect(db_filename)
        store = await WalletStore.create(db_connection)
        try:
            coin_1 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            coin_2 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            coin_3 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            coin_4 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            record_replaced = WalletCoinRecord(coin_1, uint32(8), uint32(0),
                                               False, True,
                                               WalletType.STANDARD_WALLET, 0)
            record_1 = WalletCoinRecord(coin_1, uint32(4), uint32(0), False,
                                        True, WalletType.STANDARD_WALLET, 0)
            record_2 = WalletCoinRecord(coin_2, uint32(5), uint32(0), False,
                                        True, WalletType.STANDARD_WALLET, 0)
            record_3 = WalletCoinRecord(
                coin_3,
                uint32(5),
                uint32(10),
                True,
                False,
                WalletType.STANDARD_WALLET,
                0,
            )
            record_4 = WalletCoinRecord(
                coin_4,
                uint32(5),
                uint32(15),
                True,
                False,
                WalletType.STANDARD_WALLET,
                0,
            )

            # Test add (replace) and get
            assert await store.get_coin_record(coin_1.name()) is None
            await store.add_coin_record(record_replaced)
            await store.add_coin_record(record_1)
            await store.add_coin_record(record_2)
            await store.add_coin_record(record_3)
            await store.add_coin_record(record_4)
            assert await store.get_coin_record(coin_1.name()) == record_1

            # Test persistence
            await db_connection.close()
            db_connection = await aiosqlite.connect(db_filename)
            store = await WalletStore.create(db_connection)
            assert await store.get_coin_record(coin_1.name()) == record_1

            # Test set spent
            await store.set_spent(coin_1.name(), uint32(12))
            assert (await store.get_coin_record(coin_1.name())).spent
            assert (await store.get_coin_record(coin_1.name()
                                                )).spent_block_index == 12

            # No coins at height 3
            assert len(await store.get_unspent_coins_at_height(3)) == 0
            assert len(await store.get_unspent_coins_at_height(4)) == 1
            assert len(await store.get_unspent_coins_at_height(5)) == 4
            assert len(await store.get_unspent_coins_at_height(11)) == 3
            assert len(await store.get_unspent_coins_at_height(12)) == 2
            assert len(await store.get_unspent_coins_at_height(15)) == 1
            assert len(await store.get_unspent_coins_at_height(16)) == 1
            assert len(await store.get_unspent_coins_at_height()) == 1

            assert len(await store.get_unspent_coins_for_wallet(0)) == 1
            assert len(await store.get_unspent_coins_for_wallet(1)) == 0

            coin_5 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            record_5 = WalletCoinRecord(
                coin_5,
                uint32(5),
                uint32(15),
                False,
                False,
                WalletType.STANDARD_WALLET,
                1,
            )
            await store.add_coin_record(record_5)
            assert len(await store.get_unspent_coins_for_wallet(1)) == 1

            assert len(await store.get_spendable_for_index(100, 1)) == 1
            assert len(await store.get_spendable_for_index(100, 0)) == 1
            assert len(await store.get_spendable_for_index(0, 0)) == 0

            coin_6 = Coin(token_bytes(32), coin_4.puzzle_hash, uint64(12312))
            await store.add_coin_record(record_5)
            record_6 = WalletCoinRecord(
                coin_6,
                uint32(5),
                uint32(15),
                True,
                False,
                WalletType.STANDARD_WALLET,
                2,
            )
            await store.add_coin_record(record_6)
            assert (len(await store.get_coin_records_by_puzzle_hash(
                record_6.coin.puzzle_hash)) == 2)  # 4 and 6
            assert (len(await
                        store.get_coin_records_by_puzzle_hash(token_bytes(32)
                                                              )) == 0)

            assert await store.get_coin_record_by_coin_id(coin_6.name()
                                                          ) == record_6
            assert await store.get_coin_record_by_coin_id(token_bytes(32)
                                                          ) is None

            # BLOCKS
            assert len(await store.get_lca_path()) == 0

            # NOT lca block
            br_1 = BlockRecord(
                token_bytes(32),
                token_bytes(32),
                uint32(0),
                uint128(100),
                None,
                None,
                None,
                None,
                uint64(0),
            )
            assert await store.get_block_record(br_1.header_hash) is None
            await store.add_block_record(br_1, False)
            assert len(await store.get_lca_path()) == 0
            assert await store.get_block_record(br_1.header_hash) == br_1

            # LCA genesis
            await store.add_block_record(br_1, True)
            assert await store.get_block_record(br_1.header_hash) == br_1
            assert len(await store.get_lca_path()) == 1
            assert (await store.get_lca_path())[br_1.header_hash] == br_1

            br_2 = BlockRecord(
                token_bytes(32),
                token_bytes(32),
                uint32(1),
                uint128(100),
                None,
                None,
                None,
                None,
                uint64(0),
            )
            await store.add_block_record(br_2, False)
            assert len(await store.get_lca_path()) == 1
            await store.add_block_to_path(br_2.header_hash)
            assert len(await store.get_lca_path()) == 2
            assert (await store.get_lca_path())[br_2.header_hash] == br_2

            br_3 = BlockRecord(
                token_bytes(32),
                token_bytes(32),
                uint32(2),
                uint128(100),
                None,
                None,
                None,
                None,
                uint64(0),
            )
            await store.add_block_record(br_3, True)
            assert len(await store.get_lca_path()) == 3
            await store.remove_blocks_from_path(1)
            assert len(await store.get_lca_path()) == 2

            await store.rollback_lca_to_block(0)
            assert len(await store.get_unspent_coins_at_height()) == 0

            coin_7 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            coin_8 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            coin_9 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            coin_10 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            record_7 = WalletCoinRecord(coin_7, uint32(0), uint32(1), True,
                                        False, WalletType.STANDARD_WALLET, 1)
            record_8 = WalletCoinRecord(coin_8, uint32(1), uint32(2), True,
                                        False, WalletType.STANDARD_WALLET, 1)
            record_9 = WalletCoinRecord(coin_9, uint32(2), uint32(3), True,
                                        False, WalletType.STANDARD_WALLET, 1)
            record_10 = WalletCoinRecord(
                coin_10,
                uint32(3),
                uint32(4),
                True,
                False,
                WalletType.STANDARD_WALLET,
                1,
            )

            await store.add_coin_record(record_7)
            await store.add_coin_record(record_8)
            await store.add_coin_record(record_9)
            await store.add_coin_record(record_10)
            assert len(await store.get_unspent_coins_at_height(0)) == 1
            assert len(await store.get_unspent_coins_at_height(1)) == 1
            assert len(await store.get_unspent_coins_at_height(2)) == 1
            assert len(await store.get_unspent_coins_at_height(3)) == 1
            assert len(await store.get_unspent_coins_at_height(4)) == 0

            await store.add_block_record(br_2, True)
            await store.add_block_record(br_3, True)

            await store.rollback_lca_to_block(1)

            assert len(await store.get_unspent_coins_at_height(0)) == 1
            assert len(await store.get_unspent_coins_at_height(1)) == 1
            assert len(await store.get_unspent_coins_at_height(2)) == 1
            assert len(await store.get_unspent_coins_at_height(3)) == 1
            assert len(await store.get_unspent_coins_at_height(4)) == 1

        except AssertionError:
            await db_connection.close()
            raise
        await db_connection.close()
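
The test closes the connection both in the except branch and on the success path; the same guarantee reads more directly as try/finally. A condensed sketch of that setup/teardown pattern, assuming the same aiosqlite API used above:

import asyncio

import aiosqlite


async def with_db(db_path: str) -> None:
    db_connection = await aiosqlite.connect(db_path)
    try:
        # ... create the store and run assertions here ...
        pass
    finally:
        # Runs on success and on AssertionError alike
        await db_connection.close()


asyncio.run(with_db(":memory:"))
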
Example #25
    async def fetch_blocks_and_validate(self, peer: WSChiaConnection, sub_height_start: uint32, sub_height_end: uint32):
        if self.wallet_state_manager is None:
            return

        self.log.info(f"Requesting blocks {sub_height_start}-{sub_height_end}")
        request = RequestHeaderBlocks(uint32(sub_height_start), uint32(sub_height_end))
        res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request)
        if res is None or not isinstance(res, RespondHeaderBlocks):
            raise ValueError("Peer returned no response")
        header_blocks: List[HeaderBlock] = res.header_blocks
        if header_blocks is None:
            raise ValueError(f"No response from peer {peer}")
        if (
            (self.full_node_peer is not None
             and peer.peer_host == self.full_node_peer.host)
            or peer.peer_host == "127.0.0.1"
        ):
            trusted = True
            pre_validation_results: Optional[List[PreValidationResult]] = None
        else:
            trusted = False
            pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing(
                header_blocks
            )
            if pre_validation_results is None:
                return False
            assert len(header_blocks) == len(pre_validation_results)

        for i in range(len(header_blocks)):
            header_block = header_blocks[i]
            if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None:
                raise ValidationError(Err(pre_validation_results[i].error))
            if header_block.is_block:
                # Find additions and removals
                (additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
                    header_block, header_block.transactions_filter
                )

                # Get Additions
                added_coins = await self.get_additions(peer, header_block, additions)
                if added_coins is None:
                    raise ValueError("Failed to fetch additions")

                # Get removals
                removed_coins = await self.get_removals(peer, header_block, added_coins, removals)
                if removed_coins is None:
                    raise ValueError("Failed to fetch removals")

                header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins)
            else:
                header_block_record = HeaderBlockRecord(header_block, [], [])

            if trusted:
                (
                    result,
                    error,
                    fork_h,
                ) = await self.wallet_state_manager.blockchain.receive_block(header_block_record, None, trusted)
            else:
                assert pre_validation_results is not None
                (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
                    header_block_record, pre_validation_results[i], trusted
                )
            if result == ReceiveBlockResult.NEW_PEAK:
                self.wallet_state_manager.state_changed("new_block")
            elif result == ReceiveBlockResult.INVALID_BLOCK:
                raise ValueError("Peer sent us an invalid block")
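
fetch_blocks_and_validate takes an inclusive sub-height range, so a syncing caller typically walks a gap in fixed-size batches. A small self-contained helper showing how such inclusive batches might be computed (the batch size and the helper itself are illustrative assumptions, not the wallet's actual sync loop):

from typing import List, Tuple


def batch_ranges(start: int, end: int, batch_size: int) -> List[Tuple[int, int]]:
    """Split the inclusive range [start, end] into inclusive batches."""
    return [(s, min(s + batch_size - 1, end)) for s in range(start, end + 1, batch_size)]


# Each tuple maps to one (sub_height_start, sub_height_end) request
assert batch_ranges(0, 70, 32) == [(0, 31), (32, 63), (64, 70)]
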
Example #26
def check_keys(new_root):
    keychain: Keychain = Keychain()
    all_sks = keychain.get_all_private_keys()
    if len(all_sks) == 0:
        print(
            "No keys are present in the keychain. Generate them with 'chia keys generate'"
        )
        return

    config: Dict = load_config(new_root, "config.yaml")
    pool_child_pubkeys = [
        master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks
    ]
    all_targets = []
    stop_searching_for_farmer = "xch_target_puzzle_hash" not in config[
        "farmer"]
    stop_searching_for_pool = "xch_target_puzzle_hash" not in config["pool"]
    for i in range(500):
        if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
            break
        for sk, _ in all_sks:
            all_targets.append(
                create_puzzlehash_for_pk(
                    master_sk_to_wallet_sk(sk, uint32(i)).get_g1()).hex())
            if all_targets[-1] == config["farmer"].get(
                    "xch_target_puzzle_hash"):
                stop_searching_for_farmer = True
            if all_targets[-1] == config["pool"].get("xch_target_puzzle_hash"):
                stop_searching_for_pool = True

    # Set the destinations
    if "xch_target_puzzle_hash" not in config["farmer"]:
        print(
            f"Setting the xch destination address for coinbase fees reward to {all_targets[0]}"
        )
        config["farmer"]["xch_target_puzzle_hash"] = all_targets[0]
    elif config["farmer"]["xch_target_puzzle_hash"] not in all_targets:
        print(
            f"WARNING: farmer using a puzzle hash which we don't have the private"
            f" keys for. Overriding "
            f"{config['farmer']['xch_target_puzzle_hash']} with {all_targets[0]}"
        )
        config["farmer"]["xch_target_puzzle_hash"] = all_targets[0]

    if "pool" not in config:
        config["pool"] = {}
    if "xch_target_puzzle_hash" not in config["pool"]:
        print(
            f"Setting the xch destination address for coinbase reward to {all_targets[0]}"
        )
        config["pool"]["xch_target_puzzle_hash"] = all_targets[0]
    elif config["pool"]["xch_target_puzzle_hash"] not in all_targets:
        print(
            f"WARNING: pool using a puzzle hash which we don't have the private"
            f" keys for. Overriding "
            f"{config['pool']['xch_target_puzzle_hash']} with {all_targets[0]}"
        )
        config["pool"]["xch_target_puzzle_hash"] = all_targets[0]

    # Set the pool pks in the farmer
    pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys)
    if "pool_public_keys" in config["farmer"]:
        for pk_hex in config["farmer"]["pool_public_keys"]:
            # Add original ones in config
            pool_pubkeys_hex.add(pk_hex)

    config["farmer"]["pool_public_keys"] = pool_pubkeys_hex
    save_config(new_root, "config.yaml", config)
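check_keys recognizes a configured target only by brute-force rederivation: it scans a bounded range of derivation indices and compares each derived address against the configured puzzle hash. A hedged sketch of that idea in isolation, where derive_wallet_address is a hypothetical stand-in for create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()).hex():

from typing import Callable, Optional


def find_target_index(
    derive_wallet_address: Callable[[int], str],
    target_puzzle_hash: Optional[str],
    limit: int = 500,
) -> Optional[int]:
    # Return the derivation index whose address matches the configured
    # target, or None if it is not among the first `limit` addresses
    if target_puzzle_hash is None:
        return None
    for i in range(limit):
        if derive_wallet_address(i) == target_puzzle_hash:
            return i
    return None

If the scan comes up empty, the target is not spendable with the local keys, which is exactly when check_keys prints its WARNING and overrides the config value.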
Example #27
def get_next_min_iters(
    constants: ConsensusConstants,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block: Union[FullBlock, HeaderBlock],
) -> uint64:
    """
    Returns the VDF speed in iterations per second, to be used for the next block. This depends on
    the number of iterations of the last epoch, and changes at the same block as the difficulty.
    """
    next_height: uint32 = uint32(block.height + 1)
    if next_height < constants.DIFFICULTY_EPOCH:
        # First epoch has a hardcoded vdf speed
        return constants.MIN_ITERS_STARTING

    prev_block_header: Header = headers[block.prev_header_hash]

    proof_of_space = block.proof_of_space
    difficulty = get_next_difficulty(constants, headers, height_to_hash, prev_block_header)
    iterations = uint64(block.header.data.total_iters - prev_block_header.data.total_iters)
    prev_min_iters = calculate_min_iters_from_iterations(
        proof_of_space,
        difficulty,
        iterations,
        constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
    )

    if next_height % constants.DIFFICULTY_EPOCH != constants.DIFFICULTY_DELAY:
        # Not at a point where min iters changes, so return the previous value
        # TODO: cache this for efficiency
        return prev_min_iters

    # min iters (along with difficulty) changes at this block, so we need to calculate the new value.
    # The calculation is (iters2 - iters1) // (DIFFICULTY_EPOCH * MIN_ITERS_PROPORTION), where heights
    # 1 and 2 are the last block of the second-to-last epoch and the last block of the last epoch.
    # In effect it is proportional to the average total iterations per block.

    # height1 is the last block two epochs ago, which lets us count the iterations taken to mine the
    # first block of the previous epoch. It can be -1 on the first recalculation, so keep it as a
    # plain int rather than wrapping it into a uint32.
    height1 = int(next_height) - constants.DIFFICULTY_EPOCH - constants.DIFFICULTY_DELAY - 1
    # height2 is the last block in the previous epoch
    height2 = int(next_height) - constants.DIFFICULTY_DELAY - 1

    block1: Optional[Header] = None
    block2: Optional[Header] = None

    # We need to backtrack until we merge with the LCA chain, so we can use the height_to_hash dict.
    # This is important if we are on a fork, or beyond the LCA.
    curr: Optional[Header] = block.header
    assert curr is not None
    while curr.height not in height_to_hash or height_to_hash[curr.height] != curr.header_hash:
        if curr.height == height1:
            block1 = curr
        elif curr.height == height2:
            block2 = curr
        curr = headers.get(curr.prev_header_hash, None)
        assert curr is not None

    # Once we are before the fork point (and before the LCA), we can use the height_to_hash map
    if block1 is None and height1 >= 0:
        # height1 can be -1 for the first difficulty recalculation
        block1 = headers.get(height_to_hash[uint32(height1)], None)
    if block2 is None:
        block2 = headers.get(height_to_hash[uint32(height2)], None)
    assert block2 is not None

    if block1 is not None:
        iters1 = block1.data.total_iters
    else:
        # In the case of height1 == -1, iters1 = 0
        iters1 = uint64(0)

    iters2 = block2.data.total_iters

    min_iters_precise = uint64(
        (iters2 - iters1) // (constants.DIFFICULTY_EPOCH * constants.MIN_ITERS_PROPORTION)
    )
    min_iters = uint64(truncate_to_significant_bits(min_iters_precise, constants.SIGNIFICANT_BITS))
    assert count_significant_bits(min_iters) <= constants.SIGNIFICANT_BITS
    return min_iters
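The recalculation above is just an average: total iterations accumulated over one epoch, scaled by MIN_ITERS_PROPORTION and truncated to a fixed number of significant bits. A worked numeric sketch with made-up constants and iteration counts (all values here are assumptions, chosen only to show the arithmetic):

DIFFICULTY_EPOCH = 2048        # blocks per epoch (illustrative)
MIN_ITERS_PROPORTION = 0.1     # fraction of the average used as the floor (illustrative)
SIGNIFICANT_BITS = 12          # precision kept after truncation (illustrative)


def truncate_to_significant_bits(n: int, bits: int) -> int:
    # Keep only the top `bits` bits of n and zero the rest; the same idea
    # as the helper used above
    if n <= 0:
        return 0
    shift = max(n.bit_length() - bits, 0)
    return (n >> shift) << shift


iters1 = 10_000_000  # total_iters at the last block two epochs ago
iters2 = 52_000_000  # total_iters at the last block of the previous epoch
min_iters_precise = int((iters2 - iters1) // (DIFFICULTY_EPOCH * MIN_ITERS_PROPORTION))
min_iters = truncate_to_significant_bits(min_iters_precise, SIGNIFICANT_BITS)
print(min_iters_precise, min_iters)  # 205078 205056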
Example #28
    async def _reconsider_peak(
        self, block_record: BlockRecord, genesis: bool, fork_point_with_peak: Optional[uint32]
    ) -> Optional[uint32]:
        """
        When a new block is added, this is called, to check if the new block is the new peak of the chain.
        This also handles reorgs by reverting blocks which are not in the heaviest chain.
        It returns the height of the fork between the previous chain and the new chain, or returns
        None if there was no update to the heaviest chain.
        """
        peak = self.get_peak()
        if genesis:
            if peak is None:
                block: Optional[FullBlock] = await self.block_store.get_full_block(
                    block_record.header_hash
                )
                assert block is not None

                # Begin a transaction so the coin store and block store are updated atomically
                await self.block_store.begin_transaction()
                try:
                    await self.coin_store.new_block(block)
                    self.__height_to_hash[uint32(0)] = block.header_hash
                    self._peak_height = uint32(0)
                    await self.block_store.set_peak(block.header_hash)
                    await self.block_store.commit_transaction()
                except Exception:
                    await self.block_store.rollback_transaction()
                    raise
                return uint32(0)
            return None

        assert peak is not None
        if block_record.weight > peak.weight:
            # Find the fork point. If the block simply extends the peak, this returns the peak.
            # If the chains have no blocks in common, it returns -1 and all blocks are reverted.
            if fork_point_with_peak is not None:
                fork_height: int = fork_point_with_peak
            else:
                fork_height = find_fork_point_in_chain(self, block_record, peak)

            # Begin a transaction so the coin store and block store are updated atomically
            await self.block_store.begin_transaction()
            try:
                # Rollback to fork
                await self.coin_store.rollback_to_block(fork_height)
                # Rollback sub_epoch_summaries
                heights_to_delete = []
                for ses_included_height in self.__sub_epoch_summaries.keys():
                    if ses_included_height > fork_height:
                        heights_to_delete.append(ses_included_height)
                for height in heights_to_delete:
                    log.info(f"delete ses at height {height}")
                    del self.__sub_epoch_summaries[height]

                if len(heights_to_delete) > 0:
                    # Remove challenge segments belonging to the previous fork
                    log.info(f"Removing sub-epoch challenge segments above height {fork_height}")
                    await self.block_store.delete_sub_epoch_challenge_segments(uint32(fork_height))

                # Collect all blocks from fork point to new peak
                blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
                curr = block_record.header_hash

                while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
                    fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
                    fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
                    assert fetched_full_block is not None
                    assert fetched_block_record is not None
                    blocks_to_add.append((fetched_full_block, fetched_block_record))
                    if fetched_full_block.height == 0:
                        # Doing a full reorg, starting at height 0
                        break
                    curr = fetched_block_record.prev_hash

                for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
                    self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
                    if fetched_block_record.is_transaction_block:
                        await self.coin_store.new_block(fetched_full_block)
                    if fetched_block_record.sub_epoch_summary_included is not None:
                        self.__sub_epoch_summaries[fetched_block_record.height] = (
                            fetched_block_record.sub_epoch_summary_included
                        )

                # Set this block as the new peak
                await self.block_store.set_peak(block_record.header_hash)
                self._peak_height = block_record.height
                await self.block_store.commit_transaction()
            except Exception:
                await self.block_store.rollback_transaction()
                raise

            return uint32(max(fork_height, 0))

        # This is not a heavier block than the heaviest we have seen, so we don't change the coin set
        return None
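_reconsider_peak relies on find_fork_point_in_chain to decide how far to roll back. A minimal sketch of that fork-point search over a toy chain structure (the dict-of-tuples representation is an assumption; the real code walks BlockRecords through the blockchain interface):

from typing import Dict, Tuple

# header_hash -> (height, prev_header_hash)
Chain = Dict[str, Tuple[int, str]]


def find_fork_point(chain: Chain, hash_a: str, hash_b: str) -> int:
    # Walk the higher tip down until both sides are at the same height,
    # then descend in lockstep until the hashes agree
    a, b = hash_a, hash_b
    while chain[a][0] > chain[b][0]:
        a = chain[a][1]
    while chain[b][0] > chain[a][0]:
        b = chain[b][1]
    while a != b:
        if chain[a][0] == 0:
            return -1  # no blocks in common: full reorg from genesis
        a, b = chain[a][1], chain[b][1]
    return chain[a][0]

A return value of -1 matches the contract described above: the caller rolls back everything and replays the new chain from height 0.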
Example #29
    async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection):
        if self.wallet_state_manager is None:
            return

        curr_peak = self.wallet_state_manager.blockchain.get_peak()
        if curr_peak is not None and curr_peak.weight >= peak.weight:
            return
        if self.new_peak_lock is None:
            self.new_peak_lock = asyncio.Lock()
        async with self.new_peak_lock:
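            # Serialize peak handling so overlapping NewPeak messages from peers
            # cannot interleave their header fetches and sync decisions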
            request = wallet_protocol.RequestBlockHeader(peak.height)
            response: Optional[RespondBlockHeader] = await peer.request_block_header(request)

            if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None:
                return

            header_block = response.header_block

            if (curr_peak is None and header_block.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or (
                curr_peak is not None and curr_peak.height > header_block.height - 200
            ):
                top = header_block
                blocks = [top]
                # Fetch blocks backwards until we hit the one that we have,
                # then complete them with additions / removals going forward
                while (
                    not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash)
                    and top.height > 0
                ):
                    request_prev = wallet_protocol.RequestBlockHeader(top.height - 1)
                    response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev)
                    if response_prev is None or not isinstance(response_prev, RespondBlockHeader):
                        return
                    prev_head = response_prev.header_block
                    blocks.append(prev_head)
                    top = prev_head
                blocks.reverse()
                await self.complete_blocks(blocks, peer)
            elif header_block.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
                # Request a weight proof from the peer, and sync only if it validates
                if self.wallet_state_manager.sync_mode:
                    return
                weight_request = RequestProofOfWeight(header_block.height, header_block.header_hash)
                weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight(
                    weight_request, timeout=180
                )
                if weight_proof_response is None:
                    return
                weight_proof = weight_proof_response.wp
                if self.wallet_state_manager is None:
                    return
                valid, fork_point = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof(
                    weight_proof
                )
                if not valid:
                    self.log.error(
                        f"Invalid weight proof: {len(weight_proof.sub_epochs)} sub-epochs,"
                        f" {len(weight_proof.recent_chain_data)} recent blocks"
                    )
                    self.log.debug(f"{weight_proof}")
                    return None
                self.log.info(f"Validated, fork point is {fork_point}")
                self.wallet_state_manager.sync_store.add_potential_fork_point(
                    header_block.header_hash, uint32(fork_point)
                )
                self.wallet_state_manager.sync_store.add_potential_peak(header_block)
                self.start_sync()

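The short-range branch of new_peak_wallet fetches headers backwards until it reaches a block it already has, then processes them oldest-first. A hedged sketch of that loop with hypothetical stand-ins (have_block for the blockchain lookup, fetch_header for the peer request):

from typing import Callable, List, Optional, Tuple

Header = Tuple[int, str, str]  # (height, header_hash, prev_header_hash)


def fetch_back_to_known(
    peak: Header,
    have_block: Callable[[str], bool],
    fetch_header: Callable[[int], Optional[Header]],
) -> Optional[List[Header]]:
    blocks = [peak]
    top = peak
    while not have_block(top[2]) and top[0] > 0:
        prev = fetch_header(top[0] - 1)
        if prev is None:
            return None  # the peer failed to answer; abort, as the code above does
        blocks.append(prev)
        top = prev
    blocks.reverse()  # oldest first, ready for forward processing
    return blocks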
print("\n___________ HD PATH ____________")
while True:
    hd_path = input(
        "Enter the HD path in the form 'm/12381/8444/n/n', or enter Q to quit: "
    ).lower()
    if hd_path == "q":
        quit()
    verify = input(f"Is this correct path: {hd_path}? (y/n) ").lower()
    if verify == "y":
        break

k = Keychain()
private_keys = k.get_all_private_keys()
path: List[uint32] = [uint32(int(i)) for i in hd_path.split("/") if i != "m"]

# The snippet does not show which key was selected above; as a minimal assumption,
# start the derivation from the first master key in the keychain
selected_key = private_keys[0][0]

# Derive the HD key by following the path from the input
for c in path:
    selected_key = AugSchemeMPL.derive_child_sk(selected_key, c)
print("Public key:", selected_key.get_g1())

# get file path
file_path = None
while True:
    file_path = input(
        "Enter the path where you want to save signed alert file, or q to quit: "
    )
    if file_path == "q" or file_path == "Q":
        quit()
    file_path = file_path.strip()