Example #1
    def run_test(self):
        peer = self.nodes[0].add_p2p_connection(P2PInterface())

        self.test_cltv_info(is_active=False)

        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(CLTV_HEIGHT - 2)]
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info("Test that invalid-according-to-CLTV transactions can still appear in a block")

        # create one invalid tx per CLTV failure reason (5 in total) and collect them
        invalid_cltv_txs = []
        for i in range(5):
            spendtx = create_transaction(self.nodes[0], self.coinbase_txids[i],
                                         self.nodeaddress, amount=1.0)
            spendtx = cltv_invalidate(self.nodes[0], spendtx, i)
            spendtx.rehash()
            invalid_cltv_txs.append(spendtx)

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1

        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
        block.nVersion = 3
        block.vtx.extend(invalid_cltv_txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        self.test_cltv_info(is_active=False)  # Not active as of current tip and next block does not need to obey rules
        peer.send_and_ping(msg_block(block))
        self.test_cltv_info(is_active=True)  # Not active as of current tip, but next block must obey rules
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()
        with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
            peer.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            peer.sync_with_ping()

        self.log.info("Test that invalid-according-to-CLTV transactions cannot appear in a block")
        block.nVersion = 4
        block.vtx.append(CTransaction()) # dummy tx after coinbase that will be replaced later

        # create and test one invalid tx per CLTV failure reason (5 in total)
        for i in range(5):
            spendtx = create_transaction(self.nodes[0], self.coinbase_txids[10+i],
                                         self.nodeaddress, amount=1.0)
            spendtx = cltv_invalidate(self.nodes[0], spendtx, i)
            spendtx.rehash()

            expected_cltv_reject_reason = [
                "non-mandatory-script-verify-flag (Operation not valid with the current stack size)",
                "non-mandatory-script-verify-flag (Negative locktime)",
                "non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
                "non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
                "non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
            ][i]
            # First we show that this tx is valid except for CLTV by getting it
            # rejected from the mempool for exactly that reason.
            assert_equal(
                [{
                    'txid': spendtx.hash,
                    'wtxid': spendtx.getwtxid(),
                    'allowed': False,
                    'reject-reason': expected_cltv_reject_reason,
                }],
                self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
            )

            # Now we verify that a block with this transaction is also invalid.
            block.vtx[1] = spendtx
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()

            with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with {}'.format(
                                                block.vtx[-1].hash, expected_cltv_reject_reason)]):
                peer.send_and_ping(msg_block(block))
                assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
                peer.sync_with_ping()

        self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
        spendtx.rehash()

        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        self.test_cltv_info(is_active=True)  # Not active as of current tip, but next block must obey rules
        peer.send_and_ping(msg_block(block))
        self.test_cltv_info(is_active=True)  # Active as of current tip
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
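The cltv_invalidate helper above is what produces the five failure modes asserted later in the example. A minimal sketch of how such variants can be built with the framework's script helpers follows; the variant mapping and the sketch name are illustrative only, not the helper's actual implementation.

from test_framework.messages import CTransaction
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP


def cltv_invalidate_sketch(tx: CTransaction, variant: int) -> CTransaction:
    # Prepend an OP_CHECKLOCKTIMEVERIFY prefix to the scriptSig so that the spend
    # fails for the reason matching the reject strings asserted in the example.
    if variant == 0:
        # Empty stack when OP_CHECKLOCKTIMEVERIFY runs
        # -> "Operation not valid with the current stack size"
        prefix = []
    elif variant == 1:
        # Negative locktime argument -> "Negative locktime"
        prefix = [OP_1NEGATE]
    else:
        # A locktime the spending tx cannot satisfy (e.g. a time-based value
        # against a height-based nLockTime) -> "Locktime requirement not satisfied"
        prefix = [500_000_000 + 1_000]
    tx.vin[0].scriptSig = CScript(prefix + [OP_CHECKLOCKTIMEVERIFY, OP_DROP]
                                  + list(CScript(tx.vin[0].scriptSig)))
    return tx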
Example #2
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info(
            'Check that txs from p2p are rejected and result in disconnect')
        prevtx = self.nodes[0].getblock(self.nodes[0].getblockhash(1),
                                        2)['tx'][0]
        rawtx = self.nodes[0].createrawtransaction(
            inputs=[{
                'txid': prevtx['txid'],
                'vout': 0
            }],
            outputs=[{
                self.nodes[0].get_deterministic_priv_key().address:
                50 - 0.00125
            }],
        )
        sigtx = self.nodes[0].signrawtransactionwithkey(
            hexstring=rawtx,
            privkeys=[self.nodes[0].get_deterministic_priv_key().key],
            prevtxs=[{
                'txid': prevtx['txid'],
                'vout': 0,
                'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
            }],
        )['hex']
        assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)
        with self.nodes[0].assert_debug_log(
            ['transaction sent in violation of protocol peer=0']):
            self.nodes[0].p2p.send_message(
                msg_tx(FromHex(CTransaction(), sigtx)))
            self.nodes[0].p2p.wait_for_disconnect()
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)

        # Remove the disconnected peer and add a new one.
        del self.nodes[0].p2ps[0]
        self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info(
            'Check that txs from rpc are not rejected and are relayed to other peers'
        )
        assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
        txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
        with self.nodes[0].assert_debug_log(
            ['received getdata for: wtx {} peer=1'.format(txid)]):
            self.nodes[0].sendrawtransaction(sigtx)
            self.nodes[0].p2p.wait_for_tx(txid)
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)

        self.log.info(
            'Check that txs from forcerelay peers are not rejected and are relayed to others'
        )
        self.log.info(
            "Restarting node 0 with forcerelay permission and blocksonly")
        self.restart_node(0, [
            "-persistmempool=0", "-whitelist=127.0.0.1",
            "-whitelistforcerelay", "-blocksonly"
        ])
        assert_equal(self.nodes[0].getrawmempool(), [])
        first_peer = self.nodes[0].add_p2p_connection(P2PInterface())
        second_peer = self.nodes[0].add_p2p_connection(P2PInterface())
        peer_1_info = self.nodes[0].getpeerinfo()[0]
        assert_equal(peer_1_info['whitelisted'], True)
        assert_equal(peer_1_info['permissions'],
                     ['noban', 'forcerelay', 'relay', 'mempool', 'download'])
        peer_2_info = self.nodes[0].getpeerinfo()[1]
        assert_equal(peer_2_info['whitelisted'], True)
        assert_equal(peer_2_info['permissions'],
                     ['noban', 'forcerelay', 'relay', 'mempool', 'download'])
        assert_equal(self.nodes[0].testmempoolaccept([sigtx])[0]['allowed'],
                     True)
        txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']

        self.log.info(
            'Check that the tx from forcerelay first_peer is relayed to others (i.e. second_peer)'
        )
        with self.nodes[0].assert_debug_log(["received getdata"]):
            first_peer.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
            self.log.info(
                'Check that the forcerelay peer is still connected after sending the transaction'
            )
            assert_equal(first_peer.is_connected, True)
            second_peer.wait_for_tx(txid)
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
        self.log.info("Forcerelay peer's transaction is accepted and relayed")
Example #3
    def run_test(self):
        self.log.info("Read headers data")
        self.headers_file_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), self.options.datafile)
        with open(self.headers_file_path, encoding='utf-8') as headers_data:
            h_lines = [l.strip() for l in headers_data.readlines()]

        # The headers data is taken from testnet3, covering the early blocks from genesis up to the first
        # checkpoint. There are also two headers with valid PoW at heights 1 and 2, forking off from
        # genesis; they are indicated by the FORK_PREFIX.
        FORK_PREFIX = 'fork:'
        self.headers = [l for l in h_lines if not l.startswith(FORK_PREFIX)]
        self.headers_fork = [
            l[len(FORK_PREFIX):] for l in h_lines if l.startswith(FORK_PREFIX)
        ]

        self.headers = [FromHex(CBlockHeader(), h) for h in self.headers]
        self.headers_fork = [
            FromHex(CBlockHeader(), h) for h in self.headers_fork
        ]

        self.log.info(
            "Feed all non-fork headers, including and up to the first checkpoint"
        )
        peer_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface())
        peer_checkpoint.send_and_ping(msg_headers(self.headers))
        assert {
            'height': 546,
            'hash':
            '000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70',
            'branchlen': 546,
            'status': 'headers-only',
        } in self.nodes[0].getchaintips()

        self.log.info("Feed all fork headers (fails due to checkpoint)")
        with self.nodes[0].assert_debug_log(['bad-fork-prior-to-checkpoint']):
            peer_checkpoint.send_message(msg_headers(self.headers_fork))
            peer_checkpoint.wait_for_disconnect()

        self.log.info("Feed all fork headers (succeeds without checkpoint)")
        # On node 0 it succeeds because checkpoints are disabled
        self.restart_node(0, extra_args=['-nocheckpoints'])
        peer_no_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface())
        peer_no_checkpoint.send_and_ping(msg_headers(self.headers_fork))
        assert {
            "height": 2,
            "hash":
            "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
            "branchlen": 2,
            "status": "headers-only",
        } in self.nodes[0].getchaintips()

        # On node 1 it succeeds because no checkpoint has been reached yet by a chain tip
        peer_before_checkpoint = self.nodes[1].add_p2p_connection(
            P2PInterface())
        peer_before_checkpoint.send_and_ping(msg_headers(self.headers_fork))
        assert {
            "height": 2,
            "hash":
            "00000000b0494bd6c3d5ff79c497cfce40831871cbf39b1bc28bd1dac817dc39",
            "branchlen": 2,
            "status": "headers-only",
        } in self.nodes[1].getchaintips()
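The data file read above is plain text with one hex-serialized 80-byte header per line, fork headers carrying the 'fork:' prefix. A hedged sketch of how such a file could be regenerated from a running node (dump_headers is an illustrative name, not part of the test):

def dump_headers(node, count, out_path, fork_headers=()):
    # getblockheader(hash, False) returns the serialized 80-byte header as hex
    with open(out_path, 'w', encoding='utf-8') as f:
        for height in range(1, count + 1):
            f.write(node.getblockheader(node.getblockhash(height), False) + '\n')
        for fork_header_hex in fork_headers:
            f.write('fork:' + fork_header_hex + '\n')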
Example #4
    def test_service_flags(self):
        self.log.info("Test service flags")
        self.nodes[0].add_p2p_connection(P2PInterface(), services=(1 << 4) | (1 << 63))
        assert_equal(['UNKNOWN[2^4]', 'UNKNOWN[2^63]'], self.nodes[0].getpeerinfo()[-1]['servicesnames'])
        self.nodes[0].disconnect_p2ps()
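The two bits set above have no assigned service flag, so getpeerinfo reports them with the UNKNOWN[2^n] convention. A tiny illustration of that naming rule (assumed mapping; bits with known flags such as NODE_NETWORK = 1 << 0 would display their proper names instead):

def unknown_service_names(services: int) -> list:
    return [f"UNKNOWN[2^{bit}]" for bit in range(64) if services & (1 << bit)]


assert unknown_service_names((1 << 4) | (1 << 63)) == ['UNKNOWN[2^4]', 'UNKNOWN[2^63]']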
Example #5
    def run_test(self):
        # Turn off node 1 while node 0 mines blocks to generate stakes,
        # so that we can later try starting node 1 with an orphan proof.
        self.stop_node(1)

        node = self.nodes[0]

        addrkey0 = node.get_deterministic_priv_key()
        blockhashes = node.generatetoaddress(100, addrkey0.address)

        self.log.info(
            "Make build a valid proof and restart the node to use it")
        privkey = ECKey()
        privkey.set(
            bytes.fromhex(
                "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"
            ), True)

        def get_hex_pubkey(privkey):
            return privkey.get_pubkey().get_bytes().hex()

        proof_master = get_hex_pubkey(privkey)
        proof_sequence = 11
        proof_expiration = 12
        stakes = create_coinbase_stakes(node, [blockhashes[0]], addrkey0.key)
        proof = node.buildavalancheproof(proof_sequence, proof_expiration,
                                         proof_master, stakes)

        self.log.info("Test decodeavalancheproof RPC")
        proofobj = FromHex(AvalancheProof(), proof)
        decodedproof = node.decodeavalancheproof(proof)
        limited_id_hex = f"{proofobj.limited_proofid:0{64}x}"
        assert_equal(decodedproof["sequence"], proof_sequence)
        assert_equal(decodedproof["expiration"], proof_expiration)
        assert_equal(decodedproof["master"], proof_master)
        assert_equal(decodedproof["proofid"], f"{proofobj.proofid:0{64}x}")
        assert_equal(decodedproof["limitedid"], limited_id_hex)
        assert_equal(decodedproof["stakes"][0]["txid"], stakes[0]["txid"])
        assert_equal(decodedproof["stakes"][0]["vout"], stakes[0]["vout"])
        assert_equal(decodedproof["stakes"][0]["height"], stakes[0]["height"])
        assert_equal(decodedproof["stakes"][0]["iscoinbase"],
                     stakes[0]["iscoinbase"])
        assert_equal(decodedproof["stakes"][0]["signature"],
                     base64.b64encode(proofobj.stakes[0].sig).decode("ascii"))

        # Invalid hex (odd number of hex digits)
        assert_raises_rpc_error(-22, "Proof must be an hexadecimal string",
                                node.decodeavalancheproof, proof[:-1])
        # Valid hex but invalid proof
        assert_raises_rpc_error(-22, "Proof has invalid format",
                                node.decodeavalancheproof, proof[:-2])

        # Restart the node with this proof
        self.restart_node(
            0, self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
            ])

        self.log.info("The proof is registered at first chaintip update")
        assert_equal(len(node.getavalanchepeerinfo()), 0)
        node.generate(1)
        wait_until(lambda: len(node.getavalanchepeerinfo()) == 1, timeout=5)

        # This case will occur for users building proofs with a third party
        # tool and then starting a new node that is not yet aware of the
        # transactions used for stakes.
        self.log.info("Start a node with an orphan proof")

        self.start_node(
            1, self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
            ])
        # Mine a block to trigger an attempt at registering the proof
        self.nodes[1].generate(1)
        wait_for_proof(self.nodes[1],
                       f"{proofobj.proofid:0{64}x}",
                       expect_orphan=True)

        self.log.info("Connect to an up-to-date node to unorphan the proof")
        connect_nodes(self.nodes[1], node)
        self.sync_all()
        wait_for_proof(self.nodes[1],
                       f"{proofobj.proofid:0{64}x}",
                       expect_orphan=False)

        self.log.info("Generate delegations for the proof")

        # Stack up a few delegation levels
        def gen_privkey():
            pk = ECKey()
            pk.generate()
            return pk

        delegator_privkey = privkey
        delegation = None
        for _ in range(10):
            delegated_privkey = gen_privkey()
            delegation = node.delegateavalancheproof(
                limited_id_hex,
                bytes_to_wif(delegator_privkey.get_bytes()),
                get_hex_pubkey(delegated_privkey),
                delegation,
            )
            delegator_privkey = delegated_privkey

        random_privkey = gen_privkey()
        random_pubkey = get_hex_pubkey(random_privkey)

        # Invalid proof
        no_stake = node.buildavalancheproof(proof_sequence, proof_expiration,
                                            proof_master, [])

        # Invalid privkey
        assert_raises_rpc_error(
            -5,
            "The private key is invalid",
            node.delegateavalancheproof,
            limited_id_hex,
            bytes_to_wif(bytes(32)),
            random_pubkey,
        )

        # Invalid delegation
        bad_dg = AvalancheDelegation()
        assert_raises_rpc_error(
            -8,
            "The delegation does not match the proof",
            node.delegateavalancheproof,
            limited_id_hex,
            bytes_to_wif(privkey.get_bytes()),
            random_pubkey,
            bad_dg.serialize().hex(),
        )

        # Still invalid, but with a matching proofid
        bad_dg.limited_proofid = proofobj.limited_proofid
        bad_dg.proof_master = proofobj.master
        bad_dg.levels = [AvalancheDelegationLevel()]
        assert_raises_rpc_error(
            -8,
            "The delegation is invalid",
            node.delegateavalancheproof,
            limited_id_hex,
            bytes_to_wif(privkey.get_bytes()),
            random_pubkey,
            bad_dg.serialize().hex(),
        )

        # Wrong privkey: matches the proof but does not match the delegation
        assert_raises_rpc_error(
            -5,
            "The private key does not match the delegation",
            node.delegateavalancheproof,
            limited_id_hex,
            bytes_to_wif(privkey.get_bytes()),
            random_pubkey,
            delegation,
        )

        # Delegation not hex
        assert_raises_rpc_error(
            -22,
            "Delegation must be an hexadecimal string.",
            node.delegateavalancheproof,
            limited_id_hex,
            bytes_to_wif(privkey.get_bytes()),
            random_pubkey,
            "f00",
        )
        # Delegation is hex but ill-formed
        assert_raises_rpc_error(
            -22,
            "Delegation has invalid format",
            node.delegateavalancheproof,
            limited_id_hex,
            bytes_to_wif(privkey.get_bytes()),
            random_pubkey,
            "dead",
        )

        # Test invalid proofs
        dust = node.buildavalancheproof(
            proof_sequence, proof_expiration, proof_master,
            create_coinbase_stakes(node, [blockhashes[0]],
                                   addrkey0.key,
                                   amount="0"))

        dust_amount = Decimal(f"{PROOF_DUST_THRESHOLD * 0.9999:.4f}")
        dust2 = node.buildavalancheproof(
            proof_sequence, proof_expiration, proof_master,
            create_coinbase_stakes(node, [blockhashes[0]],
                                   addrkey0.key,
                                   amount=str(dust_amount)))

        duplicate_stake = node.buildavalancheproof(
            proof_sequence, proof_expiration, proof_master,
            create_coinbase_stakes(node, [blockhashes[0]] * 2, addrkey0.key))

        missing_stake = node.buildavalancheproof(
            proof_sequence, proof_expiration, proof_master,
            [{
                'txid': '0' * 64,
                'vout': 0,
                'amount': 10000000,
                'height': 42,
                'iscoinbase': False,
                'privatekey': addrkey0.key,
            }])

        bad_sig = (
            "0b000000000000000c0000000000000021030b4c866585dd868a9d62348"
            "a9cd008d6a312937048fff31670e7e920cfc7a7440105c5f72f5d6da3085"
            "583e75ee79340eb4eff208c89988e7ed0efb30b87298fa30000000000f20"
            "52a0100000003000000210227d85ba011276cf25b51df6a188b75e604b3"
            "8770a462b2d0e9fb2fc839ef5d3faf07f001dd38e9b4a43d07d5d449cc0"
            "f7d2888d96b82962b3ce516d1083c0e031773487fc3c4f2e38acd1db974"
            "1321b91a79b82d1c2cfd47793261e4ba003cf5")

        self.log.info(
            "Check the verifyavalancheproof and sendavalancheproof RPCs")

        if self.is_wallet_compiled():
            self.log.info(
                "Check a proof with the maximum number of UTXO is valid")
            new_blocks = node.generate(AVALANCHE_MAX_PROOF_STAKES // 10 + 1)
            # confirm the coinbase UTXOs
            node.generate(101)
            too_many_stakes = create_stakes(node, new_blocks,
                                            AVALANCHE_MAX_PROOF_STAKES + 1)
            maximum_stakes = too_many_stakes[:-1]

            good_proof = node.buildavalancheproof(proof_sequence,
                                                  proof_expiration,
                                                  proof_master, maximum_stakes)

            too_many_utxos = node.buildavalancheproof(proof_sequence,
                                                      proof_expiration,
                                                      proof_master,
                                                      too_many_stakes)

            assert node.verifyavalancheproof(good_proof)

        for rpc in [node.verifyavalancheproof, node.sendavalancheproof]:
            assert_raises_rpc_error(-22, "Proof must be an hexadecimal string",
                                    rpc, "f00")
            assert_raises_rpc_error(-22, "Proof has invalid format", rpc,
                                    "f00d")

            def check_rpc_failure(proof, message):
                assert_raises_rpc_error(-8, "The proof is invalid: " + message,
                                        rpc, proof)

            check_rpc_failure(no_stake, "no-stake")
            check_rpc_failure(dust, "amount-below-dust-threshold")
            check_rpc_failure(duplicate_stake, "duplicated-stake")
            check_rpc_failure(missing_stake, "utxo-missing-or-spent")
            check_rpc_failure(bad_sig, "invalid-signature")
            if self.is_wallet_compiled():
                check_rpc_failure(too_many_utxos, "too-many-utxos")

        conflicting_utxo = node.buildavalancheproof(proof_sequence + 1,
                                                    proof_expiration,
                                                    proof_master, stakes)
        assert_raises_rpc_error(
            -8, "The proof has conflicting utxo with an existing proof",
            node.sendavalancheproof, conflicting_utxo)

        # Good proof
        assert node.verifyavalancheproof(proof)

        peer = node.add_p2p_connection(P2PInterface())

        proofid = FromHex(AvalancheProof(), proof).proofid
        node.sendavalancheproof(proof)
        assert proofid in get_proof_ids(node)

        def inv_found():
            with p2p_lock:
                return peer.last_message.get(
                    "inv") and peer.last_message["inv"].inv[-1].hash == proofid

        wait_until(inv_found)

        self.log.info("Check the getrawproof RPC")

        raw_proof = node.getrawavalancheproof("{:064x}".format(proofid))
        assert_equal(raw_proof['proof'], proof)
        assert_equal(raw_proof['orphan'], False)

        assert_raises_rpc_error(-8, "Proof not found",
                                node.getrawavalancheproof, '0' * 64)

        # Orphan the proof by spending one of its staked UTXOs
        raw_tx = node.createrawtransaction([{
            "txid": stakes[-1]["txid"],
            "vout": 0
        }], {
            ADDRESS_BCHREG_UNSPENDABLE:
            stakes[-1]["amount"] - Decimal('10000')
        })
        signed_tx = node.signrawtransactionwithkey(raw_tx, [addrkey0.key])
        node.sendrawtransaction(signed_tx["hex"])
        node.generate(1)
        wait_until(lambda: proofid not in get_proof_ids(node))

        raw_proof = node.getrawavalancheproof("{:064x}".format(proofid))
        assert_equal(raw_proof['proof'], proof)
        assert_equal(raw_proof['orphan'], True)

        self.log.info("Bad proof should be rejected at startup")

        self.stop_node(0)

        node.assert_start_raises_init_error(
            self.extra_args[0] + [
                "-avasessionkey=0",
            ],
            expected_msg="Error: The avalanche session key is invalid.",
        )

        node.assert_start_raises_init_error(
            self.extra_args[0] + [
                "-avaproof={}".format(proof),
            ],
            expected_msg=
            "Error: The avalanche master key is missing for the avalanche proof.",
        )

        node.assert_start_raises_init_error(
            self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey=0",
            ],
            expected_msg="Error: The avalanche master key is invalid.",
        )

        def check_proof_init_error(proof, message):
            node.assert_start_raises_init_error(
                self.extra_args[0] + [
                    "-avaproof={}".format(proof),
                    "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
                ],
                expected_msg="Error: " + message,
            )

        check_proof_init_error(no_stake, "The avalanche proof has no stake.")
        check_proof_init_error(dust, "The avalanche proof stake is too low.")
        check_proof_init_error(dust2, "The avalanche proof stake is too low.")
        check_proof_init_error(duplicate_stake,
                               "The avalanche proof has duplicated stake.")
        check_proof_init_error(
            bad_sig, "The avalanche proof has invalid stake signatures.")
        if self.is_wallet_compiled():
            # The too-many-utxos case creates a proof so large that it cannot
            # fit on the command line
            append_config(node.datadir, ["avaproof={}".format(too_many_utxos)])
            node.assert_start_raises_init_error(
                self.extra_args[0] + [
                    "-avamasterkey=cND2ZvtabDbJ1gucx9GWH6XT9kgTAqfb6cotPt5Q5CyxVDhid2EN",
                ],
                expected_msg="Error: The avalanche proof has too many utxos.",
                match=ErrorMatch.PARTIAL_REGEX,
            )

        # Master private key mismatch
        random_privkey = ECKey()
        random_privkey.generate()
        node.assert_start_raises_init_error(
            self.extra_args[0] + [
                "-avaproof={}".format(proof),
                "-avamasterkey={}".format(
                    bytes_to_wif(random_privkey.get_bytes())),
            ],
            expected_msg=
            "Error: The master key does not match the proof public key.",
        )

        self.log.info("Bad delegation should be rejected at startup")

        def check_delegation_init_error(delegation, message):
            node.assert_start_raises_init_error(
                self.extra_args[0] + [
                    "-avadelegation={}".format(delegation),
                    "-avaproof={}".format(proof),
                    "-avamasterkey={}".format(
                        bytes_to_wif(delegated_privkey.get_bytes())),
                ],
                expected_msg="Error: " + message,
            )

        check_delegation_init_error(
            AvalancheDelegation().serialize().hex(),
            "The delegation does not match the proof.")

        bad_level_sig = FromHex(AvalancheDelegation(), delegation)
        # Tweak some key to cause the signature to mismatch
        bad_level_sig.levels[-2].pubkey = bytes.fromhex(proof_master)
        check_delegation_init_error(
            bad_level_sig.serialize().hex(),
            "The avalanche delegation has invalid signatures.")

        node.assert_start_raises_init_error(
            self.extra_args[0] + [
                "-avadelegation={}".format(delegation),
                "-avaproof={}".format(proof),
                "-avamasterkey={}".format(
                    bytes_to_wif(random_privkey.get_bytes())),
            ],
            expected_msg=
            "Error: The master key does not match the delegation public key.",
        )
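Proof and delegation ids are compared above as 64-character, zero-padded, lowercase hex strings. The f-string used for limited_id_hex is equivalent to the "{:064x}".format() call used elsewhere in the example (plain Python, illustrative value):

proofid = 0xabc
assert f"{proofid:0{64}x}" == "{:064x}".format(proofid) == hex(proofid)[2:].zfill(64)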
Example #6
    def run_test(self):
        # create a p2p receiver
        dspReceiver = P2PInterface()
        self.nodes[0].add_p2p_connection(dspReceiver)
        # workaround - nodes think they're in IBD unless one block is mined
        self.generate(self.nodes[0], 1)
        self.sync_all()
        # Disconnect the third node, will be used later for triple-spend
        disconnect_nodes(self.nodes[1], self.nodes[2])
        # Put fourth node (the non-dsproof-enabled node) with the connected group
        # (we will check its log at the end to ensure it ignored dsproof inv's)
        non_dsproof_node = self.nodes[3]
        disconnect_nodes(self.nodes[2], non_dsproof_node)
        connect_nodes(self.nodes[1], non_dsproof_node)

        # Create and mine a regular non-coinbase transaction for spending
        fundingtxid = self.nodes[0].getblock(
            self.nodes[0].getblockhash(1))['tx'][0]
        fundingtx = FromHex(CTransaction(),
                            self.nodes[0].getrawtransaction(fundingtxid))

        # Create three conflicting transactions. They are only signed, but not yet submitted to the mempool
        firstDSTx = create_raw_transaction(self.nodes[0], fundingtxid,
                                           self.nodes[0].getnewaddress(),
                                           49.95)
        secondDSTx = create_raw_transaction(self.nodes[0], fundingtxid,
                                            self.nodes[0].getnewaddress(),
                                            49.95)
        thirdDSTx = create_raw_transaction(self.nodes[0], fundingtxid,
                                           self.nodes[0].getnewaddress(),
                                           49.95)

        # Send the two conflicting transactions to the network
        # Submit to two different nodes, because a node would simply reject
        # a double spend submitted through RPC
        firstDSTxId = self.nodes[0].sendrawtransaction(firstDSTx)
        self.nodes[1].call_rpc('sendrawtransaction',
                               secondDSTx,
                               ignore_error='txn-mempool-conflict')
        wait_until(lambda: dspReceiver.message_count["dsproof-beta"] == 1,
                   lock=p2p_lock,
                   timeout=25)

        # 1. The DSP message is well-formed and contains all fields
        # If the message arrived and was deserialized successfully, then 1. is satisfied
        dsp = dspReceiver.last_message["dsproof-beta"].dsproof
        dsps = set()
        dsps.add(dsp.serialize())

        # Check that it is valid, both spends are signed with the same key
        # NB: pushData is made of the sig + one last byte for hashtype
        pubkey = self.getpubkey()
        sighash1 = getSighashes(dsp.getPrevOutput(), dsp.spender1, fundingtx)
        sighash2 = getSighashes(dsp.getPrevOutput(), dsp.spender2, fundingtx)
        assert (pubkey.verify_ecdsa(dsp.spender1.pushData[0][:-1], sighash1))
        assert (pubkey.verify_ecdsa(dsp.spender2.pushData[0][:-1], sighash2))
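        # (Illustration of the pushData layout noted above: each push is the
        #  DER-encoded ECDSA signature followed by a single sighash-type byte,
        #  e.g. 0x41 = SIGHASH_ALL | SIGHASH_FORKID, hence the [:-1] slices.)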

        # 2. For P2PKH there is exactly one pushData per spender
        assert_equal(1, len(dsp.spender1.pushData))
        assert_equal(1, len(dsp.spender2.pushData))

        # 3. The two spenders are different, specifically the signature (push data) has to be different.
        assert (dsp.spender1.pushData != dsp.spender2.pushData)

        # 4. The first & double spenders are sorted with two hashes as keys.
        assert (dsp.spender1.hashOutputs < dsp.spender2.hashOutputs)

        # 5. The double spent output is still available in the UTXO database,
        #    implying no spending transaction has been mined.
        assert_equal(
            self.nodes[0].gettransaction(firstDSTxId)["confirmations"], 0)

        # The transaction being spent, as reported by the DSP, is the original fundingtx
        assert_equal(hex(dsp.prevTxId)[2:], fundingtxid)
        assert_equal(dsp.prevOutIndex, 0)

        # 6. No other valid proof is known,
        #    i.e. if a valid proof is already known, no new proofs will be constructed.
        #    We submit a _triple_ spend transaction to the third node
        connect_nodes(self.nodes[0], self.nodes[2])
        self.nodes[2].call_rpc('sendrawtransaction',
                               thirdDSTx,
                               ignore_error='txn-mempool-conflict')
        #    Wait for a new dsp to be relayed to the node;
        #    if such a dsp (or the double- or triple-spending tx) arrives, the test fails
        assert_raises(AssertionError,
                      wait_until,
                      lambda: dspReceiver.message_count["dsproof-beta"] == 2 or
                      dspReceiver.message_count["tx"] == 2,
                      lock=p2p_lock,
                      timeout=5)

        # Only P2PKH inputs are protected
        # Check that a non-P2PKH output is not protected
        self.generate(self.nodes[0], 1)
        fundingtxid = self.nodes[0].getblock(
            self.nodes[0].getblockhash(2))['tx'][0]
        fundingtx = FromHex(CTransaction(),
                            self.nodes[0].getrawtransaction(fundingtxid))
        fundingtx.rehash()
        nonP2PKHTx = create_tx_with_script(fundingtx, 0,
                                           b'', int(49.95 * COIN),
                                           CScript([OP_TRUE]))
        signedNonP2PKHTx = self.nodes[0].signrawtransactionwithwallet(
            ToHex(nonP2PKHTx))
        self.nodes[0].sendrawtransaction(signedNonP2PKHTx['hex'])
        self.sync_all()

        tx = FromHex(CTransaction(), signedNonP2PKHTx['hex'])
        tx.rehash()

        firstDSTx = create_tx_with_script(tx, 0, b'', int(49.90 * COIN),
                                          CScript([OP_TRUE]))
        secondDSTx = create_tx_with_script(tx, 0, b'', int(49.90 * COIN),
                                           CScript([OP_FALSE]))

        self.nodes[0].sendrawtransaction(ToHex(firstDSTx))
        self.nodes[1].call_rpc('sendrawtransaction',
                               ToHex(secondDSTx),
                               ignore_error='txn-mempool-conflict')

        assert_raises(AssertionError,
                      wait_until,
                      lambda: dspReceiver.message_count["dsproof-beta"] == 2,
                      lock=p2p_lock,
                      timeout=5)

        # Check that unconfirmed outputs are also protected
        self.generate(self.nodes[0], 1)
        unconfirmedtx = self.nodes[0].sendtoaddress(
            self.nodes[0].getnewaddress(), 25)
        self.sync_all()

        firstDSTx = create_raw_transaction(self.nodes[0], unconfirmedtx,
                                           self.nodes[0].getnewaddress(), 24.9)
        secondDSTx = create_raw_transaction(self.nodes[0], unconfirmedtx,
                                            self.nodes[0].getnewaddress(),
                                            24.9)

        self.nodes[0].sendrawtransaction(firstDSTx)
        self.nodes[1].call_rpc('sendrawtransaction',
                               secondDSTx,
                               ignore_error='txn-mempool-conflict')

        wait_until(lambda: dspReceiver.message_count["dsproof-beta"] == 2,
                   lock=p2p_lock,
                   timeout=5)
        dsp2 = dspReceiver.last_message["dsproof-beta"].dsproof
        dsps.add(dsp2.serialize())
        assert (len(dsps) == 2)

        # Check that a double spent tx, which has some non-P2PKH inputs
        # in its ancestor, still results in a dsproof being emitted.
        self.generate(self.nodes[0], 1)
        # Create a 1-of-2 multisig address which will be an in-mempool
        # ancestor to a double-spent tx
        pubkey0 = self.nodes[0].getaddressinfo(
            self.nodes[0].getnewaddress())['pubkey']
        pubkey1 = self.nodes[1].getaddressinfo(
            self.nodes[1].getnewaddress())['pubkey']
        p2sh = self.nodes[0].addmultisigaddress(1, [pubkey0, pubkey1],
                                                "")['address']
        # Fund the p2sh address
        fundingtxid = self.nodes[0].sendtoaddress(p2sh, 49)
        vout = find_output(self.nodes[0], fundingtxid, Decimal('49'))
        self.sync_all()

        # Spend from the P2SH to a P2PKH, which we will double spend from
        # in the next step.
        p2pkh1 = self.nodes[0].getnewaddress()
        rawtx1 = create_raw_transaction(self.nodes[0], fundingtxid, p2pkh1,
                                        48.999, vout)
        signed_tx1 = self.nodes[0].signrawtransactionwithwallet(rawtx1)
        txid1 = self.nodes[0].sendrawtransaction(signed_tx1['hex'])
        vout1 = find_output(self.nodes[0], txid1, Decimal('48.999'))
        self.sync_all()

        # Now double spend the P2PKH which has a P2SH ancestor.
        firstDSTx = create_raw_transaction(self.nodes[0], txid1,
                                           self.nodes[0].getnewaddress(), 48.9,
                                           vout1)
        secondDSTx = create_raw_transaction(self.nodes[0], txid1,
                                            self.nodes[1].getnewaddress(),
                                            48.9, vout1)
        self.nodes[0].sendrawtransaction(firstDSTx)
        self.nodes[1].call_rpc('sendrawtransaction',
                               secondDSTx,
                               ignore_error='txn-mempool-conflict')

        # We still get a dsproof, showing that not all ancestors have
        # to be P2PKH.
        wait_until(lambda: dspReceiver.message_count["dsproof-beta"] == 3,
                   lock=p2p_lock,
                   timeout=5)
        dsp3 = dspReceiver.last_message["dsproof-beta"].dsproof
        dsps.add(dsp3.serialize())
        assert (len(dsps) == 3)

        # Check that a double spent tx, which has some unconfirmed ANYONECANPAY
        # transactions in its ancestry, still results in a dsproof being emitted.
        self.generate(self.nodes[0], 1)
        fundingtxid = self.nodes[0].getblock(
            self.nodes[0].getblockhash(5))['tx'][0]
        vout1 = find_output(self.nodes[0], fundingtxid, Decimal('50'))
        addr = self.nodes[1].getnewaddress()
        pubkey = self.nodes[1].getaddressinfo(addr)['pubkey']
        inputs = [{
            'txid': fundingtxid,
            'vout': vout1,
            'amount': 49.99,
            'scriptPubKey': pubkey
        }]
        outputs = {addr: 49.99}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(
            rawtx, None, "NONE|FORKID|ANYONECANPAY")
        assert 'complete' in signed
        assert_equal(signed['complete'], True)
        assert 'errors' not in signed
        txid = self.nodes[0].sendrawtransaction(signed['hex'])
        self.sync_all()
        # The ANYONECANPAY is still unconfirmed, but let's create some
        # double spends from it.
        vout2 = find_output(self.nodes[0], txid, Decimal('49.99'))
        firstDSTx = create_raw_transaction(self.nodes[1], txid,
                                           self.nodes[0].getnewaddress(),
                                           49.98, vout2)
        secondDSTx = create_raw_transaction(self.nodes[1], txid,
                                            self.nodes[1].getnewaddress(),
                                            49.98, vout2)
        self.nodes[0].sendrawtransaction(firstDSTx)
        self.nodes[1].call_rpc('sendrawtransaction',
                               secondDSTx,
                               ignore_error='txn-mempool-conflict')
        # We get a dsproof.
        wait_until(lambda: dspReceiver.message_count["dsproof-beta"] == 4,
                   lock=p2p_lock,
                   timeout=5)
        dsp4 = dspReceiver.last_message["dsproof-beta"].dsproof
        dsps.add(dsp4.serialize())
        assert (len(dsps) == 4)

        # Create a P2SH to double-spend directly (1-of-1 multisig)
        self.generate(self.nodes[0], 1)
        self.sync_all()
        pubkey2 = self.nodes[0].getaddressinfo(
            self.nodes[0].getnewaddress())['pubkey']
        p2sh = self.nodes[0].addmultisigaddress(1, [
            pubkey2,
        ], "")['address']
        fundingtxid = self.nodes[0].sendtoaddress(p2sh, 49)
        vout = find_output(self.nodes[0], fundingtxid, Decimal('49'))
        self.sync_all()
        # Now double spend it
        firstDSTx = create_raw_transaction(self.nodes[0], fundingtxid,
                                           self.nodes[0].getnewaddress(), 48.9,
                                           vout)
        secondDSTx = create_raw_transaction(self.nodes[0], fundingtxid,
                                            self.nodes[1].getnewaddress(),
                                            48.9, vout)
        self.nodes[0].sendrawtransaction(firstDSTx)
        self.nodes[1].call_rpc('sendrawtransaction',
                               secondDSTx,
                               ignore_error='txn-mempool-conflict')
        # No dsproof is generated.
        assert_raises(AssertionError,
                      wait_until,
                      lambda: dspReceiver.message_count["dsproof-beta"] == 5,
                      lock=p2p_lock,
                      timeout=5)

        # Check end conditions - still only 4 DSPs
        last_dsp = dspReceiver.last_message["dsproof-beta"].dsproof
        dsps.add(last_dsp.serialize())
        assert (len(dsps) == 4)

        # Next, test that submitting a double-spend via the RPC interface also results in a broadcasted
        # dsproof
        self.generate(self.nodes[0], 1)
        self.sync_all()
        fundingtxid = self.nodes[0].getblock(
            self.nodes[0].getblockhash(6))['tx'][0]
        # Create 2 new double-spends
        firstDSTx = create_raw_transaction(self.nodes[0], fundingtxid,
                                           self.nodes[0].getnewaddress(),
                                           49.95)
        secondDSTx = create_raw_transaction(self.nodes[0], fundingtxid,
                                            self.nodes[0].getnewaddress(),
                                            49.95)

        # Send the two conflicting transactions to the same node via RPC
        assert_equal(dspReceiver.message_count["dsproof-beta"], 4)
        firstDSTxId = self.nodes[0].sendrawtransaction(firstDSTx)
        # send second tx to same node via RPC
        # -- it's normal for it to reject the tx, but it should still generate a dsproof broadcast
        assert_raises_rpc_error(-26, "txn-mempool-conflict (code 18)",
                                self.nodes[0].sendrawtransaction, secondDSTx)
        wait_until(lambda: dspReceiver.message_count["dsproof-beta"] == 5,
                   lock=p2p_lock,
                   timeout=5)

        # Ensure that the non-dsproof node has the messages we expect in its log
        # (this checks that dsproof was disabled for this node)
        debug_log = os.path.join(non_dsproof_node.datadir, self.chain,
                                 'debug.log')
        dsp_inv_ctr = 0
        with open(debug_log, encoding='utf-8') as dl:
            for line in dl.readlines():
                if "Got DSProof INV" in line:
                    # Ensure that if this node did see a dsproof inv, it explicitly ignored it
                    assert "(ignored, -doublespendproof=0)" in line
                    dsp_inv_ctr += 1
                else:
                    # Ensure this node is not processing dsproof messages and not requesting them via getdata
                    assert ("received: dsproof-beta" not in line
                            and "Good DSP" not in line
                            and "DSP broadcasting" not in line
                            and "bad-dsproof" not in line)
        # We expect it to have received at least some DSP inv broadcasts
        assert_greater_than(dsp_inv_ctr, 0)

        # Finally test that restarting the node persists the dsproof
        # - ensure nodes 0 & 1 saw the dsp
        assert self.nodes[0].getdsproof(firstDSTxId) is not None
        wait_until(lambda: self.nodes[1].getdsproof(firstDSTxId) is not None,
                   timeout=10)

        # Node 1 will have its dsproofs.dat deleted; check that we get the expected
        # error message and that the proof won't come back.
        with self.nodes[1].assert_debug_log(expected_msgs=[
                'Imported mempool transactions from disk',
                'Failed to open dsproofs file on disk. Continuing anyway.'
        ],
                                            timeout=60):
            dsproofs_dat = os.path.join(self.nodes[1].datadir, self.chain,
                                        'dsproofs.dat')
            self.stop_node(1)
            # remove the dsproofs.dat file to keep the proof from coming back
            os.remove(dsproofs_dat)
            self.start_node(1, self.extra_args[1])
        # Node 1 has no dsproofs.dat, so it should forget the dsproof after a restart.
        assert self.nodes[1].getdsproof(firstDSTxId) is None

        # Node 0 -- default behavior; it should still see the dsproof for the txid after a restart.
        # Wait for the debug log to say that the expected number of dsproofs were loaded without error
        with self.nodes[0].assert_debug_log(
                expected_msgs=['Imported dsproofs from disk: 5'], timeout=60):
            self.restart_node(0, self.extra_args[0])
        wait_until(
            lambda: self.nodes[0].getdsproof(firstDSTxId) is not None,
            timeout=60,
        )
Example #7
    def run_test(self):
        protected_peers = set(
        )  # peers that we expect to be protected from eviction
        current_peer = -1
        node = self.nodes[0]
        node.generatetoaddress(101, node.get_deterministic_priv_key().address)

        self.log.info(
            "Create 4 peers and protect them from eviction by sending us a block"
        )
        for _ in range(4):
            block_peer = node.add_p2p_connection(SlowP2PDataStore())
            current_peer += 1
            block_peer.sync_with_ping()
            best_block = node.getbestblockhash()
            tip = int(best_block, 16)
            best_block_time = node.getblock(best_block)['time']
            block = create_block(tip,
                                 create_coinbase(node.getblockcount() + 1),
                                 best_block_time + 1)
            block.solve()
            block_peer.send_blocks_and_test([block], node, success=True)
            protected_peers.add(current_peer)

        self.log.info(
            "Create 5 slow-pinging peers, making them eviction candidates")
        for _ in range(5):
            node.add_p2p_connection(SlowP2PInterface())
            current_peer += 1

        self.log.info(
            "Create 4 peers and protect them from eviction by sending us a tx")
        for i in range(4):
            txpeer = node.add_p2p_connection(SlowP2PInterface())
            current_peer += 1
            txpeer.sync_with_ping()

            prevtx = node.getblock(node.getblockhash(i + 1), 2)['tx'][0]
            rawtx = node.createrawtransaction(
                inputs=[{
                    'txid': prevtx['txid'],
                    'vout': 0
                }],
                outputs=[{
                    node.get_deterministic_priv_key().address:
                    50 - 0.00125
                }],
            )
            sigtx = node.signrawtransactionwithkey(
                hexstring=rawtx,
                privkeys=[node.get_deterministic_priv_key().key],
                prevtxs=[{
                    'txid':
                    prevtx['txid'],
                    'vout':
                    0,
                    'scriptPubKey':
                    prevtx['vout'][0]['scriptPubKey']['hex'],
                }],
            )['hex']
            txpeer.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
            protected_peers.add(current_peer)

        self.log.info(
            "Create 8 peers and protect them from eviction by having faster pings"
        )
        for _ in range(8):
            fastpeer = node.add_p2p_connection(P2PInterface())
            current_peer += 1
            self.wait_until(lambda: "ping" in fastpeer.last_message,
                            timeout=10)

        # Double-check by asking the node what the actual min pings are
        peerinfo = node.getpeerinfo()
        pings = {}
        for i in range(len(peerinfo)):
            pings[i] = peerinfo[i]['minping'] if 'minping' in peerinfo[
                i] else 1000000
        sorted_pings = sorted(pings.items(), key=lambda x: x[1])

        # Usually the 8 fast peers are protected. In the rare case of unreliable pings,
        # one of the slower peers might have a faster min ping, though.
        for i in range(8):
            protected_peers.add(sorted_pings[i][0])

        self.log.info("Create peer that triggers the eviction mechanism")
        node.add_p2p_connection(SlowP2PInterface())

        # One of the non-protected peers must be evicted. We can't be sure which one because
        # 4 peers are protected via netgroup, which is identical for all peers,
        # and the eviction mechanism doesn't preserve the order of identical elements.
        evicted_peers = []
        for i in range(len(node.p2ps)):
            if not node.p2ps[i].is_connected:
                evicted_peers.append(i)

        self.log.info("Test that one peer was evicted")
        self.log.debug("{} evicted peer: {}".format(len(evicted_peers),
                                                    set(evicted_peers)))
        assert_equal(len(evicted_peers), 1)

        self.log.info("Test that no peer expected to be protected was evicted")
        self.log.debug("{} protected peers: {}".format(len(protected_peers),
                                                       protected_peers))
        assert evicted_peers[0] not in protected_peers
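The min-ping bookkeeping above can also be expressed more compactly; an equivalent sketch operating on the getpeerinfo() result (eight_fastest is an illustrative name, not part of the test):

def eight_fastest(peerinfo):
    # peerinfo is the list returned by node.getpeerinfo(); peers that have not
    # reported a ping yet get a large sentinel value so they sort last.
    pings = {i: info.get('minping', 1000000) for i, info in enumerate(peerinfo)}
    return [i for i, _ in sorted(pings.items(), key=lambda kv: kv[1])[:8]]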
Example #8
    def relay_tests(self):
        self.log.info('Test address relay')
        self.log.info(
            'Check that addr message content is relayed and added to addrman')
        addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
        num_receivers = 7
        receivers = []
        for _ in range(num_receivers):
            receivers.append(self.nodes[0].add_p2p_connection(
                AddrReceiver(test_addr_contents=True)))

        # Keep this with length <= 10. Addresses from larger messages are not
        # relayed.
        num_ipv4_addrs = 10
        msg = self.setup_addr_msg(num_ipv4_addrs)
        with self.nodes[0].assert_debug_log([
                'received: addr (301 bytes) peer=1',
        ]):
            self.send_addr_msg(addr_source, msg, receivers)

        total_ipv4_received = sum(r.num_ipv4_received for r in receivers)

        # Every IPv4 address must be relayed to two peers, other than the
        # originating node (addr_source).
        ipv4_branching_factor = 2
        assert_equal(total_ipv4_received,
                     num_ipv4_addrs * ipv4_branching_factor)

        self.nodes[0].disconnect_p2ps()

        self.log.info('Check relay of addresses received from outbound peers')
        inbound_peer = self.nodes[0].add_p2p_connection(
            AddrReceiver(test_addr_contents=True, send_getaddr=False))
        full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(
            AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
        msg = self.setup_addr_msg(2)
        self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
        self.log.info(
            'Check that the first addr message received from an outbound peer is not relayed'
        )
        # Currently, there is a flag that prevents the first addr message received
        # from a new outbound peer from being relayed to others. Originally meant to prevent
        # large GETADDR responses from being relayed, it now typically affects the
        # self-announcement of the outbound peer, which is often sent before the GETADDR response.
        assert_equal(inbound_peer.num_ipv4_received, 0)

        # Send an empty ADDR message to initialize address relay on this connection.
        inbound_peer.send_and_ping(msg_addr())

        self.log.info(
            'Check that subsequent addr messages sent from an outbound peer are relayed'
        )
        msg2 = self.setup_addr_msg(2)
        self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
        assert_equal(inbound_peer.num_ipv4_received, 2)

        self.log.info('Check address relay to outbound peers')
        block_relay_peer = self.nodes[0].add_outbound_p2p_connection(
            AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
        msg3 = self.setup_addr_msg(2)
        self.send_addr_msg(inbound_peer, msg3,
                           [full_outbound_peer, block_relay_peer])

        self.log.info(
            'Check that addresses are relayed to full outbound peers')
        assert_equal(full_outbound_peer.num_ipv4_received, 2)
        self.log.info(
            'Check that addresses are not relayed to block-relay-only outbound peers'
        )
        assert_equal(block_relay_peer.num_ipv4_received, 0)

        self.nodes[0].disconnect_p2ps()
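The AddrReceiver class used above is defined elsewhere in the test file. A minimal sketch of the IPv4-counting part, assuming the framework's P2PInterface on_addr hook (the module path and the crude IPv4 check are assumptions):

from test_framework.p2p import P2PInterface  # test_framework.mininode on older branches


class MinimalAddrReceiver(P2PInterface):
    num_ipv4_received = 0

    def on_addr(self, message):
        # message.addrs holds the CAddress entries from the addr payload
        for addr in message.addrs:
            if addr.ip.count('.') == 3:  # crude IPv4 check, for illustration only
                self.num_ipv4_received += 1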
Example #9
    def run_test(self):
        # Node 0 supports COMPACT_FILTERS, node 1 does not.
        peer_0 = self.nodes[0].add_p2p_connection(FiltersClient())
        peer_1 = self.nodes[1].add_p2p_connection(FiltersClient())

        # Nodes 0 & 1 share the same first 999 blocks in the chain.
        self.generate(self.nodes[0], 999)

        # Create stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
        self.disconnect_nodes(0, 1)

        stale_block_hash = self.generate(self.nodes[0], 1,
                                         sync_fun=self.no_op)[0]
        self.nodes[0].syncwithvalidationinterfacequeue()
        assert_equal(self.nodes[0].getblockcount(), 1000)

        self.generate(self.nodes[1], 1001, sync_fun=self.no_op)
        assert_equal(self.nodes[1].getblockcount(), 2000)

        # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
        assert peer_0.nServices & NODE_COMPACT_FILTERS != 0
        assert peer_1.nServices & NODE_COMPACT_FILTERS == 0

        # Check that the localservices is as expected.
        assert int(self.nodes[0].getnetworkinfo()['localservices'],
                   16) & NODE_COMPACT_FILTERS != 0
        assert int(self.nodes[1].getnetworkinfo()['localservices'],
                   16) & NODE_COMPACT_FILTERS == 0

        self.log.info("get cfcheckpt on chain to be re-orged out.")
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(stale_block_hash, 16),
        )
        peer_0.send_and_ping(message=request)
        response = peer_0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)
        assert_equal(len(response.headers), 1)

        self.log.info("Reorg node 0 to a new chain.")
        self.connect_nodes(0, 1)
        self.sync_blocks(timeout=600)
        self.nodes[0].syncwithvalidationinterfacequeue()

        main_block_hash = self.nodes[0].getblockhash(1000)
        assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"

        self.log.info("Check that peers can fetch cfcheckpt on active chain.")
        tip_hash = self.nodes[0].getbestblockhash()
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(tip_hash, 16),
        )
        peer_0.send_and_ping(request)
        response = peer_0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)

        main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash,
                                                      'basic')['header']
        tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash,
                                                     'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)],
        )

        self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(stale_block_hash, 16),
        )
        peer_0.send_and_ping(request)
        response = peer_0.last_message['cfcheckpt']

        stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash,
                                                       'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (stale_cfcheckpt, )],
        )

        self.log.info("Check that peers can fetch cfheaders on active chain.")
        request = msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(main_block_hash, 16),
        )
        peer_0.send_and_ping(request)
        response = peer_0.last_message['cfheaders']
        main_cfhashes = response.hashes
        assert_equal(len(main_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(main_cfcheckpt, 16),
        )
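        # Editor's sketch (assumption about compute_last_header): per BIP 157,
        # each filter header is the double-SHA256 of the filter hash
        # concatenated with the previous filter header, so the helper is
        # presumably equivalent to something like the following (ser_uint256
        # assumed to come from test_framework.messages):
        #
        #     def fold_headers(prev_header, filter_hashes):
        #         header = ser_uint256(prev_header)
        #         for filter_hash in filter_hashes:
        #             header = hash256(ser_uint256(filter_hash) + header)
        #         return uint256_from_str(header)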

        self.log.info("Check that peers can fetch cfheaders on stale chain.")
        request = msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(stale_block_hash, 16),
        )
        peer_0.send_and_ping(request)
        response = peer_0.last_message['cfheaders']
        stale_cfhashes = response.hashes
        assert_equal(len(stale_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(stale_cfcheckpt, 16),
        )

        self.log.info("Check that peers can fetch cfilters.")
        stop_hash = self.nodes[0].getblockhash(10)
        request = msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(stop_hash, 16),
        )
        peer_0.send_and_ping(request)
        response = peer_0.pop_cfilters()
        assert_equal(len(response), 10)

        self.log.info("Check that cfilter responses are correct.")
        for cfilter, cfhash, height in zip(response, main_cfhashes,
                                           range(1, 11)):
            block_hash = self.nodes[0].getblockhash(height)
            assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
            assert_equal(cfilter.block_hash, int(block_hash, 16))
            computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
            assert_equal(computed_cfhash, cfhash)

        self.log.info("Check that peers can fetch cfilters for stale blocks.")
        request = msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1000,
            stop_hash=int(stale_block_hash, 16),
        )
        peer_0.send_and_ping(request)
        response = peer_0.pop_cfilters()
        assert_equal(len(response), 1)

        cfilter = response[0]
        assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
        assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
        computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
        assert_equal(computed_cfhash, stale_cfhashes[999])

        self.log.info(
            "Requests to node 1 without NODE_COMPACT_FILTERS result in disconnection."
        )
        requests = [
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=int(main_block_hash, 16),
            ),
            msg_getcfheaders(
                filter_type=FILTER_TYPE_BASIC,
                start_height=1000,
                stop_hash=int(main_block_hash, 16),
            ),
            msg_getcfilters(
                filter_type=FILTER_TYPE_BASIC,
                start_height=1000,
                stop_hash=int(main_block_hash, 16),
            ),
        ]
        for request in requests:
            peer_1 = self.nodes[1].add_p2p_connection(P2PInterface())
            peer_1.send_message(request)
            peer_1.wait_for_disconnect()

        self.log.info("Check that invalid requests result in disconnection.")
        requests = [
            # Requesting too many filters results in disconnection.
            msg_getcfilters(
                filter_type=FILTER_TYPE_BASIC,
                start_height=0,
                stop_hash=int(main_block_hash, 16),
            ),
            # Requesting too many filter headers results in disconnection.
            msg_getcfheaders(
                filter_type=FILTER_TYPE_BASIC,
                start_height=0,
                stop_hash=int(tip_hash, 16),
            ),
            # Requesting unknown filter type results in disconnection.
            msg_getcfcheckpt(
                filter_type=255,
                stop_hash=int(main_block_hash, 16),
            ),
            # Requesting unknown hash results in disconnection.
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=123456789,
            ),
        ]
        for request in requests:
            peer_0 = self.nodes[0].add_p2p_connection(P2PInterface())
            peer_0.send_message(request)
            peer_0.wait_for_disconnect()

        self.log.info(
            "Test -peerblockfilters without -blockfilterindex raises an error")
        self.stop_node(0)
        self.nodes[0].extra_args = ["-peerblockfilters"]
        msg = "Error: Cannot set -peerblockfilters without -blockfilterindex."
        self.nodes[0].assert_start_raises_init_error(expected_msg=msg)

        self.log.info(
            "Test -blockfilterindex with -reindex-chainstate raises an error")
        self.nodes[0].assert_start_raises_init_error(
            expected_msg=
            'Error: -reindex-chainstate option is not compatible with -blockfilterindex. '
            'Please temporarily disable blockfilterindex while using -reindex-chainstate, or replace -reindex-chainstate with -reindex to fully rebuild all indexes.',
            extra_args=['-blockfilterindex', '-reindex-chainstate'],
        )
Exemplo n.º 10
0
    def run_test(self):
        # Nodes will only request hb compact blocks mode when they're out of IBD
        for node in self.nodes:
            assert not node.getblockchaininfo()['initialblockdownload']

        p2p_conn_blocksonly = self.nodes[0].add_p2p_connection(P2PInterface())
        p2p_conn_high_bw = self.nodes[1].add_p2p_connection(P2PInterface())
        p2p_conn_low_bw = self.nodes[3].add_p2p_connection(P2PInterface())
        for conn in [p2p_conn_blocksonly, p2p_conn_high_bw, p2p_conn_low_bw]:
            assert_equal(conn.message_count['sendcmpct'], 2)
            conn.send_and_ping(msg_sendcmpct(announce=False, version=2))

        # Nodes:
        #   0 -> blocksonly
        #   1 -> high bandwidth
        #   2 -> miner
        #   3 -> low bandwidth
        #
        # Topology:
        #   p2p_conn_blocksonly ---> node0
        #   p2p_conn_high_bw    ---> node1
        #   p2p_conn_low_bw     ---> node3
        #   node2 (no connections)
        #
        # node2 produces blocks that are passed to the rest of the nodes
        # through the respective p2p connections.

        self.log.info(
            "Test that -blocksonly nodes do not select peers for BIP152 high bandwidth mode"
        )

        block0 = self.build_block_on_tip()

        # A -blocksonly node should not request BIP152 high bandwidth mode upon
        # receiving a new valid block at the tip.
        p2p_conn_blocksonly.send_and_ping(msg_block(block0))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block0.sha256)
        assert_equal(p2p_conn_blocksonly.message_count['sendcmpct'], 2)
        assert_equal(p2p_conn_blocksonly.last_message['sendcmpct'].announce,
                     False)

        # A normal node participating in transaction relay should request BIP152
        # high bandwidth mode upon receiving a new valid block at the tip.
        p2p_conn_high_bw.send_and_ping(msg_block(block0))
        assert_equal(int(self.nodes[1].getbestblockhash(), 16), block0.sha256)
        p2p_conn_high_bw.wait_until(
            lambda: p2p_conn_high_bw.message_count['sendcmpct'] == 3)
        assert_equal(p2p_conn_high_bw.last_message['sendcmpct'].announce, True)
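        # Editor's note (hedged): in BIP 152 high-bandwidth mode the node asks a
        # peer to announce new blocks via cmpctblock without waiting for a
        # getdata; Bitcoin Core is understood to keep at most three such
        # high-bandwidth peers, preferring those that most recently delivered a
        # new block, which is why relaying block0 above triggers the third
        # sendcmpct (announce=True) on this connection.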

        # Don't send a block from the p2p_conn_low_bw so the low bandwidth node
        # doesn't select it for BIP152 high bandwidth relay.
        self.nodes[3].submitblock(block0.serialize().hex())

        self.log.info("Test that -blocksonly nodes send getdata(BLOCK) instead"
                      " of getdata(CMPCT) in BIP152 low bandwidth mode")

        block1 = self.build_block_on_tip()

        p2p_conn_blocksonly.send_message(
            msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_blocksonly.sync_send_with_ping()
        assert_equal(p2p_conn_blocksonly.last_message['getdata'].inv,
                     [CInv(MSG_BLOCK | MSG_WITNESS_FLAG, block1.sha256)])

        p2p_conn_high_bw.send_message(
            msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_high_bw.sync_send_with_ping()
        assert_equal(p2p_conn_high_bw.last_message['getdata'].inv,
                     [CInv(MSG_CMPCT_BLOCK, block1.sha256)])
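        # Editor's note (hedged): the inv type constants used above are, as far
        # as I know, MSG_BLOCK == 2, MSG_CMPCT_BLOCK == 4 and
        # MSG_WITNESS_FLAG == 1 << 30, so the -blocksonly node requests the
        # plain witness block (0x40000002) while the relaying node requests the
        # compact block instead.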

        self.log.info(
            "Test that getdata(CMPCT) is still sent on BIP152 low bandwidth connections"
            " when no -blocksonly nodes are involved")

        p2p_conn_low_bw.send_and_ping(
            msg_headers(headers=[CBlockHeader(block1)]))
        p2p_conn_low_bw.sync_with_ping()
        assert_equal(p2p_conn_low_bw.last_message['getdata'].inv,
                     [CInv(MSG_CMPCT_BLOCK, block1.sha256)])
Exemplo n.º 11
0
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info("Mining {} blocks".format(DERSIG_HEIGHT - 1))
        self.coinbase_txids = [
            self.nodes[0].getblock(b)['tx'][0]
            for b in self.generate(self.nodes[0], DERSIG_HEIGHT - 1)
        ]
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info("Test that blocks must now be at least version 3")
        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT),
                             block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                '{}, bad-version(0x00000002)'.format(block.hash)
        ]):
            self.nodes[0].p2p.send_and_ping(msg_block(block))
            assert_equal(self.nodes[0].getbestblockhash(), tip)
            self.nodes[0].p2p.sync_with_ping()

        self.log.info(
            "Test that transactions with non-DER signatures cannot appear in a block"
        )
        block.nVersion = 3

        spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                                     self.nodeaddress, 1.0)
        unDERify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # rejected from the mempool for exactly that reason.
        assert_equal([{
            'txid':
            spendtx.hash,
            'allowed':
            False,
            'reject-reason':
            '16: mandatory-script-verify-flag-failed (Non-canonical DER signature)'
        }], self.nodes[0].testmempoolaccept(rawtxs=[ToHex(spendtx)],
                                            allowhighfees=True))

        # Now we verify that a block with this transaction is also invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                'ConnectBlock {} failed, blk-bad-inputs'.format(block.hash)
        ]):
            self.nodes[0].p2p.send_and_ping(msg_block(block))
            assert_equal(self.nodes[0].getbestblockhash(), tip)
            self.nodes[0].p2p.sync_with_ping()

        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
                   lock=p2p_lock)
        with p2p_lock:
            assert self.nodes[0].p2p.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                         block.sha256)
            assert b'blk-bad-inputs' in self.nodes[0].p2p.last_message[
                "reject"].reason

        self.log.info(
            "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
        )
        block.vtx[1] = create_transaction(self.nodes[0],
                                          self.coinbase_txids[1],
                                          self.nodeaddress, 1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Exemplo n.º 12
0
    def run_test(self):
        # Start building a chain on node0.  node2 shouldn't be able to sync until node1's
        # minchainwork is exceeded
        starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
        self.log.info(f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})")

        starting_blockcount = self.nodes[2].getblockcount()

        num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
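        # Editor's note (hedged arithmetic): chain work accumulates roughly
        # 2^256 / (target + 1) per block, and regtest's proof-of-work limit
        # puts that at about 2 units per block (hence a constant like
        # REGTEST_WORK_PER_BLOCK = 2). The division above therefore gives the
        # number of blocks needed to lift node0's chain work from the genesis
        # block's work up to node1's -minimumchainwork setting.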
        self.log.info(f"Generating {num_blocks_to_generate} blocks on node0")
        hashes = self.generate(self.nodes[0], num_blocks_to_generate, sync_fun=self.no_op)

        self.log.info(f"Node0 current chain work: {self.nodes[0].getblockheader(hashes[-1])['chainwork']}")

        # Sleep a few seconds and verify that node2 didn't get any new blocks
        # or headers.  We sleep, rather than sync_blocks(node0, node1) because
        # it's reasonable either way for node1 to get the blocks, or not get
        # them (since they're below node1's minchainwork).
        time.sleep(3)

        self.log.info("Verifying node 2 has no more blocks than before")
        self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
        # Node2 shouldn't have any new headers yet, because node1 should not
        # have relayed anything.
        assert_equal(len(self.nodes[2].getchaintips()), 1)
        assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)

        assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[2].getblockcount(), starting_blockcount)

        self.log.info("Check that getheaders requests to node2 are ignored")
        peer = self.nodes[2].add_p2p_connection(P2PInterface())
        msg = msg_getheaders()
        msg.locator.vHave = [int(self.nodes[2].getbestblockhash(), 16)]
        msg.hashstop = 0
        peer.send_and_ping(msg)
        time.sleep(5)
        assert "headers" not in peer.last_message or len(peer.last_message["headers"].headers) == 0

        self.log.info("Generating one more block")
        self.generate(self.nodes[0], 1)

        self.log.info("Verifying nodes are all synced")

        # Because nodes in regtest are all manual connections (eg using
        # addnode), node1 should not have disconnected node0. If not for that,
        # we'd expect node1 to have disconnected node0 for serving an
        # insufficient work chain, in which case we'd need to reconnect them to
        # continue the test.

        self.sync_all()
        self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")

        self.log.info("Test that getheaders requests to node2 are not ignored")
        peer.send_and_ping(msg)
        assert "headers" in peer.last_message

        # Verify that node2 is in fact still in IBD (otherwise this test may
        # not be exercising the logic we want!)
        assert_equal(self.nodes[2].getblockchaininfo()['initialblockdownload'], True)

        self.log.info("Test -minimumchainwork with a non-hex value")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(
            ["-minimumchainwork=test"],
            expected_msg='Error: Invalid non-hex (test) minimum chain work value specified',
        )
Exemplo n.º 13
0
    def run_test(self):
        # Tests the net:inbound_message and net:outbound_message tracepoints
        # See https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#context-net

        class P2PMessage(ctypes.Structure):
            _fields_ = [
                ("peer_id", ctypes.c_uint64),
                ("peer_addr", ctypes.c_char * MAX_PEER_ADDR_LENGTH),
                ("peer_conn_type", ctypes.c_char * MAX_PEER_CONN_TYPE_LENGTH),
                ("msg_type", ctypes.c_char * MAX_MSG_TYPE_LENGTH),
                ("msg_size", ctypes.c_uint64),
                ("msg", ctypes.c_ubyte * MAX_MSG_DATA_LENGTH),
            ]

            def __repr__(self):
                return f"P2PMessage(peer={self.peer_id}, addr={self.peer_addr.decode('utf-8')}, conn_type={self.peer_conn_type.decode('utf-8')}, msg_type={self.msg_type.decode('utf-8')}, msg_size={self.msg_size})"

        self.log.info(
            "hook into the net:inbound_message and net:outbound_message tracepoints"
        )
        ctx = USDT(path=str(self.options.bitcoind))
        ctx.enable_probe(probe="net:inbound_message",
                         fn_name="trace_inbound_message")
        ctx.enable_probe(probe="net:outbound_message",
                         fn_name="trace_outbound_message")
        bpf = BPF(text=net_tracepoints_program, usdt_contexts=[ctx], debug=0)

        # The handle_* functions are ctypes callbacks called from C. When an
        # assertion fails inside a handle_* function, the AssertionError does not
        # propagate back to Python; the exception is swallowed. We therefore
        # manually count successful checks and assert on the counters afterwards.
        EXPECTED_INOUTBOUND_VERSION_MSG = 1
        checked_inbound_version_msg = 0
        checked_outbound_version_msg = 0

        def check_p2p_message(event, inbound):
            nonlocal checked_inbound_version_msg, checked_outbound_version_msg
            if event.msg_type.decode("utf-8") == "version":
                self.log.info(
                    f"check_p2p_message(): {'inbound' if inbound else 'outbound'} {event}"
                )
                peer = self.nodes[0].getpeerinfo()[0]
                msg = msg_version()
                msg.deserialize(BytesIO(bytes(event.msg[:event.msg_size])))
                assert_equal(peer["id"], event.peer_id, peer["id"])
                assert_equal(peer["addr"], event.peer_addr.decode("utf-8"))
                assert_equal(peer["connection_type"],
                             event.peer_conn_type.decode("utf-8"))
                if inbound:
                    checked_inbound_version_msg += 1
                else:
                    checked_outbound_version_msg += 1

        def handle_inbound(_, data, __):
            event = ctypes.cast(data, ctypes.POINTER(P2PMessage)).contents
            check_p2p_message(event, True)

        def handle_outbound(_, data, __):
            event = ctypes.cast(data, ctypes.POINTER(P2PMessage)).contents
            check_p2p_message(event, False)

        bpf["inbound_messages"].open_perf_buffer(handle_inbound)
        bpf["outbound_messages"].open_perf_buffer(handle_outbound)

        self.log.info("connect a P2P test node to our bitcoind node")
        test_node = P2PInterface()
        self.nodes[0].add_p2p_connection(test_node)
        bpf.perf_buffer_poll(timeout=200)
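        # Editor's note (hedged): bcc's perf_buffer_poll() drains any queued
        # perf-buffer events and invokes the callbacks registered above; the
        # timeout argument is in milliseconds, so this call waits at most
        # roughly 200 ms for the version-message tracepoint events to arrive.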

        self.log.info(
            "check that we got both an inbound and outbound version message")
        assert_equal(EXPECTED_INOUTBOUND_VERSION_MSG,
                     checked_inbound_version_msg)
        assert_equal(EXPECTED_INOUTBOUND_VERSION_MSG,
                     checked_outbound_version_msg)

        bpf.cleanup()
Exemplo n.º 14
0
    def orphan_list_check(self):
        """Test that:
         - getdsprooflist omits orphans and/or includes them as expected for the include_orphans arg
         - the orphans appearing in the orphan list match what we expect"""

        # assumption is previous test left some proofs around
        len_noorphans = len(self.nodes[0].getdsprooflist(False, False))
        assert_greater_than(len_noorphans, 0)
        # previous test may or may not have left some orphans around, account for them
        len_orphans = len(self.nodes[0].getdsprooflist(False,
                                                       True)) - len_noorphans

        orphans_ids = [
            "978c2b3d829dbc934c170ff797c539a86d35fcfc0ec806a5753c00794cd5caad",
            "0e7f2e002073916cfa16692df9b44c2d52e808f7b55e75e7d941356dd90f2096",
        ]
        orphans_data = [
            bytes.fromhex(hx) for hx in (
                "326bd6eee699d18308a04720583f663ba039070a1abd8a59868ee03a1c250be10000000001000000feffffffbadb1500dbd7cf882"
                "c620ed8fb660b64444bfb4febf6c553d0f19cafdc3070bc2c27664618606b350cd8bf565266bc352f0caddcf01e8fa789dd8a1538"
                "6327cf8cabe198fdef7e5d2f370d4e96ab7cc22482f181b2c0e7e6275838aeed19eeedbfd378170141467adbad7deb7635bdf6bbe"
                "4c605ce57c3dccd01f5fb9e32b22c3479a1d3f143f3f9592f9e1ef9ea96f01141b261e468c46d31a4a63cde692947d126f34641e3"
                "4101000000feffffffbadb1500dbd7cf882c620ed8fb660b64444bfb4febf6c553d0f19cafdc3070bc2c27664618606b350cd8bf5"
                "65266bc352f0caddcf01e8fa789dd8a15386327cf8cabe198188af582e7a09fcfe0b1a37ee3ca6c91f80c13006e595c79320ac38d"
                "40a945cf0141210f8a36fe24b9fb1cb5a2a8cb01ac27d58410d8d8f3abf6fe935b2b1c1eadb285a4cdcd24727472af4d65b1c7ccb"
                "120361bdcbcadfb2f1436df9bfe9b9a5b0641",
                "11565d6e11586e4d0b989358be23299458afd76d8eedad32f96a671f775970740000000001000000feffffff4fdc1500ac7850e9b"
                "64559f703a9e6068bde8c175761408f0777299691083b0fc534aef618606b350cd8bf565266bc352f0caddcf01e8fa789dd8a1538"
                "6327cf8cabe198cdf604b8294fe87f39c637bcab10869db7cc306b0ddbf35ed92ab526dd18af69014126b3e82473f456f9bcb2bf1"
                "20ede1ad6e3ee588935b70033cfb625c317ced26f116b54d2effc3c9abf5efd38cffae57af50fb5fef88e1be7dc9d82a415fc1367"
                "4101000000feffffff4fdc1500ac7850e9b64559f703a9e6068bde8c175761408f0777299691083b0fc534aef618606b350cd8bf5"
                "65266bc352f0caddcf01e8fa789dd8a15386327cf8cabe1981194289492779938f938ce59ba48f916ba0b883803fbf2bfab22bf8d"
                "b09227ba0141fdb9ff69c028a6a1e4143bedcf2f44b1ea2b6996bd463440f9d3037845ad7a879963acbb424d3850ba6affdf81325"
                "e7753294a2e1959d9e84ba6108ce15e7cdc41",
            )
        ]
        orphans_proofs = []
        orphans_outpoints = set()
        for od in orphans_data:
            proof = CDSProof()
            proof.deserialize(BytesIO(od))
            orphans_outpoints.add((
                int.to_bytes(proof.prevTxId, 32,
                             byteorder='big').hex(),  # txid
                proof.prevOutIndex  # vout
            ))
            orphans_proofs.append(proof)
        assert len(orphans_outpoints) == 2

        p2p = P2PInterface()
        self.nodes[0].add_p2p_connection(p2p)
        wait_until(lambda: sum(p2p.message_count.values()) > 0, lock=p2p_lock)

        # send orphans to node0
        for proof in orphans_proofs:
            p2p.send_message(msg_dsproof(proof))

        # wait for node0 to have acknowledged the orphans
        wait_until(lambda: len(self.nodes[0].getdsprooflist(False, True)) ==
                   len_noorphans + len_orphans + 2)

        def check(len_noorphans, len_orphans):
            # verify that node0 has a view of the orphans that we expect
            dsplist_all = self.nodes[0].getdsprooflist(False, True)
            non_orph_ct = 0
            orph_ct = 0
            orphs_seen = set()
            outpoints_seen = set()
            matches = 0
            for dspid in dsplist_all:
                dsp = self.nodes[0].getdsproof(dspid, True)
                if dsp["txid"] is not None:
                    non_orph_ct += 1
                    continue
                orphs_seen.add(dsp["dspid"])
                orph_ct += 1
                op = dsp["outpoint"]
                outpoints_seen.add((op["txid"], op["vout"]))
                hexdata = self.nodes[0].getdsproof(dspid, False)["hex"]
                try:
                    # ensure hexdata we got for this dspid matches our data
                    orphans_data.index(bytes.fromhex(hexdata))
                    orphans_ids.index(dspid)
                    matches += 1
                except ValueError:
                    pass  # this is ok, stale orphan (ignore)
            assert_equal(matches, len(orphans_data))
            assert_equal(non_orph_ct, len_noorphans)
            assert_equal(orph_ct, len_orphans)
            assert_equal(orphs_seen & set(orphans_ids), set(orphans_ids))
            assert_equal(outpoints_seen & orphans_outpoints, orphans_outpoints)

        check(len_noorphans=len_noorphans,
              len_orphans=len_orphans + len(orphans_ids))

        # Mining a block should take every dsproof and send it to the orphan list;
        # it should also keep the same orphans from before around
        block_hash = self.generate(self.nodes[0], 1)[0]
        self.sync_all()
        # No non-orphans
        assert_equal(len(self.nodes[0].getdsprooflist()), 0)
        # All previous dsproofs are now orphans (because their associated tx was mined)
        assert_equal(len(self.nodes[0].getdsprooflist(False, True)),
                     len_orphans + len_noorphans + len(orphans_ids))

        # Test reorg behavior. On reorg, all tx's that go back to mempool should continue to have their previous
        # proofs. We invalidate the block and make sure that all the orphaned dsp's got claimed again by their
        # respective tx's which were put back into the mempool
        self.nodes[0].invalidateblock(block_hash)
        wait_until(
            lambda: len(self.nodes[0].getdsprooflist()) == len_noorphans,
            timeout=10)
        check(len_noorphans=len_noorphans,
              len_orphans=len_orphans + len(orphans_ids))
        # Now put the block back
        self.nodes[0].reconsiderblock(block_hash)
        self.sync_all()
        # There should again be no non-orphans
        assert_equal(len(self.nodes[0].getdsprooflist()), 0)
        # All previous dsproofs are now orphans again
        assert_equal(len(self.nodes[0].getdsprooflist(False, True)),
                     len_orphans + len_noorphans + len(orphans_ids))

        # Wait for all orphans to get auto-cleaned (this may take up to 60 seconds)
        self.nodes[0].setmocktime(int(time.time() + 100))
        wait_until(lambda: len(self.nodes[0].getdsprooflist(False, True)) == 0,
                   timeout=90)
Exemplo n.º 15
0
    def run_test(self):
        self.log.info("Mine 4 blocks on Node 0")
        self.generate(self.nodes[0], 4, sync_fun=self.no_op)
        assert_equal(self.nodes[0].getblockcount(), 204)

        self.log.info("Mine competing 3 blocks on Node 1")
        self.generate(self.nodes[1], 3, sync_fun=self.no_op)
        assert_equal(self.nodes[1].getblockcount(), 203)
        short_tip = self.nodes[1].getbestblockhash()

        self.log.info("Connect nodes to sync headers")
        self.connect_nodes(0, 1)
        self.sync_blocks()

        self.log.info(
            "Node 0 should only have the header for node 1's block 3")
        x = next(
            filter(lambda x: x['hash'] == short_tip,
                   self.nodes[0].getchaintips()))
        assert_equal(x['status'], "headers-only")
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, short_tip)

        self.log.info("Fetch block from node 1")
        peers = self.nodes[0].getpeerinfo()
        assert_equal(len(peers), 1)
        peer_0_peer_1_id = peers[0]["id"]

        self.log.info("Arguments must be sensible")
        assert_raises_rpc_error(
            -8, "hash must be of length 64 (not 4, for '1234')",
            self.nodes[0].getblockfrompeer, "1234", 0)

        self.log.info("We must already have the header")
        assert_raises_rpc_error(-1, "Block header missing",
                                self.nodes[0].getblockfrompeer, "00" * 32, 0)

        self.log.info("Non-existent peer generates error")
        assert_raises_rpc_error(-1, "Peer does not exist",
                                self.nodes[0].getblockfrompeer, short_tip,
                                peer_0_peer_1_id + 1)

        self.log.info("Fetching from pre-segwit peer generates error")
        self.nodes[0].add_p2p_connection(P2PInterface(),
                                         services=P2P_SERVICES & ~NODE_WITNESS)
        peers = self.nodes[0].getpeerinfo()
        assert_equal(len(peers), 2)
        presegwit_peer_id = peers[1]["id"]
        assert_raises_rpc_error(-1, "Pre-SegWit peer",
                                self.nodes[0].getblockfrompeer, short_tip,
                                presegwit_peer_id)

        self.log.info("Successful fetch")
        result = self.nodes[0].getblockfrompeer(short_tip, peer_0_peer_1_id)
        self.wait_until(lambda: self.check_for_block(short_tip), timeout=1)
        assert_equal(result, {})

        self.log.info("Don't fetch blocks we already have")
        assert_raises_rpc_error(-1, "Block already downloaded",
                                self.nodes[0].getblockfrompeer, short_tip,
                                peer_0_peer_1_id)
Exemplo n.º 16
0
    def test_oversized_msg(self, msg, size):
        msg_type = msg.msgtype.decode('ascii')
        self.log.info("Test {} message of size {} is logged as misbehaving".format(msg_type, size))
        with self.nodes[0].assert_debug_log(['Misbehaving', '{} message size = {}'.format(msg_type, size)]):
            self.nodes[0].add_p2p_connection(P2PInterface()).send_and_ping(msg)
        self.nodes[0].disconnect_p2ps()
Exemplo n.º 17
0
    def run_test(self):
        # Peer that never sends a version. We will send a bunch of messages
        # from this peer anyway and verify eventual disconnection.
        no_version_disconnect_peer = self.nodes[0].add_p2p_connection(
            LazyPeer(), send_version=False, wait_for_verack=False)

        # Another peer that never sends a version, nor any other messages. It shouldn't receive anything from the node.
        no_version_idle_peer = self.nodes[0].add_p2p_connection(
            LazyPeer(), send_version=False, wait_for_verack=False)

        # Peer that sends a version but not a verack.
        no_verack_idle_peer = self.nodes[0].add_p2p_connection(
            NoVerackIdlePeer(), wait_for_verack=False)

        # Send enough ping messages (any non-version message will do) prior to sending
        # version to reach the peer discouragement threshold. This should get us disconnected.
        for _ in range(DISCOURAGEMENT_THRESHOLD):
            no_version_disconnect_peer.send_message(msg_ping())
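        # Editor's note (hedged): each non-version message received before the
        # version handshake is understood to add one point of misbehaviour, and
        # DISCOURAGEMENT_THRESHOLD mirrors the node's threshold (100 in Bitcoin
        # Core), so this loop sends exactly enough pings to get the peer
        # discouraged and disconnected.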

        # Wait until we receive the node's verack in response to our version. Don't wait for
        # the node to receive a verack from us, since we never send one
        no_verack_idle_peer.wait_for_verack()

        no_version_disconnect_peer.wait_until(
            lambda: no_version_disconnect_peer.ever_connected,
            check_connected=False)
        no_version_idle_peer.wait_until(
            lambda: no_version_idle_peer.ever_connected)
        no_verack_idle_peer.wait_until(
            lambda: no_verack_idle_peer.version_received)

        # Mine a block and make sure that it's not sent to the connected peers
        self.nodes[0].generate(nblocks=1)

        # Give the node enough time to possibly leak out a message
        time.sleep(5)

        # Expect this peer to be disconnected for misbehavior
        assert not no_version_disconnect_peer.is_connected

        self.nodes[0].disconnect_p2ps()

        # Make sure no unexpected messages came in
        assert not no_version_disconnect_peer.unexpected_msg
        assert not no_version_idle_peer.unexpected_msg
        assert not no_verack_idle_peer.unexpected_msg

        self.log.info(
            'Check that the version message does not leak the local address of the node'
        )
        p2p_version_store = self.nodes[0].add_p2p_connection(P2PVersionStore())
        ver = p2p_version_store.version_received
        # Check that received time is within one hour of now
        assert_greater_than_or_equal(ver.nTime, time.time() - 3600)
        assert_greater_than_or_equal(time.time() + 3600, ver.nTime)
        assert_equal(ver.addrFrom.port, 0)
        assert_equal(ver.addrFrom.ip, '0.0.0.0')
        assert_equal(ver.nStartingHeight, 201)
        assert_equal(ver.nRelay, 1)

        self.log.info('Check that old peers are disconnected')
        p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(),
                                                        send_version=False,
                                                        wait_for_verack=False)
        old_version_msg = msg_version()
        old_version_msg.nVersion = 31799
        with self.nodes[0].assert_debug_log(
            ['peer=4 using obsolete version 31799; disconnecting']):
            p2p_old_peer.send_message(old_version_msg)
            p2p_old_peer.wait_for_disconnect()
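        # Editor's note (hedged): the node enforces a minimum peer protocol
        # version (MIN_PEER_PROTO_VERSION, 31800 in Bitcoin Core), so a version
        # message advertising 31799 falls just below the cutoff and triggers the
        # "obsolete version" disconnect asserted above.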
Exemplo n.º 18
0
    def run_test(self):
        self.log.info('Check that the node doesn\'t send unexpected messages before handshake completion')
        # Peer that never sends a version, nor any other messages. It shouldn't receive anything from the node.
        no_version_idle_peer = self.nodes[0].add_p2p_connection(LazyPeer(), send_version=False, wait_for_verack=False)

        # Peer that sends a version but not a verack.
        no_verack_idle_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), wait_for_verack=False)

        # Pre-wtxidRelay peer that sends a version but not a verack and does not support feature negotiation
        # messages which start at nVersion == 70016
        pre_wtxidrelay_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), send_version=False, wait_for_verack=False)
        pre_wtxidrelay_peer.send_message(self.create_old_version(70015))

        # Wait until the peers receive the node's verack in response to their version. Don't wait for
        # the node to receive a verack from them, since these peers never send one
        no_verack_idle_peer.wait_for_verack()
        pre_wtxidrelay_peer.wait_for_verack()

        no_version_idle_peer.wait_until(lambda: no_version_idle_peer.ever_connected)
        no_verack_idle_peer.wait_until(lambda: no_verack_idle_peer.version_received)
        pre_wtxidrelay_peer.wait_until(lambda: pre_wtxidrelay_peer.version_received)

        # Mine a block and make sure that it's not sent to the connected peers
        self.nodes[0].generate(nblocks=1)

        # Give the node enough time to possibly leak out a message
        time.sleep(PEER_TIMEOUT + 2)

        # Make sure only expected messages came in
        assert not no_version_idle_peer.unexpected_msg
        assert not no_version_idle_peer.got_wtxidrelay
        assert not no_version_idle_peer.got_sendaddrv2

        assert not no_verack_idle_peer.unexpected_msg
        assert no_verack_idle_peer.got_wtxidrelay
        assert no_verack_idle_peer.got_sendaddrv2

        assert not pre_wtxidrelay_peer.unexpected_msg
        assert not pre_wtxidrelay_peer.got_wtxidrelay
        assert not pre_wtxidrelay_peer.got_sendaddrv2

        # Expect peers to be disconnected due to timeout
        assert not no_version_idle_peer.is_connected
        assert not no_verack_idle_peer.is_connected
        assert not pre_wtxidrelay_peer.is_connected

        self.log.info('Check that the version message does not leak the local address of the node')
        p2p_version_store = self.nodes[0].add_p2p_connection(P2PVersionStore())
        ver = p2p_version_store.version_received
        # Check that received time is within one hour of now
        assert_greater_than_or_equal(ver.nTime, time.time() - 3600)
        assert_greater_than_or_equal(time.time() + 3600, ver.nTime)
        assert_equal(ver.addrFrom.port, 0)
        assert_equal(ver.addrFrom.ip, '0.0.0.0')
        assert_equal(ver.nStartingHeight, 201)
        assert_equal(ver.relay, 1)

        self.log.info('Check that old peers are disconnected')
        p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
        with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
            p2p_old_peer.send_message(self.create_old_version(31799))
            p2p_old_peer.wait_for_disconnect()
Exemplo n.º 19
0
    def test_message_causes_disconnect(self, message):
        """Add a p2p connection that sends a message and check that it disconnects."""
        peer = self.nodes[0].add_p2p_connection(P2PInterface())
        peer.send_message(message)
        peer.wait_for_disconnect()
        assert_equal(self.nodes[0].getconnectioncount(), 0)
Exemplo n.º 20
0
    def run_test(self):
        peer = self.nodes[0].add_p2p_connection(P2PInterface())

        self.test_dersig_info(is_active=False)

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_txids = [
            self.nodes[0].getblock(b)['tx'][0]
            for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)
        ]
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that a transaction with non-DER signature can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0],
                                     self.coinbase_txids[0],
                                     self.nodeaddress,
                                     amount=1.0)
        unDERify(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                             block_time)
        block.nVersion = VB_TOP_BITS
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
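        # Editor's note (hedged): VB_TOP_BITS is assumed to be the BIP 9
        # version-bits prefix (0x20000000), which is well above the minimum
        # block version enforced here, so the only thing that can invalidate
        # the later block is the non-DER signature.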

        self.test_dersig_info(
            is_active=False
        )  # Not active as of current tip and next block does not need to obey rules
        peer.send_and_ping(msg_block(block))
        self.test_dersig_info(
            is_active=True
        )  # Not active as of current tip, but next block must obey rules
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least VB_TOP_BITS")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                '{}, bad-version(0x00000002)'.format(block.hash)
        ]):
            peer.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            peer.sync_with_ping()

        self.log.info(
            "Test that transactions with non-DER signatures cannot appear in a block"
        )
        block.nVersion = VB_TOP_BITS

        spendtx = create_transaction(self.nodes[0],
                                     self.coinbase_txids[1],
                                     self.nodeaddress,
                                     amount=1.0)
        unDERify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # rejected from the mempool for exactly that reason.
        assert_equal([{
            'txid':
            spendtx.hash,
            'allowed':
            False,
            'reject-reason':
            'non-mandatory-script-verify-flag (Non-canonical DER signature)'
        }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()],
                                            maxfeerate=0))

        # Now we verify that a block with this transaction is also invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                'CheckInputScripts on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'
                .format(block.vtx[-1].hash)
        ]):
            peer.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            peer.sync_with_ping()

        self.log.info(
            "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
        )
        block.vtx[1] = create_transaction(self.nodes[0],
                                          self.coinbase_txids[1],
                                          self.nodeaddress,
                                          amount=1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.test_dersig_info(
            is_active=True
        )  # Not active as of current tip, but next block must obey rules
        peer.send_and_ping(msg_block(block))
        self.test_dersig_info(is_active=True)  # Active as of current tip
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Exemplo n.º 21
0
    def run_test(self):
        # Node 0 supports COMPACT_FILTERS, node 1 does not.
        node0 = self.nodes[0].add_p2p_connection(CFiltersClient())
        node1 = self.nodes[1].add_p2p_connection(CFiltersClient())

        # Nodes 0 & 1 share the same first 999 blocks in the chain.
        self.nodes[0].generate(999)
        self.sync_blocks(timeout=600)

        # Create stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
        disconnect_nodes(self.nodes[0], self.nodes[1])

        self.nodes[0].generate(1)
        wait_until(lambda: self.nodes[0].getblockcount() == 1000)
        stale_block_hash = self.nodes[0].getblockhash(1000)

        self.nodes[1].generate(1001)
        wait_until(lambda: self.nodes[1].getblockcount() == 2000)

        # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
        assert node0.nServices & NODE_COMPACT_FILTERS != 0
        assert node1.nServices & NODE_COMPACT_FILTERS == 0

        # Check that the localservices is as expected.
        assert int(self.nodes[0].getnetworkinfo()['localservices'],
                   16) & NODE_COMPACT_FILTERS != 0
        assert int(self.nodes[1].getnetworkinfo()['localservices'],
                   16) & NODE_COMPACT_FILTERS == 0

        self.log.info("get cfcheckpt on chain to be re-orged out.")
        request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                                   stop_hash=int(stale_block_hash, 16))
        node0.send_and_ping(message=request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)
        assert_equal(len(response.headers), 1)

        self.log.info("Reorg node 0 to a new chain.")
        connect_nodes(self.nodes[0], self.nodes[1])
        self.sync_blocks(timeout=600)

        main_block_hash = self.nodes[0].getblockhash(1000)
        assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"

        self.log.info("Check that peers can fetch cfcheckpt on active chain.")
        tip_hash = self.nodes[0].getbestblockhash()
        request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                                   stop_hash=int(tip_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)

        main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash,
                                                      'basic')['header']
        tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash,
                                                     'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)])

        self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
        request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                                   stop_hash=int(stale_block_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']

        stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash,
                                                       'basic')['header']
        assert_equal(response.headers,
                     [int(header, 16) for header in (stale_cfcheckpt, )])

        self.log.info("Check that peers can fetch cfheaders on active chain.")
        request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                                   start_height=1,
                                   stop_hash=int(main_block_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        main_cfhashes = response.hashes
        assert_equal(len(main_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(main_cfcheckpt, 16))

        self.log.info("Check that peers can fetch cfheaders on stale chain.")
        request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                                   start_height=1,
                                   stop_hash=int(stale_block_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        stale_cfhashes = response.hashes
        assert_equal(len(stale_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(stale_cfcheckpt, 16))

        self.log.info("Check that peers can fetch cfilters.")
        stop_hash = self.nodes[0].getblockhash(10)
        request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                                  start_height=1,
                                  stop_hash=int(stop_hash, 16))
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 10)

        self.log.info("Check that cfilter responses are correct.")
        for cfilter, cfhash, height in zip(response, main_cfhashes,
                                           range(1, 11)):
            block_hash = self.nodes[0].getblockhash(height)
            assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
            assert_equal(cfilter.block_hash, int(block_hash, 16))
            computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
            assert_equal(computed_cfhash, cfhash)

        self.log.info("Check that peers can fetch cfilters for stale blocks.")
        request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                                  start_height=1000,
                                  stop_hash=int(stale_block_hash, 16))
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 1)

        cfilter = response[0]
        assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
        assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
        computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
        assert_equal(computed_cfhash, stale_cfhashes[999])

        self.log.info(
            "Requests to node 1 without NODE_COMPACT_FILTERS result in disconnection."
        )
        requests = [
            msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                             stop_hash=int(main_block_hash, 16)),
            msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                             start_height=1000,
                             stop_hash=int(main_block_hash, 16)),
            msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                            start_height=1000,
                            stop_hash=int(main_block_hash, 16)),
        ]
        for request in requests:
            node1 = self.nodes[1].add_p2p_connection(P2PInterface())
            node1.send_message(request)
            node1.wait_for_disconnect()

        self.log.info("Check that invalid requests result in disconnection.")
        requests = [
            # Requesting too many filters results in disconnection.
            msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                            start_height=0,
                            stop_hash=int(main_block_hash, 16)),
            # Requesting too many filter headers results in disconnection.
            msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                             start_height=0,
                             stop_hash=int(tip_hash, 16)),
            # Requesting unknown filter type results in disconnection.
            msg_getcfcheckpt(filter_type=255,
                             stop_hash=int(main_block_hash, 16)),
            # Requesting unknown hash results in disconnection.
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=123456789,
            ),
        ]
        for request in requests:
            node0 = self.nodes[0].add_p2p_connection(P2PInterface())
            node0.send_message(request)
            node0.wait_for_disconnect()
Exemplo n.º 22
0
    def run_test(self):
        self.log.info("Add 8 outbounds to node 0")
        for i in range(8):
            self.log.info(f"outbound: {i}")
            self.nodes[0].add_outbound_p2p_connection(
                P2PInterface(),
                p2p_idx=i,
                connection_type="outbound-full-relay")

        self.log.info("Add 2 block-relay-only connections to node 0")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            # set p2p_idx based on the outbound connections already open to the
            # node, so add 8 to account for the previous full-relay connections
            self.nodes[0].add_outbound_p2p_connection(
                P2PInterface(),
                p2p_idx=i + 8,
                connection_type="block-relay-only")

        self.log.info("Add 2 block-relay-only connections to node 1")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            self.nodes[1].add_outbound_p2p_connection(
                P2PInterface(), p2p_idx=i, connection_type="block-relay-only")

        self.log.info("Add 5 inbound connections to node 1")
        for i in range(5):
            self.log.info(f"inbound: {i}")
            self.nodes[1].add_p2p_connection(P2PInterface())

        self.log.info("Add 8 outbounds to node 1")
        for i in range(8):
            self.log.info(f"outbound: {i}")
            # bump p2p_idx to account for the 2 existing outbounds on node 1
            self.nodes[1].add_outbound_p2p_connection(P2PInterface(),
                                                      p2p_idx=i + 2)

        self.log.info("Check the connections opened as expected")
        check_node_connections(node=self.nodes[0], num_in=0, num_out=10)
        check_node_connections(node=self.nodes[1], num_in=5, num_out=10)

        self.log.info("Disconnect p2p connections & try to re-open")
        self.nodes[0].disconnect_p2ps()
        check_node_connections(node=self.nodes[0], num_in=0, num_out=0)

        self.log.info("Add 8 outbounds to node 0")
        for i in range(8):
            self.log.info(f"outbound: {i}")
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(),
                                                      p2p_idx=i)
        check_node_connections(node=self.nodes[0], num_in=0, num_out=8)

        self.log.info("Add 2 block-relay-only connections to node 0")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            # bump p2p_idx to account for the 8 existing outbounds on node 0
            self.nodes[0].add_outbound_p2p_connection(
                P2PInterface(),
                p2p_idx=i + 8,
                connection_type="block-relay-only")
        check_node_connections(node=self.nodes[0], num_in=0, num_out=10)

        self.log.info("Restart node 0 and try to reconnect to p2ps")
        self.restart_node(0)

        self.log.info("Add 4 outbounds to node 0")
        for i in range(4):
            self.log.info(f"outbound: {i}")
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(),
                                                      p2p_idx=i)
        check_node_connections(node=self.nodes[0], num_in=0, num_out=4)

        self.log.info("Add 2 block-relay-only connections to node 0")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            # bump p2p_idx to account for the 4 existing outbounds on node 0
            self.nodes[0].add_outbound_p2p_connection(
                P2PInterface(),
                p2p_idx=i + 4,
                connection_type="block-relay-only")
        check_node_connections(node=self.nodes[0], num_in=0, num_out=6)

        check_node_connections(node=self.nodes[1], num_in=5, num_out=10)
Exemplo n.º 23
0
    def run_test(self):
        # Setup the p2p connections
        # test_node connects to node0 (not whitelisted)
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (whitelisted)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

        # 1. Have nodes mine a block (leave IBD)
        [self.generatetoaddress(n, 1, n.get_deterministic_priv_key().address)
         for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(
                tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_and_ping(msg_block(blocks_h2[0]))
        min_work_node.send_and_ping(msg_block(blocks_h2[1]))

        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info(
            "First height 2 block accepted by node0; correctly rejected by node1")

        # 3. Send another block that builds on genesis.
        block_h1f = create_block(
            int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_and_ping(msg_block(block_h1f))

        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, block_h1f.hash)

        # 4. Send another two blocks that build on the fork.
        block_h2f = create_block(
            block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_and_ping(msg_block(block_h2f))

        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found

        # But this block should be accepted by the node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")

        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(
            block_h2f.sha256, create_coinbase(3), block_h2f.nTime + 1)
        block_h3.solve()
        test_node.send_and_ping(msg_block(block_h3))

        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        self.nodes[0].getblock(block_h3.hash)

        # But this block should be accepted by the node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")

        # 4c. Now mine 288 more blocks and deliver; the node should process all
        # of them except the last one, whose height is too far beyond the
        # current tip (as long as it is not missing any headers).
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(
                tip.sha256, create_coinbase(i + 4), tip.nTime + 1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block

        # Now send the block at height 5 and check that it wasn't accepted
        # (missing header)
        test_node.send_and_ping(msg_block(all_blocks[1]))
        assert_raises_rpc_error(-5, "Block not found",
                                self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found",
                                self.nodes[0].getblockheader, all_blocks[1].hash)
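        # (Unlike the -1 "Block not found on disk" error seen earlier, the -5
        # "Block not found" error means the hash is entirely unknown to the
        # node: the block was discarded because its header had not been seen.)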

        # The block at height 5 should be accepted if we provide the missing
        # header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_and_ping(msg_block(all_blocks[1]))
        self.nodes[0].getblock(all_blocks[1].hash)

        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()

        # Blocks 1-287 should be accepted, block 288 should be ignored because
        # it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(
            -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        # 5. Test handling of an unrequested block on the node that didn't
        # process it. The block should still not be processed (even though it
        # has a child with more work).

        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()

        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

        test_node.send_and_ping(msg_block(block_h1f))
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info(
            "Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with p2p_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))

        test_node.sync_with_ping()
        with p2p_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_and_ping(msg_block(block_h1f))
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info(
            "Successfully reorged to longer chain from non-whitelisted peer")

        # 8. Create a chain which is invalid at a height above the current tip,
        # but which has more blocks on top of that invalid block
        block_289f = create_block(
            all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime + 1)
        block_289f.solve()
        block_290f = create_block(
            block_289f.sha256, create_coinbase(290), block_289f.nTime + 1)
        block_290f.solve()
        block_291 = create_block(
            block_290f.sha256, create_coinbase(291), block_290f.nTime + 1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_tx_with_script(
            block_290f.vtx[0], 0, script_sig=b"42", amount=1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(
            block_291.sha256, create_coinbase(292), block_291.nTime + 1)
        block_292.solve()

        # Now send all the headers on the chain and enough blocks to trigger
        # reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_and_ping(headers_message)

        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, block_292.hash)

        test_node.send_message(msg_block(block_289f))
        test_node.send_and_ping(msg_block(block_290f))

        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)

        test_node.send_message(msg_block(block_291))

        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()

            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())

        # We should have failed reorg and switched back to 290 (but have block
        # 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(
            block_291.hash)["confirmations"], -1)

        # Now send a new header on the invalid chain, indicating we're forked
        # off, and expect to get disconnected
        block_293 = create_block(
            block_292.sha256, create_coinbase(293), block_292.nTime + 1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()

        # 9. Connect node1 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], self.nodes[1])
        self.sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
Example No. 24
    def run_test(self):
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
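        # (Fingerprint protection in the node is assumed to refuse to serve
        # blocks and headers that are off the active chain and roughly a month
        # older than the best known header; starting 60 days in the past lets
        # the stale fork cross that threshold once mocktime is reset below.)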

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generatetoaddress(
            10, self.nodes[0].get_deterministic_priv_key().address)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata([x.sha256 for x in new_blocks])
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        node0.wait_for_block(stale_hash, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        node0.wait_for_header(hex(stale_hash), timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        block_hash = int(
            self.nodes[0].generatetoaddress(
                1, self.nodes[0].get_deterministic_priv_key().address)[-1], 16)
        assert_equal(self.nodes[0].getblockcount(), 14)
        node0.wait_for_block(block_hash, timeout=3)

        # Request for very old stale block should now fail
        with p2p_lock:
            node0.last_message.pop("block", None)
        self.send_block_request(stale_hash, node0)
        node0.sync_with_ping()
        assert "block" not in node0.last_message

        # Request for very old stale block header should now fail
        with p2p_lock:
            node0.last_message.pop("headers", None)
        self.send_header_request(stale_hash, node0)
        node0.sync_with_ping()
        assert "headers" not in node0.last_message

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        node0.wait_for_block(block_hash, timeout=3)

        self.send_header_request(block_hash, node0)
        node0.wait_for_header(hex(block_hash), timeout=3)
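
The build_chain, send_block_request and send_header_request helpers used above are defined elsewhere in the test class. A minimal sketch of what they are assumed to look like, built on the standard test_framework message types (msg_getdata, msg_getheaders, CInv, MSG_BLOCK):

    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        # Build and solve nblocks new blocks on top of prev_hash.
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks

    def send_block_request(self, block_hash, node):
        # Ask the peer for the full block via a getdata message.
        msg = msg_getdata()
        msg.inv.append(CInv(MSG_BLOCK, block_hash))
        node.send_message(msg)

    def send_header_request(self, block_hash, node):
        # Ask for the header by using the hash as the getheaders hash stop.
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)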
Example No. 25
    def run_test(self):
        block_relay_peer = self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info(
            'Check that txs from p2p are rejected and result in disconnect')
        prevtx = self.nodes[0].getblock(self.nodes[0].getblockhash(1),
                                        2)['tx'][0]
        rawtx = self.nodes[0].createrawtransaction(
            inputs=[{
                'txid': prevtx['txid'],
                'vout': 0
            }],
            outputs=[{
                self.nodes[0].get_deterministic_priv_key().address:
                50 - 0.00125
            }],
        )
        sigtx = self.nodes[0].signrawtransactionwithkey(
            hexstring=rawtx,
            privkeys=[self.nodes[0].get_deterministic_priv_key().key],
            prevtxs=[{
                'txid': prevtx['txid'],
                'vout': 0,
                'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
            }],
        )['hex']
        assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)
        with self.nodes[0].assert_debug_log(
            ['transaction sent in violation of protocol peer=0']):
            block_relay_peer.send_message(
                msg_tx(FromHex(CTransaction(), sigtx)))
            block_relay_peer.wait_for_disconnect()
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)

        # Remove the disconnected peer and add a new one.
        del self.nodes[0].p2ps[0]
        tx_relay_peer = self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info(
            'Check that txs from rpc are not rejected and relayed to other peers'
        )
        assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
        txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
        with self.nodes[0].assert_debug_log(
            ['received getdata for: wtx {} peer=1'.format(txid)]):
            self.nodes[0].sendrawtransaction(sigtx)
            tx_relay_peer.wait_for_tx(txid)
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)

        self.log.info(
            'Check that txs from peers with relay-permission are not rejected and relayed to others'
        )
        self.log.info("Restarting node 0 with relay permission and blocksonly")
        self.restart_node(0, [
            "-persistmempool=0", "[email protected]", "-blocksonly",
            '-deprecatedrpc=whitelisted'
        ])
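        # (Both test peers below connect from 127.0.0.1, so each inbound
        # connection is expected to be granted the 'relay' permission by the
        # whitelist entry above, even though the node runs with -blocksonly.)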
        assert_equal(self.nodes[0].getrawmempool(), [])
        first_peer = self.nodes[0].add_p2p_connection(P2PInterface())
        second_peer = self.nodes[0].add_p2p_connection(P2PInterface())
        peer_1_info = self.nodes[0].getpeerinfo()[0]
        assert_equal(peer_1_info['permissions'], ['relay'])
        peer_2_info = self.nodes[0].getpeerinfo()[1]
        assert_equal(peer_2_info['permissions'], ['relay'])
        assert_equal(self.nodes[0].testmempoolaccept([sigtx])[0]['allowed'],
                     True)
        txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']

        self.log.info(
            'Check that the tx from first_peer with relay-permission is relayed to others (ie.second_peer)'
        )
        with self.nodes[0].assert_debug_log(["received getdata"]):
            # Note that normally, first_peer would never send us transactions since we're a blocksonly node.
            # By activating blocksonly, we explicitly tell our peers that they should not send us transactions,
            # and Bitcoin Core respects that choice and will not send transactions.
            # But if, for some reason, first_peer decides to relay transactions to us anyway, we should relay them to
            # second_peer since we gave relay permission to first_peer.
            # See https://github.com/bitcoin/bitcoin/issues/19943 for details.
            first_peer.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
            self.log.info(
                'Check that the peer with relay-permission is still connected after sending the transaction'
            )
            assert_equal(first_peer.is_connected, True)
            second_peer.wait_for_tx(txid)
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
        self.log.info(
            "Relay-permission peer's transaction is accepted and relayed")
Example No. 26
    def run_test(self):
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PInterface())

        node_deterministic_address = node.get_deterministic_priv_key().address
        # Mine one period worth of blocks
        self.generatetoaddress(node, VB_PERIOD, node_deterministic_address)

        self.log.info(
            "Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version."
        )
        # Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(peer, VB_THRESHOLD - 1,
                                      VB_UNKNOWN_VERSION)
        self.generatetoaddress(node, VB_PERIOD - VB_THRESHOLD + 1,
                               node_deterministic_address)

        # Check that we're not getting any versionbit-related errors in get*info()
        assert not VB_PATTERN.match(node.getmininginfo()["warnings"])
        assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"])

        self.log.info(
            "Check that there is a warning if >50 blocks in the last 100 were an unknown version schema"
        )
        # Build UNKNOWN_VERSION_SCHEMA_THRESHOLD blocks signaling some unknown schema
        self.send_blocks_with_version(peer, UNKNOWN_VERSION_SCHEMA_THRESHOLD,
                                      UNKNOWN_VERSION_SCHEMA)
        # Check that get*info() shows the 51/100 unknown block version error.
        assert (WARN_UNKNOWN_RULES_MINED in node.getmininginfo()["warnings"])
        assert (WARN_UNKNOWN_RULES_MINED in node.getnetworkinfo()["warnings"])
        # Close the period normally
        self.generatetoaddress(node,
                               VB_PERIOD - UNKNOWN_VERSION_SCHEMA_THRESHOLD,
                               node_deterministic_address)
        # Make sure the warning remains
        assert (WARN_UNKNOWN_RULES_MINED in node.getmininginfo()["warnings"])
        assert (WARN_UNKNOWN_RULES_MINED in node.getnetworkinfo()["warnings"])

        # Stop-start the node, and make sure the warning is gone
        self.restart_node(0)
        assert (WARN_UNKNOWN_RULES_MINED
                not in node.getmininginfo()["warnings"])
        assert (WARN_UNKNOWN_RULES_MINED
                not in node.getnetworkinfo()["warnings"])
        peer = node.add_p2p_connection(P2PInterface())

        self.log.info(
            "Check that there is a warning if >50 blocks in the last 100 were an unknown version"
        )
        # Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(peer, VB_THRESHOLD, VB_UNKNOWN_VERSION)
        self.generatetoaddress(node, VB_PERIOD - VB_THRESHOLD,
                               node_deterministic_address)

        # Check that get*info() shows the 51/100 unknown block version error.
        assert (WARN_UNKNOWN_RULES_MINED in node.getmininginfo()["warnings"])
        assert (WARN_UNKNOWN_RULES_MINED in node.getnetworkinfo()["warnings"])

        self.log.info(
            "Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version."
        )
        # Mine a period worth of expected blocks so the generic block-version warning
        # is cleared. This will move the versionbit state to ACTIVE.
        self.generatetoaddress(node, VB_PERIOD, node_deterministic_address)

        # Stop-start the node. This is required because bitcoind will only warn once about unknown versions or unknown rules activating.
        self.restart_node(0)

        # Generating one block guarantees that we'll get out of IBD
        self.generatetoaddress(node, 1, node_deterministic_address)
        self.wait_until(
            lambda: not node.getblockchaininfo()['initialblockdownload'])
        # Generating one more block will be enough to generate an error.
        self.generatetoaddress(node, 1, node_deterministic_address)
        # Check that get*info() shows the versionbits unknown rules warning
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
        # Check that the alert file shows the versionbits unknown rules warning
        self.wait_until(lambda: self.versionbits_in_alert_file())
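
The send_blocks_with_version and versionbits_in_alert_file helpers are not shown in this excerpt. A minimal sketch of what they are assumed to do; the alert file path (self.alert_filename) and the reuse of VB_PATTERN are assumptions about the test's setup:

    def send_blocks_with_version(self, peer, numblocks, version):
        # Build numblocks blocks with the given nVersion and submit them via p2p.
        tip = self.nodes[0].getbestblockhash()
        height = self.nodes[0].getblockcount()
        block_time = self.nodes[0].getblockheader(tip)["time"] + 1
        tip = int(tip, 16)
        for _ in range(numblocks):
            block = create_block(tip, create_coinbase(height + 1), block_time)
            block.nVersion = version
            block.solve()
            peer.send_message(msg_block(block))
            block_time += 1
            height += 1
            tip = block.sha256
        peer.sync_with_ping()

    def versionbits_in_alert_file(self):
        # Return True once the -alertnotify output file contains the warning.
        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text = f.read()
        return VB_PATTERN.search(alert_text) is not None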