Example No. 1
    def _init(self):
        node_no = 0

        # Create a P2P connection
        node = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[node_no], node)
        node.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[node_no].getbestblockhash(), 16))
        block = self.chain.next_block(self.block_count)
        self.block_count += 1
        self.chain.save_spendable_output()
        node.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(self.block_count)
            self.block_count += 1
            self.chain.save_spendable_output()
            node.send_message(msg_block(block))

        self.log.info("Waiting for block height 101 via rpc")
        self.nodes[node_no].waitforblockheight(101)

        return node
    def run_test(self):
        # Create a P2P connection to the first node
        node0 = NodeConnCB()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Out of IBD
        self.nodes[0].generate(1)

        # Create shortcuts.
        conn = connections[0]
        rpc = conn.rpc

        # Use p2p interface.
        self.test_case(rpcsend=None, conn=conn)
        # Use sendrawtransaction rpc interface.
        self.test_case(rpc.sendrawtransaction)
        # Use sendrawtransactions rpc interface.
        self.test_case(rpc.sendrawtransactions)
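
The connection boilerplate above (NodeConnCB, NodeConn, NetworkThread, wait_for_verack) recurs in nearly every example on this page. A minimal helper sketch that factors it out, assuming the same mininode imports the examples already use (hypothetical helper, not part of the original tests):

    def make_p2p_connection(test, node_index=0):
        # Create a mininode callback and attach it to the target node.
        cb = NodeConnCB()
        conn = NodeConn('127.0.0.1', p2p_port(node_index),
                        test.nodes[node_index], cb)
        cb.add_connection(conn)
        # Start network handling; with several connections, start it once
        # after all connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        cb.wait_for_verack()
        return cb, conn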
Example No. 3
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        # send one block to get out of IBD state
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        node0.send_message(msg_block(block))

        self.nodes[0].waitforblockheight(1)

        block = self.chain.next_block(block_count)

        # set block validating status to wait after validation
        self.nodes[0].waitaftervalidatingblock(block.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block))
        node1.send_message(msg_block(block))

        # make sure we started validating blocks.
        # One is being validated, the other is ignored.
        wait_for_validating_blocks({block.hash}, self.nodes[0], self.log)

        def wait_for_log():
            line_text = block.hash + " will not be considered by the current"
            for line in open(glob.glob(self.options.tmpdir + "/node0" + "/regtest/bitcoind.log")[0]):
                if line_text in line:
                    self.log.info("Found line: %s", line)
                    return True
            return False

        # wait for the log of the ignored block.
        wait_until(wait_for_log)

        # remove block validating status to finish validation
        self.nodes[0].waitaftervalidatingblock(block.hash, "remove")

        # wait till validation of block finishes
        node0.sync_with_ping()

        self.nodes[0].waitforblockheight(2)
        assert_equal(block.hash, self.nodes[0].getbestblockhash())
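
The inline wait_for_log above does the same job as the check_for_log_msg helper used by the later examples in this section. A sketch of how such a helper could look under the same tmpdir layout (glob over <tmpdir>/<node_dir>/regtest/bitcoind.log); the real framework helper may differ:

    def check_for_log_msg(test, log_msg, node_dir):
        # Grep the node's debug log for a given substring (sketch).
        log_path = glob.glob(test.options.tmpdir + node_dir + "/regtest/bitcoind.log")[0]
        for line in open(log_path):
            if log_msg in line:
                test.log.info("Found line: %s", line.strip())
                return True
        return False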
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        _, out, block_count = prepare_init_chain(self.chain,
                                                 101,
                                                 100,
                                                 start_block=0,
                                                 block_0=False,
                                                 node=node0)

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        block1 = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1
        # send the block but hold it at the validation point
        self.nodes[0].waitaftervalidatingblock(block1.hash, "add")
        node0.send_message(msg_block(block1))
        self.log.info(f"block1 hash: {block1.hash}")

        # make sure block hash is in waiting list
        wait_for_waiting_blocks({block1.hash}, self.nodes[0], self.log)

        # send child block
        block2 = self.chain.next_block(block_count,
                                       spend=out[1],
                                       extra_txns=10)
        block_count += 1
        node0.send_message(msg_block(block2))
        self.log.info(f"block2 hash: {block2.hash}")

        wait_until(lambda: check_for_log_msg(
            self, block2.hash + " will not be considered by the current",
            "/node0"))

        self.nodes[0].waitaftervalidatingblock(block1.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block that arrived last on competing chain should be active
        assert_equal(block2.hash, self.nodes[0].getbestblockhash())
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # send one block to get out of IBD state
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        ancestor_block_hash = self.nodes[0].getbestblockhash()

        parent_block = self.chain.next_block(block_count)
        block_count += 1

        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(parent_block)]
        connection.cb.send_message(headers_message)

        child_block = self.chain.next_block(block_count)
        node0.send_message(msg_block(child_block))

        # wait till validation of block finishes
        node0.sync_with_ping()

        assert_equal(ancestor_block_hash, self.nodes[0].getbestblockhash())

        self.stop_node(0)
        self.start_node(0)

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        assert_equal(ancestor_block_hash, self.nodes[0].getbestblockhash())

        node0.send_message(msg_block(parent_block))

        # wait till validation of block finishes
        node0.sync_with_ping()

        assert_equal(child_block.hash, self.nodes[0].getbestblockhash())
Example No. 6
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        # send one block to get out of IBD state
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        node0.send_message(msg_block(block))

        self.nodes[0].waitforblockheight(1)

        block = self.chain.next_block(block_count)

        # set block validating status to wait after validation
        self.nodes[0].waitaftervalidatingblock(block.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block))
        node1.send_message(msg_block(block))

        # make sure we started validating blocks.
        # One is being validated, the other is ignored.
        wait_for_validating_blocks({block.hash}, self.nodes[0], self.log)

        # wait for the log of the ignored block.
        wait_until(lambda: check_for_log_msg(self, block.hash + " will not be considered by the current", "/node0"))

        # remove block validating status to finish validation
        self.nodes[0].waitaftervalidatingblock(block.hash, "remove")

        # wait till validation of block finishes
        node0.sync_with_ping()

        self.nodes[0].waitforblockheight(2)
        assert_equal(block.hash, self.nodes[0].getbestblockhash())
Example No. 7
    def run_test(self):
        # connect a mininode
        aTestNode = NodeConnCB()
        node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
        aTestNode.add_connection(node)
        NetworkThread().start()
        aTestNode.wait_for_verack()

        # request mempool
        aTestNode.send_message(MsgMempool())
        aTestNode.wait_for_disconnect()

        # mininode must be disconnected at this point
        assert_equal(len(self.nodes[0].getpeerinfo()), 0)
Example No. 8
    def run_test(self):
        node0 = NodeConnCB()
        node0.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))

        NetworkThread().start()
        node0.wait_for_verack()

        self.log.info("#1. generate 1 block by node0==================================================================")
        self.nodes[0].generate(nblocks=1)
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1

        self.height = 1
        self.coinbase_txs = []

        self.log.info("#2. create 100 blocks and send to node0========================================================")
        for i in range(100):
            coinbase_tx = create_coinbase(self.height)
            self.coinbase_txs.append(coinbase_tx)
            self.create_block_and_send([coinbase_tx], node0)

        self.nodes[0].waitforblockheight(101)

        self.fork_point_hash = self.tip
        self.fork_height = self.height

        self.log.info("#3. create one fork chain with one block=======================================================")
        for i in range(1):
            block_fee, txns = self.create_txns_from(self.coinbase_txs[i], 2)
            coinbase = create_coinbase(self.height, None, block_fee)
            self.create_block_and_send([coinbase] + txns, node0)

        self.nodes[0].waitforblockheight(102)

        self.log.info("#4. create another fork chain with two blocks==================================================")
        self.tip = self.fork_point_hash
        self.height = self.fork_height

        for i in range(2):
            block_fee, txns = self.create_txns_from(self.coinbase_txs[i], 2)
            coinbase = create_coinbase(self.height, None, block_fee)
            self.create_block_and_send([coinbase] + txns, node0)

        self.log.info("#5. expect node0 switch to new chain===========================================================")
        self.nodes[0].waitforblockheight(103)
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        node0.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        node0.send_message(msg_block(block))

        block = self.chain.next_block(block_count)
        self.log.info(f"block hash: {block.hash}")
        self.nodes[0].waitaftervalidatingblock(block.hash, "add")

        # make sure block hash is in waiting list
        wait_for_waiting_blocks({block.hash}, self.nodes[0], self.log)

        self.log.info("sending block")
        node0.send_message(msg_block(block))

        # make sure we started validating block
        wait_for_validating_blocks({block.hash}, self.nodes[0], self.log)

        # sleep a bit and check that in the meantime validation hasn't proceeded
        time.sleep(1)
        assert (block.hash != self.nodes[0].getbestblockhash())

        # after validating the block we release its waiting status
        self.nodes[0].waitaftervalidatingblock(block.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        assert_equal(block.hash, self.nodes[0].getbestblockhash())
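
The waitaftervalidatingblock "add"/"remove" pairing used above is the core pattern of these parallel-validation tests: "add" registers the hash so the node pauses after validating that block, "remove" lets it finish. A small context-manager sketch of that lifecycle (hypothetical helper, reusing the wait_for_waiting_blocks helper from these examples):

    from contextlib import contextmanager

    @contextmanager
    def validation_paused(node, block_hash, log):
        # Register the hash so the node will pause after validating the block.
        node.waitaftervalidatingblock(block_hash, "add")
        wait_for_waiting_blocks({block_hash}, node, log)
        try:
            yield
        finally:
            # Release the block so validation can complete.
            node.waitaftervalidatingblock(block_hash, "remove")

Usage would mirror the test above: send the block inside the with-block, then assert on the best block hash after it exits.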
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # send one block to get out of IBD state
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        node0.send_message(msg_block(block))

        self.nodes[0].waitforblockheight(1)

        block_count = self.__test_getdata(node0, block_count)
        block_count = self.__test_getblocks(node0, block_count)
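
The two private helpers called above are not shown on this page. A rough sketch of what a __test_getdata step might look like, assuming msg_getdata, CInv and mininode_lock from the same mininode module used elsewhere in these examples (inv type 2 = block); this is an illustration, not the original helper:

    def __test_getdata(self, conn, block_count):
        # Mine one more block on our private chain and announce it.
        block = self.chain.next_block(block_count)
        block_count += 1
        conn.send_message(msg_block(block))
        conn.sync_with_ping()

        # Ask for the block back via getdata and check a block message arrives.
        getdata = msg_getdata()
        getdata.inv.append(CInv(2, block.sha256))
        conn.send_message(getdata)
        conn.sync_with_ping()
        with mininode_lock:
            assert "block" in conn.last_message
        return block_count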
Example No. 11
    def run_test(self):

        # Turn on a webhook server
        self.start_webhook_server()

        # Create a P2P connection
        node = self.nodes[0]
        peer = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), node, peer)
        peer.add_connection(connection)
        NetworkThread().start()
        peer.wait_for_verack()

        # Create an initial block with a coinbase we will split into multiple utxos
        initialBlock, _ = make_block(connection)
        coinbaseTx = initialBlock.vtx[0]

        send_by_headers(connection, [initialBlock], do_send_blocks=True)
        wait_for_tip(connection, initialBlock.hash)

        node.generate(101)
        block101hex = node.getblock(node.getbestblockhash(), False)
        block101dict = node.getblock(node.getbestblockhash(), 2)
        block101 = FromHex(CBlock(), block101hex)
        block101.height = block101dict['height']
        block101.rehash()

        # Create a block with a transaction spending coinbaseTx of a previous block and making multiple outputs for future transactions to spend
        utxoBlock, _ = make_block(connection, parent_block=block101)
        utxoTx = create_tx(coinbaseTx, 0, 1 * COIN)

        # Create 48 additional outputs (leaving 1 COIN as the fee)
        for _ in range(48):
            utxoTx.vout.append(CTxOut(1 * COIN, CScript([OP_TRUE])))
        # Add to block
        utxoTx.rehash()

        utxoBlock.vtx.append(utxoTx)
        utxoBlock.hashMerkleRoot = utxoBlock.calc_merkle_root()
        utxoBlock.solve()

        send_by_headers(connection, [utxoBlock], do_send_blocks=True)
        wait_for_tip(connection, utxoBlock.hash)

        # Make sure serialization/deserialization works as expected
        # Create dsdetected message. The content is not important here.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(utxoBlock),
                 CBlockHeader(initialBlock)],
                DSMerkleProof(1, utxoTx, utxoBlock.hashMerkleRoot,
                              [MerkleProofNode(utxoBlock.vtx[0].sha256)]))
        ])
        dsdBytes = dsdMessage.serialize()
        dsdMessageDeserialized = msg_dsdetected()
        dsdMessageDeserialized.deserialize(BytesIO(dsdBytes))
        assert_equal(str(dsdMessage), str(dsdMessageDeserialized))

        # Send a message containing random bytes. Webhook should not receive the notification.
        peer.send_and_ping(fake_msg_dsdetected())
        assert_equal(self.get_JSON_notification(), None)

        # Create three blocks (A, B and F) with transactions spending the same utxo
        blockA, _ = make_block(connection, parent_block=utxoBlock)
        blockB, _ = make_block(connection, parent_block=utxoBlock)
        blockF, _ = make_block(connection, parent_block=utxoBlock)
        txA = create_tx(utxoBlock.vtx[1], 0, int(0.8 * COIN))
        txB = create_tx(utxoBlock.vtx[1], 0, int(0.9 * COIN))
        txF = create_tx(utxoBlock.vtx[1], 0, int(0.7 * COIN))
        txA.rehash()
        txB.rehash()
        txF.rehash()
        blockA.vtx.append(txA)
        blockB.vtx.append(txB)
        blockF.vtx.append(txF)
        blockA.hashMerkleRoot = blockA.calc_merkle_root()
        blockB.hashMerkleRoot = blockB.calc_merkle_root()
        blockF.hashMerkleRoot = blockF.calc_merkle_root()
        blockA.calc_sha256()
        blockB.calc_sha256()
        blockF.calc_sha256()
        blockA.solve()
        blockB.solve()
        blockF.solve()

        start_banscore = node.getpeerinfo()[0]['banscore']

        # Webhook should not receive the notification if we send dsdetected message with only one block detail.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send a dsdetected message with two block details where one contains no headers.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send a dsdetected message where the last headers in the block details do not share a common previous block hash.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(utxoBlock)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send a dsdetected message where the block details do not have headers in the proper order.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(utxoBlock),
                 CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the empty merkle proof.
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails([CBlockHeader(blockB)], DSMerkleProof())
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong index in the merkle proof (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(0, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong transaction in the merkle proof (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txA, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong merkle root (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the wrong merkle proof (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockA.hashMerkleRoot)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send dsdetected message with the merkle proof having an additional unexpected node (merkle root validation should fail)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails([CBlockHeader(blockB)],
                         DSMerkleProof(1, txB, blockB.hashMerkleRoot, [
                             MerkleProofNode(blockB.vtx[0].sha256),
                             MerkleProofNode(blockA.hashMerkleRoot)
                         ]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send a dsdetected message with a valid proof, but the transaction is a coinbase transaction
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(0, blockB.vtx[0], blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[1].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if we send a dsdetected message with transactions that are not double spends
        # Create a block similar to the one before, but with a transaction spending a different utxo
        blockC, _ = make_block(connection, parent_block=utxoBlock)
        txC = create_tx(utxoBlock.vtx[1], 1, int(0.7 * COIN))
        blockC.vtx.append(txC)
        blockC.hashMerkleRoot = blockC.calc_merkle_root()
        blockC.solve()
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockC)],
                DSMerkleProof(1, txC, blockC.hashMerkleRoot,
                              [MerkleProofNode(blockC.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if the two double-spending transactions are actually the same transaction (having the same txid)
        # Create a block similar to the one before, but containing the same transaction txA
        blockD, _ = make_block(connection, parent_block=utxoBlock)
        blockD.vtx.append(txA)
        blockD.hashMerkleRoot = blockD.calc_merkle_root()
        blockD.solve()
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockD)],
                DSMerkleProof(1, txA, blockD.hashMerkleRoot,
                              [MerkleProofNode(blockD.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Webhook should not receive the notification if the header does not satisfy proof-of-work
        # note that PoW is so easy in regtest that a random nonce often satisfies it, hence we have to pick a failing nonce carefully
        blockE, _ = make_block(connection, parent_block=utxoBlock)
        blockE.vtx.append(txB)
        blockE.hashMerkleRoot = blockE.calc_merkle_root()
        nonce = blockE.nNonce
        while True:
            blockE.solve()
            if blockE.nNonce > nonce:
                blockE.nNonce = nonce
                break
            nonce += 1
            blockE.nNonce = nonce

        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockE)],
                DSMerkleProof(1, txB, blockE.hashMerkleRoot,
                              [MerkleProofNode(blockE.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        end_banscore = node.getpeerinfo()[0]['banscore']
        # we have sent 13 negative tests so far, each adding 10 to the banscore
        assert (end_banscore - start_banscore) / 10 == 13

        # Finally, webhook should receive the notification if we send a proper dsdetected message
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        json_notification = self.get_JSON_notification()
        # remove divergentBlockHash so we can compare with the dsdetected message
        assert json_notification is not None
        for e in json_notification['blocks']:
            del e['divergentBlockHash']
        assert_equal(str(dsdMessage),
                     str(msg_dsdetected(json_notification=json_notification)))

        # Repeat previous test but change the order of the BlockDetails, the node should identify this as a duplicate
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockB)],
                DSMerkleProof(1, txB, blockB.hashMerkleRoot,
                              [MerkleProofNode(blockB.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # repeat the previous test but generate many blocks in the node to age the notification message.
        # very old notification messages shall be ignored. We use the same thresholds as safe mode.
        # We will hardcode this threshold for now until the branch we depend on is merged
        node.generate(289)
        dsdMessage = msg_dsdetected(blocksDetails=[
            BlockDetails(
                [CBlockHeader(blockA)],
                DSMerkleProof(1, txA, blockA.hashMerkleRoot,
                              [MerkleProofNode(blockA.vtx[0].sha256)])),
            BlockDetails(
                [CBlockHeader(blockF)],
                DSMerkleProof(1, txF, blockF.hashMerkleRoot,
                              [MerkleProofNode(blockF.vtx[0].sha256)]))
        ])
        peer.send_and_ping(dsdMessage)
        assert_equal(self.get_JSON_notification(), None)

        # Create a number of random valid block trees and send a dsdetected P2P message for each
        maxNumberOfBranches = 10
        maxNumberOfBlocksPerBranch = 30
        for _ in range(10):
            blockTree = self.createRandomBlockTree(maxNumberOfBranches,
                                                   maxNumberOfBlocksPerBranch,
                                                   utxoBlock,
                                                   [utxoBlock.vtx[1]])
            dsdMessage = self.createDsDetectedMessageFromBlockTree(blockTree)
            peer.send_and_ping(dsdMessage)
            # Notification should be received as the generated dsdetected message is valid
            json_notification = self.get_JSON_notification()
            # remove divergentBlockHash so we can compare with the dsdetected message
            assert json_notification is not None
            for e in json_notification['blocks']:
                del e['divergentBlockHash']
            assert_equal(
                str(dsdMessage),
                str(msg_dsdetected(json_notification=json_notification)))

        self.stop_webhook_server()
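
The serialize/deserialize round-trip check appears twice on this page (here and again in MockDsdetector below). As a sketch, it could be factored into a small assertion helper using the same BytesIO-based deserialize (hypothetical helper, not part of the original test):

    def assert_dsdetected_roundtrip(message):
        # Serialize, parse back into a fresh msg_dsdetected, compare string forms.
        raw = message.serialize()
        parsed = msg_dsdetected()
        parsed.deserialize(BytesIO(raw))
        assert_equal(str(message), str(parsed))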
Example No. 12
    def run_test(self):
        # Set up the p2p connections and start up the network thread.
        test_node = NodeConnCB()  # connects to node0 (not whitelisted)
        white_node = NodeConnCB()  # connects to node1 (whitelisted)
        min_work_node = NodeConnCB()  # connects to node2 (not whitelisted)

        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node),
            NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node),
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)
        ]
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])
        min_work_node.add_connection(connections[2])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()
        min_work_node.wait_for_verack()

        # 1. Have nodes mine a block (nodes 1/2 leave IBD)
        [n.generate(1) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted by nodes 1/2
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(3):
            blocks_h2.append(
                create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(MsgBlock(blocks_h2[0]))
        white_node.send_message(MsgBlock(blocks_h2[1]))
        min_work_node.send_message(MsgBlock(blocks_h2[2]))

        for x in [test_node, white_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        assert_equal(self.nodes[2].getblockcount(), 1)
        self.log.info(
            "First height 2 block accepted by node0/node1; correctly rejected by node2"
        )

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(
                create_block(tips[i], create_coinbase(2),
                             blocks_h2[i].nTime + 1))
            blocks_h2f[i].solve()
        test_node.send_message(MsgBlock(blocks_h2f[0]))
        white_node.send_message(MsgBlock(blocks_h2f[1]))

        for x in [test_node, white_node]:
            x.sync_with_ping()
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        self.log.info(
            "Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(
                create_block(blocks_h2f[i].sha256, create_coinbase(3),
                             blocks_h2f[i].nTime + 1))
            blocks_h3[i].solve()
        test_node.send_message(MsgBlock(blocks_h3[0]))
        white_node.send_message(MsgBlock(blocks_h3[1]))

        for x in [test_node, white_node]:
            x.sync_with_ping()
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        self.nodes[0].getblock(blocks_h3[0].hash)
        self.log.info(
            "Unrequested more-work block accepted from non-whitelisted peer")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        self.log.info(
            "Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = MsgHeaders()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256,
                                          create_coinbase(i + 4),
                                          tips[j].nTime + 1)
                next_block.solve()
                if j == 0:
                    test_node.send_message(MsgBlock(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, all_blocks[-1].hash)

        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(MsgBlock(tips[1]))  # Now deliver the tip
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        self.log.info(
            "Unrequested block far ahead of tip accepted from whitelisted peer"
        )

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(MsgBlock(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info(
            "Unrequested block that would complete more-work chain was ignored"
        )

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(MsgInv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(MsgBlock(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.log.info(
            "Successfully reorged to longer chain from non-whitelisted peer")

        # 8. Connect node2 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 2)
        sync_blocks([self.nodes[0], self.nodes[2]])
        self.log.info("Successfully synced nodes 2 and 0")

        [c.disconnect_node() for c in connections]
class MockDsdetector:
    def __init__(self, testRig, node):
        self.peer = NodeConnCB()
        self.peer.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), node, self.peer))
        NetworkThread().start()
        self.peer.wait_for_verack()
        testRig.dsdetector = self
        self.message = None

    def SendDsNotification(self):
        if self.message:
            self.peer.send_and_ping(self.message)

    def CheckForDoubleSpends(self, nodes):
        spent_inputs = []
        seen_transactions = []
        ds_counter = 0
        for node in nodes:
            for height in range(node.getblockcount() + 1):
                blockhash = node.getblockhash(height)
                block = node.getblock(blockhash, 2)
                blockHex = node.getblock(blockhash, False)
                for txraw in block['tx'][1:]:  # exclude coinbase
                    # skip the identical transactions in the two chains, they are not double spends
                    if txraw['txid'] in seen_transactions:
                        continue
                    else:
                        seen_transactions.append(txraw['txid'])
                    for i in txraw['vin']:
                        utxoA = (i['txid'], i['vout'])
                        blockA = FromHex(CBlock(), blockHex)
                        txA = FromHex(CTransaction(), txraw['hex'])
                        foundB = [
                            j for j in spent_inputs if j['utxo'] == utxoA
                        ]
                        if foundB:
                            ds_counter += 1
                            foundB = foundB[0]
                            blockB = foundB['block']
                            txB = foundB['tx']
                            txA.rehash()

                            txB.rehash()

                            blockA.vtx[0].rehash()
                            blockB.vtx[0].rehash()
                            sha256_A = blockA.vtx[0].sha256
                            sha256_B = blockB.vtx[0].sha256

                            dsdMessage = msg_dsdetected(blocksDetails=[
                                BlockDetails([CBlockHeader(blockA)],
                                             DSMerkleProof(
                                                 1, txA, blockA.hashMerkleRoot,
                                                 [MerkleProofNode(sha256_A)])),
                                BlockDetails([CBlockHeader(blockB)],
                                             DSMerkleProof(
                                                 1, txB, blockB.hashMerkleRoot,
                                                 [MerkleProofNode(sha256_B)]))
                            ])
                            self.message = dsdMessage

                            dsdBytes = dsdMessage.serialize()
                            dsdMessageDeserialized = msg_dsdetected()
                            dsdMessageDeserialized.deserialize(
                                BytesIO(dsdBytes))
                            assert_equal(str(dsdMessage),
                                         str(dsdMessageDeserialized))

                            break
                        else:
                            spent_inputs.append({
                                'txid': txraw['txid'],
                                'tx': txA,
                                'utxo': utxoA,
                                'block': blockA
                            })

        return ds_counter
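
A minimal usage sketch for this mock (assumed wiring, not shown in the original example): scan the nodes' chains for conflicting spends, then replay the last dsdetected message that was built.

    detector = MockDsdetector(testRig=self, node=self.nodes[0])
    double_spends = detector.CheckForDoubleSpends(self.nodes)
    if double_spends:
        # re-send the most recently constructed dsdetected message
        detector.SendDsNotification()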
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        num_blocks = 150
        for i in range(num_blocks):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(num_blocks):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 151 via rpc")
        self.nodes[0].waitforblockheight(num_blocks + 1)

        tip_block_num = block_count - 1

        # left branch
        block2 = self.chain.next_block(block_count,
                                       spend=out[0:9],
                                       extra_txns=8)
        block_count += 1
        node0.send_message(msg_block(block2))
        self.log.info(f"block2 hash: {block2.hash}")

        self.nodes[0].waitforblockheight(num_blocks + 2)

        # send blocks 3,4 for parallel validation on left branch
        self.chain.set_tip(tip_block_num)
        block3 = self.chain.next_block(block_count,
                                       spend=out[10:19],
                                       extra_txns=10)
        block_count += 1

        block4 = self.chain.next_block(block_count,
                                       spend=out[20:29],
                                       extra_txns=8)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"block3 hash: {block3.hash}")
        self.log.info(f"block4 hash: {block4.hash}")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block4.hash}, self.nodes[0], self.log)

        node1.send_message(msg_block(block3))
        node1.send_message(msg_block(block4))

        # make sure we started validating blocks
        wait_for_validating_blocks({block4.hash}, self.nodes[0], self.log)

        # right branch
        self.chain.set_tip(tip_block_num)
        block5 = self.chain.next_block(block_count)
        # Add some txns from block2 & block3 to block5, just to check that they get
        # filtered from the mempool and not re-added
        block5_duplicated_txns = block3.vtx[1:3] + block2.vtx[1:3]
        self.chain.update_block(block_count, block5_duplicated_txns)
        block_count += 1
        node0.send_message(msg_block(block5))
        self.log.info(f"block5 hash: {block5.hash}")

        # and two blocks to extend the second branch and cause a reorg
        # - they must be sent from the same connection, as otherwise they will be
        #   rejected with "prev block not found": we don't wait for the first
        #   block to arrive, so there is a race as to which block is seen
        #   first when using multiple connections
        block6 = self.chain.next_block(block_count)
        node0.send_message(msg_block(block6))
        self.log.info(f"block6 hash: {block6.hash}")
        block_count += 1
        block7 = self.chain.next_block(block_count)
        node0.send_message(msg_block(block7))
        self.log.info(f"block7 hash: {block7.hash}")
        block_count += 1

        self.nodes[0].waitforblockheight(num_blocks + 4)
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())

        self.log.info(
            "releasing wait status on parallel blocks to finish their validation"
        )
        self.nodes[0].waitaftervalidatingblock(block4.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block that arrived last on competing chain should be active
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())

        # make sure that transactions from block2 and 3 (except coinbase, and those also
        # in block 5) are in mempool
        not_expected_in_mempool = set()
        for txn in block5_duplicated_txns:
            not_expected_in_mempool.add(txn.hash)
        expected_in_mempool = set()
        for txn in block2.vtx[1:] + block3.vtx[1:]:
            expected_in_mempool.add(txn.hash)
        expected_in_mempool = expected_in_mempool.difference(
            not_expected_in_mempool)

        mempool = self.nodes[0].getrawmempool()
        assert_equal(expected_in_mempool, set(mempool))
Example No. 15
    def run_test(self):
        node0 = NodeConnCB()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that an invalid-according-to-CLTV transaction can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                             block_time)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()
        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason,
                         b'bad-version(0x00000003)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info(
            "Test that invalid-according-to-cltv transactions cannot appear in a block"
        )
        block.nVersion = 4

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for CLTV by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(MsgTx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert node0.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Negative locktime' in node0.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
        )
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
        spendtx.rehash()

        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Example No. 16
    def run_test(self):
        test_node = NodeConnCB()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        test_node.add_connection(connections[0])
        NetworkThread().start()
        test_node.wait_for_verack()

        starting_height = 3
        self.nodes[0].generate(starting_height)

        # Create block with P2SH output and send it to node.
        # It should be validated and accepted.
        block = self.make_block_withP2SH_coinbase()
        test_node.send_message(msg_block(block))
        test_node.sync_with_ping()
        # check if block was accepted
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        # submitblock with P2SH in coinbase tx (not included in blockchain)
        block = self.make_block_withP2SH_coinbase()
        block.solve()
        assert_raises_rpc_error(-26, "bad-txns-vout-p2sh",
                                self.nodes[0].submitblock, ToHex(block))

        # verifyblockcandidate with P2SH in coinbase tx (not included in blockchain)
        assert_raises_rpc_error(-26, "bad-txns-vout-p2sh",
                                self.nodes[0].verifyblockcandidate,
                                ToHex(block))

        # submitblock without P2SH in coinbase tx (included in blockchain)
        hashPrev = int(self.nodes[0].getbestblockhash(), 16)
        ctx = create_coinbase(self.nodes[0].getblockcount() + 1)
        block2 = create_block(hashPrev, ctx)
        block2.solve()
        self.nodes[0].submitblock(ToHex(block2))
        assert_equal(block2.hash, self.nodes[0].getbestblockhash())

        # submit block with: submitminingsolution
        # Add P2SH to coinbase output - should be rejected
        candidate = self.nodes[0].getminingcandidate(False)
        block, ctx = create_block_from_candidate(candidate, False)
        coinbase_tx = create_coinbase_P2SH(self.nodes[0].getblockcount() + 1,
                                           example_script_hash)

        # submitminingsolution with P2SH in coinbase tx - should be denied.
        assert_raises_rpc_error(
            -26, "bad-txns-vout-p2sh", self.nodes[0].submitminingsolution, {
                'id': candidate['id'],
                'nonce': block.nNonce,
                'coinbase': '{}'.format(ToHex(coinbase_tx))
            })
        # submitminingsolution without P2SH in coinbase - should be accepted
        candidate = self.nodes[0].getminingcandidate(False)
        block, ctx = create_block_from_candidate(candidate, False)
        result = self.nodes[0].submitminingsolution({
            'id': candidate['id'],
            'nonce': block.nNonce,
            'coinbase': '{}'.format(ToHex(ctx))
        })
        assert_equal(result, True)
        assert_equal(block.hash, self.nodes[0].getbestblockhash())

        # generatetoaddress with nonP2SH address
        height_before = self.nodes[0].getblockcount()
        address = self.nodes[0].getnewaddress()
        self.nodes[0].generatetoaddress(1, address)
        height_after = self.nodes[0].getblockcount()
        assert_equal(height_before + 1, height_after)

        # generatetoaddress with P2SH address (example for regtest: 2MzQwSSnBHWHqSAqtTVQ6v47XtaisrJa1Vc)
        assert_raises_rpc_error(-26, "bad-txns-vout-p2sh",
                                self.nodes[0].generatetoaddress, 1,
                                '2MzQwSSnBHWHqSAqtTVQ6v47XtaisrJa1Vc')
Exemplo n.º 17
0
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count-1

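        # build three competing sibling blocks on the same parent; the different
        # numbers of extra transactions give them distinct hashes and different
        # validation costs ("hard" vs "easier")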
        block2_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out[0], extra_txns=2)
        easier_block_num = block_count
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=10)
        block_count += 1

        # make child block of easier block
        self.chain.set_tip(easier_block_num)
        block5 = self.chain.next_block(block_count)
        block5_num = block_count
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        # send blocks via different p2p connection
        node0.send_message(msg_block(block2_hard))
        node1.send_message(msg_block(block4_hard))

        # make sure we started validating blocks
        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        # send easier block through different p2p connection too
        node2.send_message(msg_block(block3_easier))
        self.log.info(f"easier block hash: {block3_easier.hash}")
        self.nodes[0].waitforblockheight(102)
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())

        # child block of block3_easier
        self.log.info(f"child block hash: {block5.hash}")
        self.nodes[0].waitaftervalidatingblock(block5.hash, "add")

        # make sure child block is in waiting list and then send it
        wait_for_not_validating_blocks({block5.hash}, self.nodes[0], self.log)
        node2.send_message(msg_block(block5))

        # make sure we started validating child block
        wait_for_validating_blocks({block5.hash}, self.nodes[0], self.log)

        # finish validation on block2_hard
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "remove")
        wait_for_not_validating_blocks({block2_hard.hash}, self.nodes[0], self.log)

        # finish validation on child block
        self.nodes[0].waitaftervalidatingblock(block5.hash, "remove")
        wait_for_not_validating_blocks({block5.hash}, self.nodes[0], self.log)

        # block5 should be active at this point
        assert_equal(block5.hash, self.nodes[0].getbestblockhash())

        # finish validation on block4_hard
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "remove")
        wait_for_not_validating_blocks({block4_hard.hash}, self.nodes[0], self.log)

        # block5 should still be active at this point
        assert_equal(block5.hash, self.nodes[0].getbestblockhash())

        # Make three siblings and send them via same p2p connection.
        block6_hard = self.chain.next_block(block_count, spend=out[1], extra_txns=8)
        block_count += 1

        self.chain.set_tip(block5_num)

        block7_easier = self.chain.next_block(block_count, spend=out[1], extra_txns=2)
        block_count += 1

        self.chain.set_tip(block5_num)

        block8_hard = self.chain.next_block(block_count, spend=out[1], extra_txns=10)
        block_count += 1

        self.log.info(f"hard block6 hash: {block6_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block6_hard.hash, "add")
        self.log.info(f"hard block8 hash: {block8_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block8_hard.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block6_hard.hash, block8_hard.hash}, self.nodes[0], self.log)

        # sending blocks via same p2p connection
        node0.send_message(msg_block(block6_hard))
        node0.send_message(msg_block(block8_hard))

        # make sure we started validating blocks
        wait_for_validating_blocks({block6_hard.hash, block8_hard.hash}, self.nodes[0], self.log)

        # send easier block through same p2p connection too
        node0.send_message(msg_block(block7_easier))

        self.nodes[0].waitforblockheight(104)
        assert_equal(block7_easier.hash, self.nodes[0].getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        self.nodes[0].waitaftervalidatingblock(block6_hard.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block8_hard.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block7_easier.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 18
0
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

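        # prepare_init_chain presumably mines the initial 101-block chain through
        # node0 and returns the spendable outputs collected along the way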
        _, outs, block_count = prepare_init_chain(self.chain, 101, 1, block_0=False, start_block=0, node=node0)
        out = outs[0]

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count, spend=out, extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out, extra_txns=2)
        block_count += 1

        mining_candidate = self.nodes[0].getminingcandidate()
        block4_hard = self.chain.next_block(block_count)
        block4_hard.hashPrevBlock = int(mining_candidate["prevhash"], 16)
        block4_hard.nTime = mining_candidate["time"]
        block4_hard.nVersion = mining_candidate["version"]
        block4_hard.solve()
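        # the solution below reuses the candidate's id, time and version together
        # with the solved nonce and block4_hard's own coinbase, so the node can
        # reassemble the header that was just solved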

        mining_solution = {"id": mining_candidate["id"],
                           "nonce": block4_hard.nNonce,
                           "coinbase": ToHex(block4_hard.vtx[0]),
                           "time": mining_candidate["time"],
                           "version": mining_candidate["version"]}

        # send two "hard" blocks; with waitaftervalidatingblock we artificially
        # extend their validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))

        # make the submitminingsolution RPC call in a separate thread because the
        # waitaftervalidatingblock hold prevents it from returning until validation finishes
        submitminingsolution_thread = threading.Thread(target=self.nodes[0].submitminingsolution, args=(mining_solution,))
        submitminingsolution_thread.start()

        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0), 0,
                                   coveragedir=self.options.coveragedir)

        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, rpc_client, self.log)

        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))

        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitminingsolution_thread.join()

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 19
0
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        network_thread = NetworkThread()
        network_thread.start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        block1_num = block_count - 1

        # num of sig operations in one transaction
        num_of_sig_checks = 70

        expensive_scriptPubKey = [
            OP_DUP, OP_HASH160,
            hash160(self.coinbase_pubkey), OP_EQUALVERIFY, OP_CHECKSIG, OP_DROP
        ] * num_of_sig_checks + [
            OP_DUP, OP_HASH160,
            hash160(self.coinbase_pubkey), OP_EQUALVERIFY, OP_CHECKSIG
        ]
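        # every repeated OP_CHECKSIG/OP_DROP segment adds one more signature check
        # whose result is discarded, making any input that spends this script
        # deliberately expensive to verify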

        money_to_spend = 5000000000
        spend = out[0]

        block2_hard = self.chain.next_block(block_count)

        # creates 4000 hard transactions and 4000 transactions to spend them - 8000 transactions in total
        add_txns = self.get_hard_transactions(
            spend,
            money_to_spend=money_to_spend,
            num_of_transactions=4000,
            num_of_sig_checks=num_of_sig_checks,
            expensive_script=expensive_scriptPubKey)
        self.chain.update_block(block_count, add_txns)
        block_count += 1
        self.log.info(f"block2_hard hash: {block2_hard.hash}")

        self.chain.set_tip(block1_num)
        block3_easier = self.chain.next_block(block_count)
        add_txns = self.get_hard_transactions(
            spend,
            money_to_spend=money_to_spend,
            num_of_transactions=1000,
            num_of_sig_checks=num_of_sig_checks,
            expensive_script=expensive_scriptPubKey)
        self.chain.update_block(block_count, add_txns)
        self.log.info(f"block3_easier hash: {block3_easier.hash}")

        node0.send_message(msg_block(block2_hard))
        node0.send_message(msg_block(block3_easier))

        def wait_for_log():
            text_activation = f"Block {block2_hard.hash} was not activated as best"
            text_block2 = "Verify 8000 txins"
            text_block3 = "Verify 2000 txins"
            results = 0
            for line in open(
                    glob.glob(self.options.tmpdir + "/node0" +
                              "/regtest/bitcoind.log")[0]):
                if text_activation in line:
                    results += 1
                elif text_block2 in line:
                    results += 1
                elif text_block3 in line:
                    results += 1
            return results == 3

        # wait until everything is written to the log;
        # use a large timeout to account for slower machines
        wait_until(wait_for_log, timeout=120)

        text_activation = f"Block {block2_hard.hash} was not activated as best"
        text_block2 = "Verify 8000 txins"
        text_block3 = "Verify 2000 txins"
        for line in open(
                glob.glob(self.options.tmpdir + "/node0" +
                          "/regtest/bitcoind.log")[0]):
            if text_activation in line:
                self.log.info(
                    f"block2_hard was not activated as block3_easy won the validation race"
                )
            elif text_block2 in line:
                line = line.split()
                self.log.info(
                    f"block2_hard took {line[len(line) - 1]} to verify")
            elif text_block3 in line:
                line = line.split()
                self.log.info(
                    f"block3_easy took {line[len(line)-1]} to verify")

        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
        node0.connection.close()
Exemplo n.º 20
0
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out[0], extra_txns=2)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=10)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block2_hard))
        node1.send_message(msg_block(block4_hard))

        # make sure we started validating blocks
        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        self.log.info(f"easier block3 hash: {block3_easier.hash}")
        node2.send_message(msg_block(block3_easier))

        self.nodes[0].waitforblockheight(102)
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 21
0
    def run_test(self):
        # Create a P2P connection to the first node
        node0 = NodeConnCB()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Out of IBD
        self.nodes[0].generate(1)

        # First create funding transaction that pays to output that does not require signatures.
        out_value = 10000
        ftx = CTransaction()
        ftx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
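        # fundrawtransaction adds the inputs (and a change output at the requested
        # position) needed to pay for this anyone-can-spend OP_TRUE output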
        ftxHex = self.nodes[0].fundrawtransaction(
            ToHex(ftx), {'changePosition': len(ftx.vout)})['hex']
        ftxHex = self.nodes[0].signrawtransaction(ftxHex)['hex']
        ftx = FromHex(CTransaction(), ftxHex)
        ftx.rehash()

        # Allow coinbase to mature
        self.nodes[0].generate(101)

        # Feed in funding txn and wait for both nodes to see it
        connections[0].send_message(msg_tx(ftx))
        wait_until(lambda: ftx.hash in self.nodes[0].getrawmempool(),
                   timeout=5)
        wait_until(lambda: ftx.hash in self.nodes[1].getrawmempool(),
                   timeout=5)

        # Create non-final txn.
        parent_txid = ftx.sha256
        send_value = out_value - 500
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(parent_txid, 0), b'', 0x01))
        tx.vout.append(CTxOut(int(send_value), CScript([OP_TRUE])))
        tx.nLockTime = int(time.time()) + 300
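        # a non-final input sequence (0x01 above) combined with a lock time in the
        # future keeps this transaction non-final for now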
        tx.rehash()

        # Send non-final txn to node0. It should be forwarded over P2P to node1.
        connections[0].send_message(msg_tx(tx))
        wait_until(lambda: tx.hash in self.nodes[0].getrawnonfinalmempool(),
                   timeout=5)
        wait_until(lambda: tx.hash in self.nodes[1].getrawnonfinalmempool(),
                   timeout=5)
        assert (tx.hash not in self.nodes[0].getrawmempool())
        assert (tx.hash not in self.nodes[1].getrawmempool())

        # Create finalising txn.
        finaltx = copy.deepcopy(tx)
        finaltx.vin[0].nSequence = 0xFFFFFFFF
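        # the maximum sequence number disables the lock-time check, which makes
        # the transaction final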
        finaltx.rehash()

        # Send finalising txn to node0. It should be forwarded over P2P to node1.
        connections[0].send_message(msg_tx(finaltx))
        wait_until(lambda: finaltx.hash in self.nodes[0].getrawmempool(),
                   timeout=5)
        wait_until(lambda: finaltx.hash in self.nodes[1].getrawmempool(),
                   timeout=5)
        assert (tx.hash not in self.nodes[0].getrawnonfinalmempool())
        assert (tx.hash not in self.nodes[1].getrawnonfinalmempool())
Exemplo n.º 22
0
    def run_test(self):

        block_count = 0

        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()

        self.log.info("Sending blocks to get spendable output")
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        block2 = self.chain.next_block(block_count, spend=out[0], extra_txns=1)
        block2_count = block_count
        block_count += 1
        self.log.info(f"blockA hash: {block2.hash}")
        node0.send_message(msg_block(block2))
        self.nodes[0].waitforblockheight(102)

        block3_hard = self.chain.next_block(block_count,
                                            spend=out[1],
                                            extra_txns=8)
        block_count += 1
        self.chain.set_tip(block2_count)

        block4_easier = self.chain.next_block(block_count,
                                              spend=out[1],
                                              extra_txns=2)
        block_count += 1
        self.chain.set_tip(block2_count)

        block5_hard = self.chain.next_block(block_count,
                                            spend=out[1],
                                            extra_txns=10)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"hard block3 hash: {block3_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block3_hard.hash, "add")
        self.log.info(f"hard block5 hash: {block5_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block5_hard.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block3_hard.hash, block5_hard.hash},
                                self.nodes[0], self.log)

        self.log.info(
            "Sending blocks 3,4,5 on branch 2 for parallel validation")
        node0.send_message(msg_block(block3_hard))
        node2.send_message(msg_block(block5_hard))

        # make sure we started validating blocks
        wait_for_validating_blocks({block3_hard.hash, block5_hard.hash},
                                   self.nodes[0], self.log)

        self.log.info(f"easier hash: {block4_easier.hash}")
        node1.send_message(msg_block(block4_easier))
        self.nodes[0].waitforblockheight(103)

        # Because block 4 is easy to validate it will be validated first and set as the active tip
        assert_equal(block4_easier.hash, self.nodes[0].getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        self.nodes[0].waitaftervalidatingblock(block3_hard.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block5_hard.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block4_easier.hash, self.nodes[0].getbestblockhash())

        self.log.info("Sending blocks 6,7,8 on competing chain to cause reorg")
        self.chain.set_tip(tip_block_num)

        block6 = self.chain.next_block(block_count, spend=out[0], extra_txns=2)
        block_count += 1
        self.log.info(f"block6 hash: {block6.hash}")
        node0.send_message(msg_block(block6))

        block7 = self.chain.next_block(block_count)
        block_count += 1
        self.log.info(f"block7: {block7.hash}")
        node0.send_message(msg_block(block7))

        # send one more block to cause a reorg; this one should become the active tip
        block8 = self.chain.next_block(block_count)
        block_count += 1
        self.log.info(f"block8: {block8.hash}")
        node0.send_message(msg_block(block8))

        self.nodes[0].waitforblockheight(104)
        assert_equal(block8.hash, self.nodes[0].getbestblockhash())

        self.log.info(
            "Invalidating block7 on competing chain to reorg to first branch again"
        )
        self.log.info(f"invalidating hash {block7.hash}")
        self.nodes[0].invalidateblock(block7.hash)

        # after invalidating, the active block should be the one that was first validated on the first branch
        assert_equal(block4_easier.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 23
0
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        node3 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node3)
        node3.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()
        node3.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        block2 = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3 = self.chain.next_block(block_count,
                                       spend=out[0],
                                       extra_txns=10)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4 = self.chain.next_block(block_count,
                                       spend=out[0],
                                       extra_txns=12)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block5 = self.chain.next_block(block_count,
                                       spend=out[0],
                                       extra_txns=14)
        block5_num = block_count
        block_count += 1

        block6 = self.chain.next_block(block_count, spend=out[1], extra_txns=8)
        block_count += 1

        self.chain.set_tip(block5_num)

        block7 = self.chain.next_block(block_count,
                                       spend=out[1],
                                       extra_txns=10)

        self.log.info(f"block2 hash: {block2.hash}")
        self.nodes[0].waitaftervalidatingblock(block2.hash, "add")
        self.log.info(f"block3 hash: {block3.hash}")
        self.nodes[0].waitaftervalidatingblock(block3.hash, "add")
        self.log.info(f"block4 hash: {block4.hash}")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2.hash, block3.hash, block4.hash},
                                self.nodes[0], self.log)

        node0.send_message(msg_block(block2))
        # make sure we started validating block2 first as we expect this one to
        # be terminated later on in the test before its validation is complete
        # (the algorithm for premature termination selects candidates based on
        # block height and validation duration - blocks at a smaller height
        # that have been validating longer are terminated first)
        wait_for_validating_blocks({block2.hash}, self.nodes[0], self.log)

        node1.send_message(msg_block(block3))
        node2.send_message(msg_block(block4))
        # make sure we started validating blocks
        wait_for_validating_blocks({block2.hash, block3.hash, block4.hash},
                                   self.nodes[0], self.log)

        node3.send_message(msg_block(block5))
        self.log.info(f"block5 hash: {block5.hash}")

        # check log file for logging about which block validation was terminated
        termination_log_found = False
        for line in open(
                glob.glob(self.options.tmpdir + "/node0" +
                          "/regtest/bitcoind.log")[0]):
            if f"Block {block2.hash} will not be considered by the current tip activation as the maximum parallel block" in line:
                termination_log_found = True
                self.log.info("Found line: %s", line.strip())
                break

        self.log.info(f"block6 hash: {block6.hash}")
        self.nodes[0].waitaftervalidatingblock(block6.hash, "add")
        self.log.info(f"block7 hash: {block7.hash}")
        self.nodes[0].waitaftervalidatingblock(block7.hash, "add")

        wait_for_waiting_blocks({block6.hash, block7.hash}, self.nodes[0],
                                self.log)

        node3.send_message(msg_block(block6))
        wait_for_validating_blocks({block6.hash}, self.nodes[0], self.log)

        node3.send_message(msg_block(block7))
        wait_for_validating_blocks({block7.hash}, self.nodes[0], self.log)

        self.nodes[0].waitaftervalidatingblock(block2.hash, "remove")
        # block2 should be canceled.
        wait_for_not_validating_blocks({block2.hash}, self.nodes[0], self.log)

        self.log.info("removing wait status from block7")
        self.nodes[0].waitaftervalidatingblock(block7.hash, "remove")

        # finish block7 validation
        wait_for_not_validating_blocks({block7.hash}, self.nodes[0], self.log)

        # remove wait status from the remaining blocks to finish their validation so the test exits properly
        self.nodes[0].waitaftervalidatingblock(block3.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block6.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block7 should be active in the end
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())

        # check log file for logging about which block validation was terminated
        termination_log_found = False
        for line in open(
                glob.glob(self.options.tmpdir + "/node0" +
                          "/regtest/bitcoind.log")[0]):
            if f"Block {block2.hash} validation was terminated before completion." in line:
                termination_log_found = True
                self.log.info("Found line: %s", line.strip())
                break

        assert_equal(termination_log_found, True)
Exemplo n.º 24
0
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        _, out, block_count = prepare_init_chain(self.chain, 101, 100, block_0=False, start_block=0, node=node0)

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        block2_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1
        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out[0], extra_txns=2)
        block_count += 1
        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count, spend=out[0], extra_txns=10)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block2_hard))
        node1.send_message(msg_block(block4_hard))
 
        # make sure we started validating blocks
        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        self.log.info(f"easier hash: {block3_easier.hash}")
        node2.send_message(msg_block(block3_easier))

        self.nodes[0].waitforblockheight(102)
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # now we want our precious block to be one of the harder blocks (block4_hard)
        self.nodes[0].preciousblock(block4_hard.hash)
        assert_equal(block4_hard.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 25
0
    def run_test(self):
        node0 = NodeConnCB()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])
        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that a transaction with non-DER signature can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        un_der_ify(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
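        # a block's timestamp must be greater than the median time of the previous
        # blocks (median-time-past), hence mediantime + 1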
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                             block_time)
        block.nVersion = 2
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()
        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason,
                         b'bad-version(0x00000002)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info(
            "Test that transactions with non-DER signatures cannot appear in a block"
        )
        block.nVersion = 3

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        un_der_ify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(MsgTx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            # We can receive different reject messages depending on whether
            # Krond is running with multiple script check threads. If script
            # check threads are not in use, then transaction script validation
            # happens sequentially, and Krond produces more specific reject
            # reasons.
            assert node0.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Non-canonical DER signature' in node0.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
        )
        block.vtx[1] = create_transaction(self.nodes[0],
                                          self.coinbase_blocks[1],
                                          self.nodeaddress, 1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Exemplo n.º 26
0
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        block1 = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1
        # send the block but hold it at the validation point
        self.nodes[0].waitaftervalidatingblock(block1.hash, "add")
        node0.send_message(msg_block(block1))
        self.log.info(f"block1 hash: {block1.hash}")

        # make sure block hash is in waiting list
        wait_for_waiting_blocks({block1.hash}, self.nodes[0], self.log)

        # send child block
        block2 = self.chain.next_block(block_count,
                                       spend=out[1],
                                       extra_txns=10)
        block_count += 1
        node0.send_message(msg_block(block2))
        self.log.info(f"block2 hash: {block2.hash}")

        def wait_for_log():
            line_text = block2.hash + " will not be considered by the current"
            for line in open(
                    glob.glob(self.options.tmpdir + "/node0" +
                              "/regtest/bitcoind.log")[0]):
                if line_text in line:
                    self.log.info("Found line: %s", line)
                    return True
            return False

        wait_until(wait_for_log)

        self.nodes[0].waitaftervalidatingblock(block1.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block that arrived last on competing chain should be active
        assert_equal(block2.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 27
0
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count-1

        # left branch
        block2 = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block2_num = block_count
        block_count += 1
        node0.send_message(msg_block(block2))
        self.log.info(f"block2 hash: {block2.hash}")

        self.nodes[0].waitforblockheight(102)

        # send blocks 3,4 for parallel validation on left branch
        block3 = self.chain.next_block(block_count, spend=out[1], extra_txns=10)
        block_count += 1

        self.chain.set_tip(block2_num)

        block4 = self.chain.next_block(block_count, spend=out[1], extra_txns=8)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"block3 hash: {block3.hash}")
        self.nodes[0].waitaftervalidatingblock(block3.hash, "add")
        self.log.info(f"block4 hash: {block4.hash}")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "add")
        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block3.hash, block4.hash}, self.nodes[0], self.log)

        node0.send_message(msg_block(block3))
        node2.send_message(msg_block(block4))

        # make sure we started validating blocks
        wait_for_validating_blocks({block3.hash, block4.hash}, self.nodes[0], self.log)

        # right branch
        self.chain.set_tip(tip_block_num)
        block5 = self.chain.next_block(block_count, spend=out[0], extra_txns=10)
        block_count += 1
        node1.send_message(msg_block(block5))
        self.log.info(f"block5 hash: {block5.hash}")

        # and two blocks to extend second branch to cause reorg
        # - they must be sent from the same node as otherwise they will be
        #   rejected with "prev block not found"
        block6 = self.chain.next_block(block_count)
        node1.send_message(msg_block(block6))
        self.log.info(f"block6 hash: {block6.hash}")
        block_count += 1

        block7 = self.chain.next_block(block_count)
        node1.send_message(msg_block(block7))
        self.log.info(f"block7 hash: {block7.hash}")
        block_count += 1

        self.nodes[0].waitforblockheight(104)
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())

        self.log.info("releasing wait status on parallel blocks to finish their validation")
        self.nodes[0].waitaftervalidatingblock(block3.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block that arrived last on competing chain should be active
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 28
0
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection0 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection0)

        node1 = NodeConnCB()
        connection1 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection1)

        # *** Prepare node connection for early announcements testing
        node2 = NodeConnCB()
        node2.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2))

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        # *** Activate early announcement functionality for this connection
        #     After this point the early announcements are not received yet -
        #     we still need to set latest announced block (CNode::pindexBestKnownBlock)
        #     which is set e.g. by sending a getheaders message with a
        #     non-null locator
        node2.wait_for_verack()
        node2.send_message(msg_sendcmpct(announce=True))

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        _, outs, block_count = prepare_init_chain(self.chain,
                                                  101,
                                                  1,
                                                  block_0=False,
                                                  start_block=0,
                                                  node=node0)
        out = outs[0]

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count,
                                            spend=out,
                                            extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count,
                                              spend=out,
                                              extra_txns=2)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count,
                                            spend=out,
                                            extra_txns=10)
        block_count += 1

        # send two "hard" blocks; with waitaftervalidatingblock we artificially
        # extend their validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash},
                                self.nodes[0], self.log)

        # *** Complete early announcement setup by sending getheaders message
        #     with a non-null locator (pointing to the last block that we know
        #     of on python side - we claim that we know of all the blocks that
        #     bitcoind node knows of)
        #
        #     We also set on_cmpctblock handler as early announced blocks are
        #     announced via compact block messages instead of inv messages
        node2.send_and_ping(
            msg_getheaders(
                locator_have=[int(self.nodes[0].getbestblockhash(), 16)]))
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block2_hard.sha256

        def on_cmpctblock(conn, message):
            nonlocal receivedAnnouncement
            message.header_and_shortids.header.calc_sha256()
            if message.header_and_shortids.header.sha256 == waiting_for_announcement_block_hash:
                receivedAnnouncement = True

        node2.on_cmpctblock = on_cmpctblock

        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))

        # *** make sure that we receive announcement of the block before it has
        #     been validated
        wait_until(lambda: receivedAnnouncement)

        # make the submitblock RPC call in a separate thread because the
        # waitaftervalidatingblock hold prevents it from returning until validation finishes
        submitblock_thread = threading.Thread(target=self.nodes[0].submitblock,
                                              args=(ToHex(block4_hard), ))
        submitblock_thread.start()

        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(rpc_url(
            get_datadir_path(self.options.tmpdir, 0), 0),
                                   0,
                                   coveragedir=self.options.coveragedir)

        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash},
                                   rpc_client, self.log)

        # *** prepare to intercept block3_easier announcement - it will not be
        #     announced before validation is complete as early announcement is
        #     limited to announcing one block per height (siblings are ignored)
        #     but after validation is complete we should still receive the
        #     compact block announcement message
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block3_easier.sha256

        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))

        # *** Make sure that we receive compact block announcement of the block
        #     after the validation is complete even though it was not the first
        #     block that was received by bitcoind node.
        #
        #     Also make sure that we receive inv announcement of the block after
        #     the validation is complete by the nodes that are not using early
        #     announcement functionality.
        wait_until(lambda: receivedAnnouncement)
        node0.wait_for_inv([CInv(CInv.BLOCK, block3_easier.sha256)])
        # node 1 was the sender but receives an inv for the block nonetheless
        # (with early announcement that's not the case - sender does not receive the announcement)
        node1.wait_for_inv([CInv(CInv.BLOCK, block3_easier.sha256)])

        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitblock_thread.join()

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
Exemplo n.º 29
0
    def run_test(self):
        node0 = NodeConnCB()

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that a getheaders request for the stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Extend the longest chain so the stale block becomes much older than the chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders for the new tip to refresh the last received block and headers messages
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
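This example calls helper methods (build_chain, send_block_request, send_header_request, last_block_equals, last_header_equals) that are defined elsewhere in the test class. A minimal sketch of what the request/compare helpers might look like, assuming the standard mininode message types (msg_getdata, msg_getheaders, CInv) and the NodeConnCB last_message cache; this is illustrative, not the test's actual implementation:

    def send_block_request(self, block_hash, node):
        # Ask the node for a single block via GETDATA.
        msg = msg_getdata()
        msg.inv.append(CInv(CInv.BLOCK, block_hash))
        node.send_message(msg)

    def send_header_request(self, block_hash, node):
        # Ask the node for a single header via GETHEADERS.
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)

    def last_block_equals(self, expected_hash, node):
        # Compare the hash of the last received BLOCK message with expected_hash.
        with mininode_lock:
            block_msg = node.last_message.get("block")
        return block_msg is not None and block_msg.block.rehash() == expected_hash

    def last_header_equals(self, expected_hash, node):
        # Compare the hash of the last received HEADERS message with expected_hash.
        with mininode_lock:
            headers_msg = node.last_message.get("headers")
        return (headers_msg is not None and headers_msg.headers
                and headers_msg.headers[-1].rehash() == expected_hash)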
Exemplo n.º 30
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        getDataMessages = []

        def on_getdata(conn, message):
            getDataMessages.append(message)

        node0.on_getdata = on_getdata

        # ***** 1. *****
        # starting_blocks are needed to provide spendable outputs
        starting_blocks = MIN_TTOR_VALIDATION_DISTANCE + 1
        for i in range(starting_blocks):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))
        out = []
        for i in range(starting_blocks):
            out.append(self.chain.get_spendable_output())
        self.nodes[0].waitforblockheight(starting_blocks)

        tip_block_index = block_count - 1

        self.log.info("Block tip height: %d " % block_count)

        # ***** 2. *****
        # branch with blocks that do not violate TTOR
        valid_ttor_branch_height = MIN_TTOR_VALIDATION_DISTANCE + 1
        for i in range(0, valid_ttor_branch_height):
            block = self.chain.next_block(block_count,
                                          spend=out[i],
                                          extra_txns=8)
            block_count += 1
            node0.send_message(msg_block(block))
        chaintip_valid_branch = block
        self.nodes[0].waitforblockheight(starting_blocks +
                                         valid_ttor_branch_height)

        self.log.info("Node's active chain height: %d " %
                      (starting_blocks + valid_ttor_branch_height))

        # ***** 3. *****
        # branch with invalid transaction order that will try to cause a reorg
        self.chain.set_tip(tip_block_index)
        blocks_invalid_ttor = []
        headers_message = msg_headers()
        headers_message.headers = []
        invalid_ttor_branch_height = MIN_TTOR_VALIDATION_DISTANCE + 1
        for i in range(0, invalid_ttor_branch_height):
            spend = out[i]
            block = self.chain.next_block(block_count)
            add_txns = self.get_chained_transactions(spend,
                                                     num_of_transactions=10)

            # Swap two chained transactions so that one spends an output of a
            # transaction that appears later in the block (makes the block violate TTOR).
            add_txns[1], add_txns[2] = add_txns[2], add_txns[1]
            self.chain.update_block(block_count, add_txns)
            blocks_invalid_ttor.append(block)
            block_count += 1

            if i == 0:
                first_block = block
            # defer sending the header of the last block until later
            if i != MIN_TTOR_VALIDATION_DISTANCE:
                headers_message.headers.append(CBlockHeader(block))

        self.log.info("Sending %d headers..." % MIN_TTOR_VALIDATION_DISTANCE)

        node0.send_message(headers_message)
        # Wait to make sure we do not receive GETDATA messages yet.
        time.sleep(1)
        # Check that no GETDATA is received until this chain is at least as long as the active chain.
        assert_equal(len(getDataMessages), 0)

        self.log.info("Sending 1 more header...")
        # Send HEADERS message for the last block.
        headers_message.headers = [CBlockHeader(block)]
        node0.send_message(headers_message)
        node0.wait_for_getdata()
        self.log.info("Received GETDATA.")
        assert_equal(len(getDataMessages), 1)

        # Send the first block of the invalid chain. The chain should be invalidated.
        node0.send_message(msg_block(first_block))

        def wait_to_invalidate_fork():
            chaintips = self.nodes[0].getchaintips()
            if len(chaintips) < 2:
                return False
            chaintips_status = [chaintips[0]["status"], chaintips[1]["status"]]
            if "active" not in chaintips_status or "invalid" not in chaintips_status:
                return False
            active_chain_tip_hash = chaintips[0]["hash"] if chaintips[0][
                "status"] == "active" else chaintips[1]["hash"]
            invalid_fork_tip_hash = chaintips[0]["hash"] if chaintips[0][
                "status"] == "invalid" else chaintips[1]["hash"]
            assert active_chain_tip_hash != invalid_fork_tip_hash

            # The fork is considered invalidated once its tip matches one of
            # the TTOR-violating blocks we created.
            return any(block.hash == invalid_fork_tip_hash
                       for block in blocks_invalid_ttor)

        wait_until(wait_to_invalidate_fork)

        # chaintip of valid branch should be active
        assert_equal(self.nodes[0].getbestblockhash(),
                     chaintip_valid_branch.hash)

        # check the log file to verify that no reorg happened
        disconnect_block_log = False
        for line in open(
                glob.glob(self.options.tmpdir + "/node0" +
                          "/regtest/bitcoind.log")[0]):
            if f"Disconnect block" in line:
                disconnect_block_log = True
                self.log.info("Found line: %s", line.strip())
                break

        # we should not find information about disconnecting blocks
        assert_equal(disconnect_block_log, False)

        # check the log file for information about the TTOR violation
        ttor_violation_log = False
        for line in open(
                glob.glob(self.options.tmpdir + "/node0" +
                          "/regtest/bitcoind.log")[0]):
            if f"violates TTOR order" in line:
                ttor_violation_log = True
                self.log.info("Found line: %s", line.strip())
                break

        # we should find information about TTOR being violated
        assert_equal(ttor_violation_log, True)
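The get_chained_transactions helper used in step 3 is defined elsewhere in the test; it returns transactions in which each one spends an output of the previous one, which is why swapping two of them violates the topological transaction order (TTOR). A rough sketch under that assumption, using the mininode primitives (CTransaction, CTxIn, CTxOut, COutPoint, CScript, OP_TRUE); the spend.tx/spend.n fields, the amounts, and the absence of real signing are simplifications for illustration:

    def get_chained_transactions(self, spend, num_of_transactions, money_to_spend=5000000000):
        # Each transaction spends output 0 of the previous one, so a block
        # that contains them must keep them in creation order to satisfy TTOR.
        txns = []
        prev_tx, prev_n = spend.tx, spend.n
        for _ in range(num_of_transactions):
            money_to_spend -= 1000                 # leave room for a fee
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(prev_tx.sha256, prev_n), b"", 0xffffffff))
            tx.vout.append(CTxOut(money_to_spend, CScript([OP_TRUE])))
            tx.calc_sha256()
            txns.append(tx)
            prev_tx, prev_n = tx, 0
        return txns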