Example #1
    def run_test(self):
        node0 = NodeConnCB()

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheaders request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # The active chain is extended so the stale block is much older than the chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders for the new tip to refresh the last received block and headers messages
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
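
The helpers called above (build_chain, send_block_request, send_header_request, last_block_equals, last_header_equals) are not part of this excerpt. A minimal sketch of how they could look, assuming the standard mininode/blocktools primitives (msg_getdata, msg_getheaders, CInv, create_block, create_coinbase) that the example already relies on:

    def build_chain(self, size, prev_hash, prev_height, prev_median_time):
        # Build and solve `size` blocks on top of prev_hash without submitting them.
        blocks = []
        for _ in range(size):
            coinbase = create_coinbase(prev_height + 1)
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks

    def send_block_request(self, block_hash, node):
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash))  # 2 == "Block" inventory type
        node.send_message(msg)

    def send_header_request(self, block_hash, node):
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)

    def last_block_equals(self, expected_hash, node):
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash

    def last_header_equals(self, expected_hash, node):
        headers_msg = node.last_message.get("headers")
        return (headers_msg and headers_msg.headers and
                headers_msg.headers[0].rehash() == expected_hash)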
Example #2
    def run_test(self):
        node0 = NodeConnCB()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that an invalid-according-to-CLTV transaction can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                             block_time)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()
        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason,
                         b'bad-version(0x00000003)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info(
            "Test that invalid-according-to-cltv transactions cannot appear in a block"
        )
        block.nVersion = 4

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for CLTV by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(MsgTx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert node0.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Negative locktime' in node0.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
        )
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
        spendtx.rehash()

        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
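
cltv_invalidate (and its counterpart cltv_validate) are helpers that are not shown in this excerpt. A sketch of cltv_invalidate that is consistent with the 'Negative locktime' rejection checked above, assuming the CScript class and opcode names from the framework's script module:

def cltv_invalidate(tx):
    """Make the transaction fail CLTV by prepending -1 CHECKLOCKTIMEVERIFY DROP
    to the scriptSig of input 0; a negative locktime argument is always
    rejected by OP_CHECKLOCKTIMEVERIFY."""
    tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
                                  list(CScript(tx.vin[0].scriptSig)))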
Example #3
    def run_test(self):
        block_count = 0

        # Create P2P connections
        node0 = NodeConnCB()
        connection0 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection0)

        node1 = NodeConnCB()
        connection1 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection1)

        # *** Prepare node connection for early announcements testing
        node2 = NodeConnCB()
        node2.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2))

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        # *** Activate early announcement functionality for this connection.
        #     At this point early announcements are not received yet -
        #     we still need to set the latest announced block
        #     (CNode::pindexBestKnownBlock), which is set e.g. by sending a
        #     getheaders message with a non-null locator
        node2.wait_for_verack()
        node2.send_message(msg_sendcmpct(announce=True))

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = self.chain.get_spendable_output()

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count, spend=out, extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count, spend=out, extra_txns=2)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4_hard = self.chain.next_block(block_count, spend=out, extra_txns=10)
        block_count += 1

        # send two "hard" blocks; with waitaftervalidatingblock we artificially
        # extend their validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)

        # *** Complete early announcement setup by sending a getheaders message
        #     with a non-null locator (pointing to the last block that we know
        #     of on the python side - we claim to know of all the blocks that
        #     the bitcoind node knows of)
        #
        #     We also set on_cmpctblock handler as early announced blocks are
        #     announced via compact block messages instead of inv messages
        node2.send_and_ping(msg_getheaders(locator_have=[int(self.nodes[0].getbestblockhash(), 16)]))
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block2_hard.sha256
        def on_cmpctblock(conn, message):
            nonlocal receivedAnnouncement
            message.header_and_shortids.header.calc_sha256()
            if message.header_and_shortids.header.sha256 == waiting_for_announcement_block_hash:
                receivedAnnouncement = True
        node2.on_cmpctblock = on_cmpctblock

        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))

        # *** make sure that we receive announcement of the block before it has
        #     been validated
        wait_until(lambda: receivedAnnouncement)

        # call submitblock in a separate thread because waitaftervalidatingblock
        # blocks its return
        submitblock_thread = threading.Thread(target=self.nodes[0].submitblock, args=(ToHex(block4_hard),))
        submitblock_thread.start()

        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(
            rpc_url(get_datadir_path(self.options.tmpdir, 0), 0), 0,
            coveragedir=self.options.coveragedir)

        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, rpc_client, self.log)

        # *** prepare to intercept block3_easier announcement - it will not be
        #     announced before validation is complete as early announcement is
        #     limited to announcing one block per height (siblings are ignored)
        #     but after validation is complete we should still get the announcing
        #     compact block message
        receivedAnnouncement = False
        waiting_for_announcement_block_hash = block3_easier.sha256

        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))

        # *** Make sure that we receive compact block announcement of the block
        #     after the validation is complete even though it was not the first
        #     block that was received by bitcoind node.
        #
        #     Also make sure that we receive inv announcement of the block after
        #     the validation is complete by the nodes that are not using early
        #     announcement functionality.
        wait_until(lambda: receivedAnnouncement)
        node0.wait_for_inv([CInv(2, block3_easier.sha256)]) # 2 == GetDataMsg::MSG_BLOCK
        # node1 was the sender but receives the inv for the block nonetheless
        # (with early announcement that's not the case - the sender does not receive the announcement)
        node1.wait_for_inv([CInv(2, block3_easier.sha256)]) # 2 == GetDataMsg::MSG_BLOCK

        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitblock_thread.join()

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
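
wait_for_waiting_blocks and wait_for_validating_blocks are helpers of the parallel-block-validation test suite and are not included in this excerpt. A hedged sketch of the polling they presumably do; the getwaitingblocks and getcurrentlyvalidatingblocks RPC names are assumptions inferred from the waitaftervalidatingblock calls above:

def wait_for_waiting_blocks(expected_hashes, node, log, timeout=60):
    # Poll until every expected hash shows up among the blocks that are being
    # held back by waitaftervalidatingblock.
    # NOTE: the getwaitingblocks RPC name is an assumption.
    def predicate():
        waiting = set(node.getwaitingblocks())
        log.debug("waiting blocks: %s", waiting)
        return expected_hashes.issubset(waiting)
    wait_until(predicate, timeout=timeout)

def wait_for_validating_blocks(expected_hashes, node, log, timeout=60):
    # Poll until every expected hash is reported as currently being validated.
    # NOTE: the getcurrentlyvalidatingblocks RPC name is an assumption.
    def predicate():
        validating = set(node.getcurrentlyvalidatingblocks())
        log.debug("validating blocks: %s", validating)
        return expected_hashes.issubset(validating)
    wait_until(predicate, timeout=timeout)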
    def run_test(self):
        node0 = NodeConnCB()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])
        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that a transaction with non-DER signature can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        un_der_ify(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                             block_time)
        block.nVersion = 2
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()
        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason,
                         b'bad-version(0x00000002)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info(
            "Test that transactions with non-DER signatures cannot appear in a block"
        )
        block.nVersion = 3

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        un_der_ify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(MsgTx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            # We can receive different reject messages depending on whether
            # zelantusd is running with multiple script check threads. If script
            # check threads are not in use, then transaction script validation
            # happens sequentially, and zelantusd produces more specific reject
            # reasons.
            assert node0.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Non-canonical DER signature' in node0.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
        )
        block.vtx[1] = create_transaction(self.nodes[0],
                                          self.coinbase_blocks[1],
                                          self.nodeaddress, 1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
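
un_der_ify is another helper assumed by this test. A sketch consistent with the 'Non-canonical DER signature' rejection checked above: it pads the signature in input 0 so it is no longer strictly DER-encoded while leaving the rest of the script untouched.

def un_der_ify(tx):
    """Make the signature in input 0 non-DER-compliant by inserting a zero
    byte just before the sighash-type byte, breaking the strict DER length
    encoding."""
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for item in scriptSig:
        if len(newscript) == 0:
            # The first push is the signature: pad it.
            newscript.append(item[0:-1] + b'\0' + item[-1:])
        else:
            newscript.append(item)
    tx.vin[0].scriptSig = CScript(newscript)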
class MockDsdetector():
    def __init__(self, testRig, node):
        self.peer = NodeConnCB()
        self.peer.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), node, self.peer))
        NetworkThread().start()
        self.peer.wait_for_verack()
        testRig.dsdetector = self
        self.message = None

    def SendDsNotification(self):
        if self.message:
            self.peer.send_and_ping(self.message)

    def CheckForDoubleSpends(self, nodes):
        spent_inputs = []
        seen_transactions = []
        ds_counter = 0
        for node in nodes:
            for height in range(node.getblockcount() + 1):
                blockhash = node.getblockhash(height)
                block = node.getblock(blockhash, 2)
                blockHex = node.getblock(blockhash, False)
                for txraw in block['tx'][1:]:  # exclude coinbase
                    # skip transactions that are identical in the two chains; they are not double spends
                    if txraw['txid'] in seen_transactions:
                        continue
                    else:
                        seen_transactions.append(txraw['txid'])
                    for i in txraw['vin']:
                        utxoA = (i['txid'], i['vout'])
                        blockA = FromHex(CBlock(), blockHex)
                        txA = FromHex(CTransaction(), txraw['hex'])
                        foundB = [
                            j for j in spent_inputs if j['utxo'] == utxoA
                        ]
                        if foundB:
                            ds_counter += 1
                            foundB = foundB[0]
                            blockB = foundB['block']
                            txB = foundB['tx']
                            txA.rehash()

                            txB.rehash()

                            blockA.vtx[0].rehash()
                            blockB.vtx[0].rehash()
                            sha256_A = blockA.vtx[0].sha256
                            sha256_B = blockB.vtx[0].sha256

                            dsdMessage = msg_dsdetected(blocksDetails=[
                                BlockDetails([CBlockHeader(blockA)],
                                             DSMerkleProof(
                                                 1, txA, blockA.hashMerkleRoot,
                                                 [MerkleProofNode(sha256_A)])),
                                BlockDetails([CBlockHeader(blockB)],
                                             DSMerkleProof(
                                                 1, txB, blockB.hashMerkleRoot,
                                                 [MerkleProofNode(sha256_B)]))
                            ])
                            self.message = dsdMessage

                            dsdBytes = dsdMessage.serialize()
                            dsdMessageDeserialized = msg_dsdetected()
                            dsdMessageDeserialized.deserialize(
                                BytesIO(dsdBytes))
                            assert_equal(str(dsdMessage),
                                         str(dsdMessageDeserialized))

                            break
                        else:
                            spent_inputs.append({
                                'txid': txraw['txid'],
                                'tx': txA,
                                'utxo': utxoA,
                                'block': blockA
                            })

        return ds_counter
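
A possible way to drive MockDsdetector from a test body; the surrounding test object and node list are illustrative, not part of the original:

        # Hypothetical usage: scan the (diverging) nodes for double spends and,
        # if any were found, replay the last msg_dsdetected that was built.
        detector = MockDsdetector(self, self.nodes[0])
        double_spends = detector.CheckForDoubleSpends(self.nodes)
        if double_spends > 0:
            detector.SendDsNotification()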
class Send_node():
    rejected_blocks = []

    def __init__(self, tmpdir, log, node_no, p2p_connection, rpc_connection):
        self.p2p = p2p_connection
        self.rpc = rpc_connection
        self.node_no = node_no
        self.tmpdir = tmpdir
        self.log = log
        self._register_on_reject()

    def _register_on_reject(self):
        def on_reject(conn, msg):
            self.rejected_blocks.append(msg)
        self.p2p.connection.cb.on_reject = on_reject

    def restart_node(self, extra_args=None):
        self.log.info("Restarting node ...")
        self.rpc.stop_node()
        self.rpc.wait_until_stopped()
        self.rpc.start(True, extra_args)
        self.rpc.wait_for_rpc_connection()
        self.p2p = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.rpc, self.p2p)
        NetworkThread().start()
        self.p2p.add_connection(connection)
        self.p2p.wait_for_verack()
        self._register_on_reject()

    def send_block(self, block, expect_tip, expect_reject=False):
        self.p2p.send_and_ping(msg_block(block))

        if expect_reject:
            assert_equal(expect_tip, self.rpc.getbestblockhash())
            self.reject_check(block)
            assert self.check_frozen_tx_log(block.hash)
        else:
            assert_equal(expect_tip, self.rpc.getbestblockhash())
            assert not self.check_frozen_tx_log(block.hash)

    def reject_check(self, block):
        self.p2p.wait_for_reject()

        assert_equal(len(self.rejected_blocks), 1)
        assert_equal(self.rejected_blocks[0].data, block.sha256)
        assert_equal(self.rejected_blocks[0].reason, b'bad-txns-inputs-frozen')

        self.rejected_blocks = []
        self.p2p.message_count["reject"] = 0

    def check_frozen_tx_log(self, hash):
        for line in open(glob.glob(f"{self.tmpdir}/node{self.node_no}/regtest/blacklist.log")[0]):
            if hash in line:
                self.log.debug("Found line in blacklist.log: %s", line)
                return True
        return False

    def check_log(self, line_text):
        for line in open(glob.glob(f"{self.tmpdir}/node{self.node_no}/regtest/bitcoind.log")[0]):
            if re.search(line_text, line) is not None:
                self.log.debug("Found line in bitcoind.log: %s", line.strip())
                return True
        return False
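
Send_node expects an already-established P2P connection plus an RPC handle. A hedged usage sketch, following the connection setup from the examples above; good_block and bad_block stand for blocks whose construction is omitted here:

    def run_test(self):
        node0 = NodeConnCB()
        node0.add_connection(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        NetworkThread().start()
        node0.wait_for_verack()

        send_node = Send_node(self.options.tmpdir, self.log, 0, node0, self.nodes[0])

        # A block spending only unfrozen outputs becomes the new tip ...
        send_node.send_block(good_block, expect_tip=good_block.hash)
        # ... while a block spending a frozen output is rejected and the tip is unchanged.
        send_node.send_block(bad_block, expect_tip=good_block.hash, expect_reject=True)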