Example #1
 def run_test(self):
     return  #TODO p2p stuff throwing format errors
     test = TestManager(self, self.options.tmpdir)
     test.add_all_connections(self.nodes)
     NetworkThread().start()  # Start up network handling in another thread
     test.run()
Example #2
    def run_test(self):
        """Main test logic"""

        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
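        # Under the hood this amounts to polling the connection's per-message
        # counters; a rough sketch (message_count is assumed to be the counter
        # dict kept by the NodeConnCB base class, not necessarily the exact
        # implementation):
        #     wait_until(lambda: node0.message_count["verack"] > 0)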

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.
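        #
        # A hypothetical sketch of that dispatch (illustrative only, not the
        # framework's exact code): any attribute the wrapper does not define
        # is forwarded to the underlying RPC proxy.
        #
        #     class TestNodeWrapper:
        #         def __getattr__(self, name):
        #             # Assume self.rpc is an authproxy-style RPC connection
        #             return getattr(self.rpc, name)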

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = 1

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.solve()
            block_message = msg_block(block)
            # send_message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info(
            "Wait for node2 reach current tip. Test that it has propogated all the blocks to us"
        )

        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(node2.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)
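        # Roughly, wait_until polls the predicate until it returns True or a
        # timeout expires; a simplified sketch (assuming a lock is supplied,
        # and ignoring the framework's extra error reporting):
        #
        #     def wait_until(predicate, timeout=60, lock=None):
        #         stop = time.time() + timeout
        #         while time.time() < stop:
        #             with lock:
        #                 if predicate():
        #                     return True
        #             time.sleep(0.05)
        #         raise AssertionError("wait_until() timed out")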

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
Example #3
 def run_test(self):
     self.test = TestManager(self, self.options.tmpdir)
     self.test.add_all_connections(self.nodes)
     NetworkThread().start()  # Start up network handling in another thread
     self.test.run()
Example #4
    def run_test(self):
        self.address = self.nodes[0].getnewaddress()
        self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])
        self.wit_address = self.nodes[0].addwitnessaddress(self.address)
        self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)

        NetworkThread().start()  # Start up network handling in another thread
        self.coinbase_blocks = self.nodes[0].generate(2)  # Block 2
        coinbase_txid = []
        for i in self.coinbase_blocks:
            coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
        self.nodes[0].generate(427)  # Block 429
        self.lastblockhash = self.nodes[0].getbestblockhash()
        self.tip = int("0x" + self.lastblockhash, 0)
        self.lastblockheight = 429
        self.lastblocktime = int(time.time()) + 429

        print(
            "Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]"
        )
        test1txs = [
            self.create_transaction(self.nodes[0], coinbase_txid[0],
                                    self.ms_address, 49)
        ]
        txid1 = self.tx_submit(self.nodes[0], test1txs[0])
        test1txs.append(
            self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
        txid2 = self.tx_submit(self.nodes[0], test1txs[1])
        test1txs.append(
            self.create_transaction(self.nodes[0], coinbase_txid[1],
                                    self.wit_ms_address, 49))
        txid3 = self.tx_submit(self.nodes[0], test1txs[2])
        self.block_submit(self.nodes[0], test1txs, False, True)

        print(
            "Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation"
        )
        test2tx = self.create_transaction(self.nodes[0], txid2,
                                          self.ms_address, 48)
        trueDummy(test2tx)
        txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)

        print(
            "Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]"
        )
        self.block_submit(self.nodes[0], [test2tx], False, True)

        print(
            "Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation"
        )
        test4tx = self.create_transaction(self.nodes[0], txid4, self.address,
                                          47)
        test6txs = [CTransaction(test4tx)]
        trueDummy(test4tx)
        self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR)
        self.block_submit(self.nodes[0], [test4tx])

        print(
            "Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation"
        )
        test5tx = self.create_transaction(self.nodes[0], txid3,
                                          self.wit_address, 48)
        test6txs.append(CTransaction(test5tx))
        test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
        self.tx_submit(self.nodes[0], test5tx, NULLDUMMY_ERROR)
        self.block_submit(self.nodes[0], [test5tx], True)

        print(
            "Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]"
        )
        for i in test6txs:
            self.tx_submit(self.nodes[0], i)
        self.block_submit(self.nodes[0], test6txs, True, True)
Example #5
    def run_test(self):
        node0 = NodeConnCB()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that an invalid-according-to-CLTV transaction can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                             block_time)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()
        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason,
                         b'bad-version(0x00000003)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info(
            "Test that invalid-according-to-cltv transactions cannot appear in a block"
        )
        block.nVersion = 4

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for CLTV by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(MsgTx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(),
                   lock=mininode_lock,
                   err_msg="last_message")
        with mininode_lock:
            assert node0.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Negative locktime' in node0.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
        )
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
        spendtx.rehash()

        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(MsgBlock(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Example #6
    def run_test(self):
        testnode0 = TestNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], testnode0,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode0.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()
        testnode0.wait_for_verack()

        # Verify mininodes are connected to zcashd nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine some blocks so we can spend
        coinbase_blocks = self.nodes[0].generate(200)
        node_address = self.nodes[0].getnewaddress()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 0)

        # Mininodes send expiring soon transaction in "tx" message to zcashd node
        self.send_transaction(testnode0, coinbase_blocks[0], node_address, 203)

        # Assert that the tx is not in the mempool (expiring soon)
        assert_equal([], self.nodes[0].getrawmempool())
        assert_equal([], self.nodes[1].getrawmempool())
        assert_equal([], self.nodes[2].getrawmempool())

        # Mininodes send transaction in "tx" message to zcashd node
        tx2 = self.send_transaction(testnode0, coinbase_blocks[1],
                                    node_address, 204)

        # tx2 is not expiring soon
        assert_equal([tx2.hash], self.nodes[0].getrawmempool())
        assert_equal([tx2.hash], self.nodes[1].getrawmempool())
        # node 2 is isolated
        assert_equal([], self.nodes[2].getrawmempool())

        # Verify txid for tx2
        self.verify_inv(testnode0, tx2)
        self.send_data_message(testnode0, tx2)
        self.verify_last_tx(testnode0, tx2)

        # Sync and mine an empty block with node 2, leaving tx in the mempool of node0 and node1
        for blkhash in coinbase_blocks:
            blk = self.nodes[0].getblock(blkhash, 0)
            self.nodes[2].submitblock(blk)
        self.nodes[2].generate(1)

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Reconnect node 2 to the network
        connect_nodes_bi(self.nodes, 0, 2)

        # Set up test node for node 2
        testnode2 = TestNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], testnode2,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode2.add_connection(connections[-1])

        # Verify block count
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockcount(), 201)
        assert_equal(self.nodes[1].getblockcount(), 201)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Verify contents of mempool
        assert_equal([tx2.hash], self.nodes[0].getrawmempool())
        assert_equal([tx2.hash], self.nodes[1].getrawmempool())
        assert_equal([], self.nodes[2].getrawmempool())

        # Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
        try:
            rawtx2 = hexlify(tx2.serialize())
            self.nodes[2].sendrawtransaction(rawtx2)
            fail("Sending transaction should have failed")
        except JSONRPCException as e:
            assert_equal(
                "tx-expiring-soon: expiryheight is 204 but should be at least 205 to avoid transaction expiring soon",
                e.error['message'])

        self.send_data_message(testnode0, tx2)

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
        with mininode_lock:
            assert_equal(testnode0.last_tx, None)

        # Verify mininode received a "notfound" message containing the txid of tx2
        with mininode_lock:
            msg = testnode0.last_notfound
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Create a transaction to verify that processing of "getdata" messages is functioning
        tx3 = self.send_transaction(testnode0, coinbase_blocks[2],
                                    node_address, 999)

        self.send_data_message(testnode0, tx3)
        self.verify_last_tx(testnode0, tx3)
        # Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
        self.verify_inv(testnode0, tx3)
        self.verify_inv(testnode2, tx3)

        # Verify contents of mempool
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
        assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))

        # Verify banscores for the nodes are still zero
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))

        [c.disconnect_node() for c in connections]
Example #7
    def run_test(self):
        self.address = self.nodes[0].getnewaddress()
        self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])
        self.wit_address = self.nodes[0].addwitnessaddress(self.address)
        self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)

        NetworkThread().start()  # Start up network handling in another thread
        self.coinbase_blocks = self.nodes[0].generate(2)  # Block 2
        coinbase_txid = []
        for i in self.coinbase_blocks:
            coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])

        for i in range(COINBASE_MATURITY):
            block = create_block(
                int(self.nodes[0].getbestblockhash(), 16),
                create_coinbase(self.nodes[0].getblockcount() + 1),
                int(time.time()) + 2 + i)
            block.nVersion = 4
            block.hashMerkleRoot = block.calc_merkle_root()
            block.rehash()
            block.solve()
            self.nodes[0].submitblock(bytes_to_hex_str(block.serialize()))

        # Generate the number of signalling blocks that the continuation of the test case expects
        self.nodes[0].generate(863 - COINBASE_MATURITY - 2 - 2)
        self.lastblockhash = self.nodes[0].getbestblockhash()
        self.tip = int("0x" + self.lastblockhash, 0)
        self.lastblockheight = self.nodes[0].getblockcount()
        self.lastblocktime = int(time.time()) + self.lastblockheight + 1

        self.log.info(
            "Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]"
        )
        test1txs = [
            self.create_transaction(self.nodes[0], coinbase_txid[0],
                                    self.ms_address, INITIAL_BLOCK_REWARD - 1)
        ]
        txid1 = self.nodes[0].sendrawtransaction(
            bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
        test1txs.append(
            self.create_transaction(self.nodes[0], txid1, self.ms_address,
                                    INITIAL_BLOCK_REWARD - 2))
        txid2 = self.nodes[0].sendrawtransaction(
            bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
        test1txs.append(
            self.create_transaction(self.nodes[0], coinbase_txid[1],
                                    self.wit_ms_address,
                                    INITIAL_BLOCK_REWARD - 1))
        txid3 = self.nodes[0].sendrawtransaction(
            bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
        self.block_submit(self.nodes[0], test1txs, False, True)

        self.log.info(
            "Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation"
        )
        test2tx = self.create_transaction(self.nodes[0], txid2,
                                          self.ms_address,
                                          INITIAL_BLOCK_REWARD - 3)
        trueDummy(test2tx)
        assert_raises_rpc_error(
            -26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction,
            bytes_to_hex_str(test2tx.serialize_with_witness()), True)

        self.log.info(
            "Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]"
        )
        self.block_submit(self.nodes[0], [test2tx], False, True)

        self.log.info(
            "Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation"
        )
        test4tx = self.create_transaction(self.nodes[0], test2tx.hash,
                                          self.address,
                                          INITIAL_BLOCK_REWARD - 4)
        test6txs = [CTransaction(test4tx)]
        trueDummy(test4tx)
        assert_raises_rpc_error(
            -26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction,
            bytes_to_hex_str(test4tx.serialize_with_witness()), True)
        self.block_submit(self.nodes[0], [test4tx])

        self.log.info(
            "Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation"
        )
        test5tx = self.create_transaction(self.nodes[0], txid3,
                                          self.wit_address,
                                          INITIAL_BLOCK_REWARD - 2)
        test6txs.append(CTransaction(test5tx))
        test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
        assert_raises_rpc_error(
            -26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction,
            bytes_to_hex_str(test5tx.serialize_with_witness()), True)
        self.block_submit(self.nodes[0], [test5tx], True)

        self.log.info(
            "Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]"
        )
        for i in test6txs:
            self.nodes[0].sendrawtransaction(
                bytes_to_hex_str(i.serialize_with_witness()), True)
        self.block_submit(self.nodes[0], test6txs, True, True)
Example #8
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()  # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(
            NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [n.generate(1) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = time.time() + 1
        for i in xrange(2):
            blocks_h2.append(
                create_block(tips[i], create_coinbase_h(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [x.sync_with_ping() for x in [test_node, white_node]]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print "First height 2 block accepted by both nodes"

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in xrange(2):
            blocks_h2f.append(
                create_block(tips[i], create_coinbase_h(2),
                             blocks_h2[i].nTime + 1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [x.sync_with_ping() for x in [test_node, white_node]]
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        print "Second height 2 block accepted only from whitelisted peer"

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in xrange(2):
            blocks_h3.append(
                create_block(blocks_h2f[i].sha256, create_coinbase_h(3),
                             blocks_h2f[i].nTime + 1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [x.sync_with_ping() for x in [test_node, white_node]]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print "Unrequested more-work block accepted from non-whitelisted peer"
        except:
            raise AssertionError(
                "Unrequested more work block was not processed")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print "Successfully reorged to length 3 chain from whitelisted peer"

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message_1 = msg_headers()
        headers_message_2 = msg_headers()
        all_blocks = []  # node0's blocks
        for j in xrange(2):
            chainHeight = 3
            for i in xrange(288):
                next_block = create_block(tips[j].sha256,
                                          create_coinbase_h(chainHeight + 1),
                                          tips[j].nTime + 1,
                                          get_nBits(chainHeight))
                next_block.solve()
                chainHeight += 1
                if j == 0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    # we cannot send more than zend::MAX_HEADERS_RESULTS=160 headers to a zend node
                    if len(headers_message_1.headers) < 160:
                        headers_message_1.headers.append(
                            CBlockHeader(next_block))
                    else:
                        headers_message_2.headers.append(
                            CBlockHeader(next_block))
                tips[j] = next_block
                if ((i + 1) % 10) == 0:
                    print "... {} blocks created".format(i + 1)

        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError(
                        "Unrequested block too far-ahead should have been ignored"
                    )
            except:
                if x == all_blocks[287]:
                    print "Unrequested block too far-ahead not processed"
                else:
                    raise AssertionError(
                        "Unrequested block with more work should have been accepted"
                    )

        headers_message_2.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(
            headers_message_1)  # Send headers leading to tip
        white_node.send_message(
            headers_message_2)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print "Unrequested block far ahead of tip accepted from whitelisted peer"
        except:
            raise AssertionError(
                "Unrequested block from whitelisted peer not accepted")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print "Unrequested block that would complete more-work chain was ignored"

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print "Inv at tip triggered getdata for unprocessed block"

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print "Successfully reorged to longer chain from non-whitelisted peer"

        [c.disconnect_node() for c in connections]
Example #9
 def run_test(self):
     test = TestManager(self, self.options.tmpdir)
     # Don't call test.add_all_connections because there is only one node.
     NetworkThread().start()  # Start up network handling in another thread
     test.run()
Example #10
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        self.test_node = TestNode()
        self.segwit_node = TestNode()
        self.old_node = TestNode()  # version 1 peer <--> segwit node

        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node),
            NodeConn('127.0.0.1',
                     p2p_port(1),
                     self.nodes[1],
                     self.segwit_node,
                     services=NODE_NETWORK | NODE_WITNESS),
            NodeConn('127.0.0.1',
                     p2p_port(1),
                     self.nodes[1],
                     self.old_node,
                     services=NODE_NETWORK)
        ]
        self.test_node.add_connection(connections[0])
        self.segwit_node.add_connection(connections[1])
        self.old_node.add_connection(connections[2])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        self.test_node.wait_for_verack()

        # We will need UTXOs to construct transactions in later tests.
        self.make_utxos()

        self.log.info("Running tests, pre-segwit activation:")

        self.log.info("Testing SENDCMPCT p2p message... ")
        self.test_sendcmpct(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_sendcmpct(self.nodes[1],
                            self.segwit_node,
                            2,
                            old_node=self.old_node)
        sync_blocks(self.nodes)

        self.log.info("Testing compactblock construction...")
        self.test_compactblock_construction(self.nodes[0], self.test_node, 1,
                                            False)
        sync_blocks(self.nodes)
        self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2,
                                            False)
        sync_blocks(self.nodes)

        self.log.info("Testing compactblock requests... ")
        self.test_compactblock_requests(self.nodes[0], self.test_node, 1,
                                        False)
        sync_blocks(self.nodes)
        self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2,
                                        False)
        sync_blocks(self.nodes)

        self.log.info("Testing getblocktxn requests...")
        self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
        sync_blocks(self.nodes)

        self.log.info("Testing getblocktxn handler...")
        self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
        self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
        sync_blocks(self.nodes)

        self.log.info(
            "Testing compactblock requests/announcements not at chain tip...")
        self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
        sync_blocks(self.nodes)
        self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
        self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
        sync_blocks(self.nodes)

        self.log.info("Testing handling of incorrect blocktxn responses...")
        self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node,
                                              2)
        sync_blocks(self.nodes)

        # End-to-end block relay tests
        self.log.info("Testing end-to-end block relay...")
        self.request_cb_announcements(self.test_node, self.nodes[0], 1)
        self.request_cb_announcements(self.old_node, self.nodes[1], 1)
        self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
        self.test_end_to_end_block_relay(
            self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
        self.test_end_to_end_block_relay(
            self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

        self.log.info("Testing handling of invalid compact blocks...")
        self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node,
                                             False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node,
                                             False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node,
                                             False)

        self.log.info(
            "Testing reconstructing compact blocks from all peers...")
        self.test_compactblock_reconstruction_multiple_peers(
            self.nodes[1], self.segwit_node, self.old_node)
        sync_blocks(self.nodes)

        # Advance to segwit activation
        self.log.info("Advancing to segwit activation")
        self.activate_segwit(self.nodes[1])
        self.log.info("Running tests, post-segwit activation...")

        self.log.info("Testing compactblock construction...")
        self.test_compactblock_construction(self.nodes[1], self.old_node, 1,
                                            True)
        self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2,
                                            True)
        sync_blocks(self.nodes)

        self.log.info("Testing compactblock requests (unupgraded node)... ")
        self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)

        self.log.info("Testing getblocktxn requests (unupgraded node)...")
        self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)

        # Need to manually sync node0 and node1, because post-segwit activation,
        # node1 will not download blocks from node0.
        self.log.info("Syncing nodes...")
        assert (self.nodes[0].getbestblockhash() !=
                self.nodes[1].getbestblockhash())
        while self.nodes[0].getblockcount() > self.nodes[1].getblockcount():
            block_hash = self.nodes[0].getblockhash(
                self.nodes[1].getblockcount() + 1)
            self.nodes[1].submitblock(self.nodes[0].getblock(
                block_hash, False))
        assert_equal(self.nodes[0].getbestblockhash(),
                     self.nodes[1].getbestblockhash())

        self.log.info("Testing compactblock requests (segwit node)... ")
        self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2,
                                        True)

        self.log.info("Testing getblocktxn requests (segwit node)...")
        self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
        sync_blocks(self.nodes)

        self.log.info(
            "Testing getblocktxn handler (segwit node should return witnesses)..."
        )
        self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
        self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)

        # Test that if we submitblock to node1, we'll get a compact block
        # announcement to all peers.
        # (Post-segwit activation, blocks won't propagate from node0 to node1
        # automatically, so don't bother testing a block announced to node0.)
        self.log.info("Testing end-to-end block relay...")
        self.request_cb_announcements(self.test_node, self.nodes[0], 1)
        self.request_cb_announcements(self.old_node, self.nodes[1], 1)
        self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
        self.test_end_to_end_block_relay(
            self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

        self.log.info("Testing handling of invalid compact blocks...")
        self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node,
                                             False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node,
                                             True)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node,
                                             True)

        self.log.info("Testing invalid index in cmpctblock message...")
        self.test_invalid_cmpctblock_message()
Example #11
    def run_test(self):
        testnode0 = TestNode()
        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], testnode0,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode0.add_connection(connections[0])

        # Start up network handling in another thread
        NetworkThread().start()
        testnode0.wait_for_verack()

        # Verify mininodes are connected to cruzadod nodes
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(1, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(0, peerinfo[0]["banscore"])

        # Mine some blocks so we can spend
        self.coinbase_blocks = self.nodes[0].generate(200)
        self.nodeaddress = self.nodes[0].getnewaddress()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 0)

        # Mininodes send expiring soon transaction in "tx" message to cruzadod node
        tx1 = self.create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                      self.nodeaddress, 10.0, 203)
        testnode0.send_message(msg_tx(tx1))

        # Mininodes send transaction in "tx" message to cruzadod node
        tx2 = self.create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                      self.nodeaddress, 10.0, 204)
        testnode0.send_message(msg_tx(tx2))

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Sync nodes 0 and 1
        sync_blocks(self.nodes[:2])
        sync_mempools(self.nodes[:2])

        # Verify contents of mempool
        assert (tx1.hash not in self.nodes[0].getrawmempool()
                )  # tx1 rejected as expiring soon
        assert (tx1.hash not in self.nodes[1].getrawmempool())
        assert (tx2.hash in self.nodes[0].getrawmempool())  # tx2 accepted
        assert (tx2.hash in self.nodes[1].getrawmempool())
        assert_equal(len(self.nodes[2].getrawmempool()),
                     0)  # node 2 is isolated and empty

        # Send p2p message "mempool" to receive contents from cruzadod node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify txid for tx2
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Send p2p message "getdata" to verify tx2 gets sent in "tx" message
        getdatamsg = msg_getdata()
        getdatamsg.inv = [CInv(1, tx2.sha256)]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        testnode0.sync_with_ping()

        # Verify data received in "tx" message is for tx2
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx2.sha256, incoming_tx.sha256)

        # Sync and mine an empty block with node 2, leaving tx in the mempool of node0 and node1
        for blkhash in self.coinbase_blocks:
            blk = self.nodes[0].getblock(blkhash, 0)
            self.nodes[2].submitblock(blk)
        self.nodes[2].generate(1)

        # Verify block count
        assert_equal(self.nodes[0].getblockcount(), 200)
        assert_equal(self.nodes[1].getblockcount(), 200)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Reconnect node 2 to the network
        connect_nodes_bi(self.nodes, 1, 2)

        # Set up test node for node 2
        testnode2 = TestNode()
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], testnode2,
                     "regtest", OVERWINTER_PROTO_VERSION))
        testnode2.add_connection(connections[-1])

        # Verify block count
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockcount(), 201)
        assert_equal(self.nodes[1].getblockcount(), 201)
        assert_equal(self.nodes[2].getblockcount(), 201)

        # Verify contents of mempool
        assert (tx2.hash in self.nodes[0].getrawmempool())
        assert (tx2.hash in self.nodes[1].getrawmempool())
        assert (tx2.hash not in self.nodes[2].getrawmempool())

        # Confirm tx2 cannot be submitted to a mempool because it is expiring soon.
        try:
            rawtx2 = hexlify(tx2.serialize())
            self.nodes[2].sendrawtransaction(rawtx2)
            assert (False)
        except JSONRPCException as e:
            errorString = e.error['message']
            assert ("tx-expiring-soon" in errorString)

        # Ask node 0 for tx2...
        with mininode_lock:
            testnode0.last_notfound = None
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify node 0 does not reply to "getdata" by sending "tx" message, as tx2 is expiring soon
        with mininode_lock:
            assert_equal(testnode0.last_tx, None)

        # Verify mininode received a "notfound" message containing the txid of tx2
        with mininode_lock:
            msg = testnode0.last_notfound
            assert_equal(len(msg.inv), 1)
            assert_equal(tx2.sha256, msg.inv[0].hash)

        # Create a transaction to verify that processing of "getdata" messages is functioning
        tx3 = self.create_transaction(self.nodes[0], self.coinbase_blocks[2],
                                      self.nodeaddress, 10.0, 999)

        # Mininodes send tx3 to cruzadod node
        testnode0.send_message(msg_tx(tx3))
        getdatamsg = msg_getdata()
        getdatamsg.inv = [CInv(1, tx3.sha256)]
        with mininode_lock:
            testnode0.last_tx = None
            testnode0.send_message(getdatamsg)

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify we received a "tx" message for tx3
        with mininode_lock:
            incoming_tx = testnode0.last_tx.tx
            incoming_tx.rehash()
            assert_equal(tx3.sha256, incoming_tx.sha256)

        # Send p2p message "mempool" to receive contents from cruzadod node in "inv" message
        with mininode_lock:
            testnode0.last_inv = None
            testnode0.send_message(msg_mempool())

        # Sync up with node after p2p messages delivered
        [x.sync_with_ping() for x in [testnode0, testnode2]]

        # Verify txid for tx3 is returned in "inv", but tx2 which is expiring soon is not returned
        with mininode_lock:
            msg = testnode0.last_inv
            assert_equal(len(msg.inv), 1)
            assert_equal(tx3.sha256, msg.inv[0].hash)

        # Verify contents of mempool
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[0].getrawmempool()))
        assert_equal({tx2.hash, tx3.hash}, set(self.nodes[1].getrawmempool()))
        assert_equal({tx3.hash}, set(self.nodes[2].getrawmempool()))

        # Verify banscores for the nodes are still zero
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[0].getpeerinfo()))
        assert_equal(
            0, sum(peer["banscore"] for peer in self.nodes[2].getpeerinfo()))

        [c.disconnect_node() for c in connections]
Example #12
    def run_test(self):
        block_count = 0

        # Create the P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = self.chain.get_spendable_output()

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        # adding extra transactions to get different block hashes
        block2_hard = self.chain.next_block(block_count,
                                            spend=out,
                                            extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3_easier = self.chain.next_block(block_count,
                                              spend=out,
                                              extra_txns=2)
        block_count += 1

        mining_candidate = self.nodes[0].getminingcandidate()
        block4_hard = self.chain.next_block(block_count)
        block4_hard.hashPrevBlock = int(mining_candidate["prevhash"], 16)
        block4_hard.nTime = mining_candidate["time"]
        block4_hard.nVersion = mining_candidate["version"]
        block4_hard.solve()

        mining_solution = {
            "id": mining_candidate["id"],
            "nonce": block4_hard.nNonce,
            "coinbase": ToHex(block4_hard.vtx[0]),
            "time": mining_candidate["time"],
            "version": mining_candidate["version"]
        }

        # send three "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"hard block2 hash: {block2_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
        self.log.info(f"hard block4 hash: {block4_hard.hash}")
        self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash},
                                self.nodes[0], self.log)

        # send one block via p2p and one via rpc
        node0.send_message(msg_block(block2_hard))

        # make the submitminingsolution rpc call in a separate thread because
        # waitaftervalidatingblock blocks its return
        submitminingsolution_thread = threading.Thread(
            target=self.nodes[0].submitminingsolution,
            args=(mining_solution, ))
        submitminingsolution_thread.start()

        # because self.nodes[0] rpc is blocked we use another rpc client
        rpc_client = get_rpc_proxy(rpc_url(
            get_datadir_path(self.options.tmpdir, 0), 0),
                                   0,
                                   coveragedir=self.options.coveragedir)

        wait_for_validating_blocks({block2_hard.hash, block4_hard.hash},
                                   rpc_client, self.log)

        self.log.info(f"easy block3 hash: {block3_easier.hash}")
        node1.send_message(msg_block(block3_easier))

        rpc_client.waitforblockheight(102)
        assert_equal(block3_easier.hash, rpc_client.getbestblockhash())

        # now we can remove waiting status from blocks and finish their validation
        rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
        rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
        submitminingsolution_thread.join()

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # easier block should still be on tip
        assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
Example #13
class BlockStoringInFile(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 3
        self.runner_nodes = []
        self.setup_clean_chain = True
        self.mining_block_max_size = 20 * ONE_MEGABYTE
        self.excessive_block_size = 22 * ONE_MEGABYTE

        extra_header_space = 8  # one block header takes up 8 bytes
        self.preferred_blockfile_size = 2 * (ONE_MEGABYTE + extra_header_space)
        # +1 is a magic number for extra block file space because the check of
        # whether the file is already full is written with >= instead of >
        self.preferred_blockfile_size += 1
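        # Assuming ONE_MEGABYTE == 2**20, this works out to
        # 2 * (1048576 + 8) + 1 = 2097169 bytes per block file.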

        self.extra_args = [
            '-whitelist=127.0.0.1',
            "-excessiveblocksize=%d" % self.excessive_block_size,
            "-preferredblockfilesize=%d" % self.preferred_blockfile_size,
            "-blockmaxsize=%d" % self.mining_block_max_size
        ]

    def setup_network(self):
        self.add_nodes(self.num_nodes)

        for i in range(self.num_nodes):
            self.start_node(i, self.extra_args)
            self.runner_nodes.append(RunnerNode(self.nodes[i], i))

        # Start up network handling in another thread
        self._network_thread = NetworkThread()
        self._network_thread.start()

        for i in range(self.num_nodes):
            self.runner_nodes[i].finish_setup_after_network_is_started(
                self.options.tmpdir)

    def __count_blk_files(self, block_number, expected_number_of_files,
                          node_number):
        blockfile_count = len(
            glob.glob(self.options.tmpdir + "/node" + str(node_number) +
                      "/regtest/blocks/blk0000*.dat"))

        assert blockfile_count == expected_number_of_files, (
            "unexpected blockfile count for block: " + str(block_number) +
            "; node: " + str(node_number) + "; expected: " +
            str(expected_number_of_files) + "; got: " + str(blockfile_count))

    def __compare_local_and_remote_block_size(self, local_block, remote_node):
        remote_best_block_hash = remote_node.getbestblockhash()
        assert_equal(remote_best_block_hash, local_block.hash)

        # check that we can successfully read block from file
        remote_block = remote_node.getblock(remote_best_block_hash)
        assert_equal(remote_block['size'], len(local_block.serialize()))

    def __send_and_test_block(self, runner_node, block_size,
                              expected_number_of_files):
        local_block, block_number = runner_node.create_and_send_block(
            block_size)
        self.__compare_local_and_remote_block_size(local_block,
                                                   runner_node.remote_node)
        self.__count_blk_files(block_number, expected_number_of_files,
                               runner_node.node_number)

    def __test_two_blocks_less_than_preferred_file_size_in_single_file(
            self, runner_node):
        print(
            "- test two blocks size is less than preferred file size, put in single file"
        )
        self.__send_and_test_block(runner_node, ONE_MEGABYTE, 2)
        self.__send_and_test_block(runner_node, ONE_MEGABYTE, 2)

    def __test_one_block_in_file_second_block_exceeds_preferred_file_size(
            self, runner_node):
        print(
            "- test one block in file, second block exceeds preferred file size"
        )
        self.__send_and_test_block(runner_node, ONE_MEGABYTE, 2)
        self.__send_and_test_block(runner_node, ONE_MEGABYTE * 2, 3)
        self.__send_and_test_block(runner_node, ONE_MEGABYTE, 4)
        self.__send_and_test_block(runner_node, ONE_MEGABYTE, 4)

    def __test_block_larger_than_preferred_file_size(self, runner_node):
        print("- test block larger than preferred filesize")
        self.__send_and_test_block(runner_node, ONE_MEGABYTE * 5, 2)
        self.__send_and_test_block(runner_node, ONE_MEGABYTE, 3)
        self.__send_and_test_block(runner_node, ONE_MEGABYTE, 3)

    def run_test(self):
        # each test should run with a different runner/test node combination because
        # we want to start with a fresh blockfile structure

        self.__test_two_blocks_less_than_preferred_file_size_in_single_file(
            self.runner_nodes[0])
        self.__test_one_block_in_file_second_block_exceeds_preferred_file_size(
            self.runner_nodes[1])
        self.__test_block_larger_than_preferred_file_size(self.runner_nodes[2])
Example #14
class ComparisonTestFramework(BitcoinTestFramework):
    """Test framework for doing p2p comparison testing

    Sets up some bitcoind binaries:
    - 1 binary: test binary
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries"""
    def __init__(self):
        super(ComparisonTestFramework, self).__init__()
        self.chain = ChainManager()
        self._network_thread = None
        if not hasattr(self, "testbinary"):
            self.testbinary = [os.getenv("BITCOIND", "bitcoind")]
        if not hasattr(self, "refbinary"):
            self.refbinary = [os.getenv("BITCOIND", "bitcoind")]

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        parser.add_option("--testbinary",
                          dest="testbinary",
                          help="bitcoind binary to test")
        parser.add_option(
            "--refbinary",
            dest="refbinary",
            help="bitcoind binary to use for reference nodes (if any)")

    def setup_network(self):
        extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        if self.options.testbinary:
            self.testbinary = [self.options.testbinary]
        if self.options.refbinary:
            self.refbinary = [self.options.refbinary]
        binaries = [self.testbinary] + [self.refbinary] * (self.num_nodes - 1)
        self.add_nodes(self.num_nodes, extra_args, binaries=binaries)
        self.start_nodes()
        self.init_network()

    def restart_network(self, timeout=None):
        self.test.clear_all_connections()
        # If we had a network thread from earlier, make sure it's finished before reconnecting
        if self._network_thread is not None:
            self._network_thread.join(timeout)
        # Reconnect
        self.test.add_all_connections(self.nodes)
        self._network_thread = NetworkThread()
        self._network_thread.start()

    def init_network(self):
        # Create the test manager which helps to manage the test cases
        self.test = TestManager(self, self.options.tmpdir)
        # (Re)start network
        self.restart_network()

    # returns a test case that asserts that the current tip was accepted
    def accepted(self, sync_timeout=300):
        return TestInstance([[self.chain.tip, True]],
                            sync_timeout=sync_timeout)

    # returns a test case that asserts that the current tip was rejected
    def rejected(self, reject=None):
        if reject is None:
            return TestInstance([[self.chain.tip, False]])
        else:
            return TestInstance([[self.chain.tip, reject]])
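To show how this framework is typically driven: a subclass implements a get_tests() generator and yields the accepted()/rejected() instances defined above, while run_test() hands control to the TestManager. A minimal hedged sketch follows; the block-building details and the way the second block is invalidated are illustrative assumptions, not part of this snippet.

class ExampleComparisonTest(ComparisonTestFramework):
    def run_test(self):
        # TestManager drives get_tests() and applies each TestInstance.
        self.test.run()

    def get_tests(self):
        # Build on the node's current chain using the ChainManager helper.
        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))

        # A consensus-valid block on the tip should be accepted.
        self.chain.next_block(1)
        yield self.accepted()

        # A deliberately corrupted block should be rejected (the exact way of
        # invalidating it here is hypothetical).
        bad_block = self.chain.next_block(2)
        bad_block.nVersion = 0
        bad_block.rehash()
        bad_block.solve()
        yield self.rejected()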
Example #15
0
 def run_test(self):
     test = TestManager()
     test.add_new_connection(
         NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
     NetworkThread().start()  # Start up network handling in another thread
     test.run()
Example #16
0
    def run_test(self):
        # Set up test nodes.
        # - test_nodes[0] will only request v4 transactions
        # - test_nodes[1] will only request v5 transactions
        # - test_nodes[2] will test invalid v4 request using MSG_WTXID
        # - test_nodes[3] will test invalid v5 request using MSG_TX
        test_nodes = []
        connections = []

        for i in range(4):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1',
                         p2p_port(0),
                         self.nodes[0],
                         test_nodes[i],
                         protocol_version=NU5_PROTO_VERSION))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        net_version = self.nodes[0].getnetworkinfo()["protocolversion"]
        if net_version < NU5_PROTO_VERSION:
            # Sending a getdata message containing a MSG_WTX CInv message type
            # results in a reject message.
            self.verify_invalid_cinv(
                test_nodes[0],
                connections[0],
                5,
                "Negotiated protocol version does not support CInv message type MSG_WTX",
            )

            # Sending a getdata message containing an invalid CInv message type
            # results in a reject message.
            self.verify_invalid_cinv(test_nodes[1], connections[1], 0xffff,
                                     "Unknown CInv message type")

            print(
                "Node's block index is not NU5-aware, skipping remaining tests"
            )
            return

        # Load funds into the Sprout address
        sproutzaddr = self.nodes[0].z_getnewaddress('sprout')
        result = self.nodes[2].z_shieldcoinbase("*", sproutzaddr, 0)
        wait_and_assert_operationid_status(self.nodes[2], result['opid'])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        # Activate NU5. Block height after this is 210.
        self.nodes[0].generate(9)
        self.sync_all()

        # Add v4 transaction to the mempool.
        node1_taddr = self.nodes[1].getnewaddress()
        opid = self.nodes[0].z_sendmany(sproutzaddr, [{
            'address': node1_taddr,
            'amount': 1,
        }])
        v4_txid = uint256_from_str(
            hex_str_to_bytes(
                wait_and_assert_operationid_status(self.nodes[0], opid))[::-1])

        # Add v5 transaction to the mempool.
        v5_txid = self.nodes[0].sendtoaddress(node1_taddr, 1, "", "", True)
        v5_tx = self.nodes[0].getrawtransaction(v5_txid, 1)
        assert_equal(v5_tx['version'], 5)
        v5_txid = uint256_from_str(hex_str_to_bytes(v5_txid)[::-1])
        v5_auth_digest = uint256_from_str(
            hex_str_to_bytes(v5_tx['authdigest'])[::-1])

        # Wait for the mempools to sync.
        self.sync_all()

        #
        # inv
        #

        # On a mempool request, nodes should return an inv message containing:
        # - the v4 tx, with type MSG_TX.
        # - the v5 tx, with type MSG_WTX.
        for testnode in test_nodes:
            self.verify_inv(testnode, [
                self.cinv_for(v4_txid),
                self.cinv_for(v5_txid, v5_auth_digest),
            ])

        #
        # getdata
        #

        # We can request a v4 transaction with MSG_TX.
        self.send_data_message(test_nodes[0], v4_txid)
        self.verify_last_tx(test_nodes[0], v4_txid)

        # We can request a v5 transaction with MSG_WTX.
        self.send_data_message(test_nodes[1], v5_txid, v5_auth_digest)
        self.verify_last_tx(test_nodes[1], v5_txid, v5_auth_digest)

        # Requesting with a different authDigest results in a notfound.
        self.send_data_message(test_nodes[1], v5_txid, 1)
        self.verify_last_notfound(test_nodes[1], v5_txid, 1)

        # Requesting a v4 transaction with MSG_WTX causes a disconnect.
        self.send_data_message(test_nodes[2], v4_txid, (1 << 256) - 1)
        self.verify_disconnected(test_nodes[2])

        # Requesting a v5 transaction with MSG_TX causes a disconnect.
        self.send_data_message(test_nodes[3], v5_txid)
        self.verify_disconnected(test_nodes[3])

        # Sending a getdata message containing an invalid CInv message type
        # results in a reject message.
        self.verify_invalid_cinv(test_nodes[0], connections[0], 0xffff,
                                 "Unknown CInv message type")

        [c.disconnect_node() for c in connections]
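The test above leans on several helpers that are not included in the snippet (cinv_for, send_data_message, verify_last_tx, and friends). As a rough sketch of the first two, assuming MSG_TX/MSG_WTX inv type values of 1 and 5 and a CInv constructor that can carry the auth digest (both are assumptions for illustration):

from test_framework.mininode import CInv, msg_getdata   # assumed module path

MSG_TX = 1    # assumed inv type values, for illustration only
MSG_WTX = 5

def cinv_for(self, txid, auth_digest=None):
    # v4 transactions are identified by txid alone (MSG_TX); v5 transactions
    # are identified by (txid, auth_digest) and use MSG_WTX.
    if auth_digest is None:
        return CInv(MSG_TX, txid)
    return CInv(MSG_WTX, txid, auth_digest)

def send_data_message(self, test_node, txid, auth_digest=None):
    # Ask the node for the transaction via a getdata message over P2P.
    getdata = msg_getdata()
    getdata.inv.append(self.cinv_for(txid, auth_digest))
    test_node.send_message(getdata)
    test_node.sync_with_ping()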
Example #17
0
    def run_test(self):
        node0 = NodeConnCB()

        connections = []
        connections.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        NetworkThread().start()
        node0.wait_for_verack()

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
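The send_block_request/send_header_request and last_block_equals/last_header_equals helpers used above are not shown in this snippet; roughly, they wrap getdata/getheaders messages and inspect the connection's last_message dict. A hedged sketch of methods on the test class, assuming the standard mininode message types:

from test_framework.mininode import CInv, msg_getdata, msg_getheaders  # assumed module path

def send_block_request(self, block_hash, node):
    # Request a single block by hash over P2P (inv type 2 == block).
    msg = msg_getdata()
    msg.inv.append(CInv(2, block_hash))
    node.send_message(msg)

def send_header_request(self, block_hash, node):
    # Request the header for a single block by hash.
    msg = msg_getheaders()
    msg.hashstop = block_hash
    node.send_message(msg)

def last_block_equals(self, expected_hash, node):
    # True if the most recently received block message carries expected_hash.
    block_msg = node.last_message.get("block")
    return block_msg and block_msg.block.rehash() == expected_hash

def last_header_equals(self, expected_hash, node):
    # True if the most recently received headers message starts with expected_hash.
    headers_msg = node.last_message.get("headers")
    return (headers_msg and headers_msg.headers
            and headers_msg.headers[0].rehash() == expected_hash)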
Example #18
0
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = NodeConnCB()  # connects to node0 (not whitelisted)
        white_node = NodeConnCB()  # connects to node1 (whitelisted)
        min_work_node = NodeConnCB()  # connects to node2 (not whitelisted)

        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node),
            NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node),
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)
        ]
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])
        min_work_node.add_connection(connections[2])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()
        min_work_node.wait_for_verack()

        # 1. Have nodes mine a block (nodes1/2 leave IBD)
        [n.generate(1) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted by nodes 1/2
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(3):
            blocks_h2.append(
                create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(MsgBlock(blocks_h2[0]))
        white_node.send_message(MsgBlock(blocks_h2[1]))
        min_work_node.send_message(MsgBlock(blocks_h2[2]))

        for x in [test_node, white_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        assert_equal(self.nodes[2].getblockcount(), 1)
        self.log.info(
            "First height 2 block accepted by node0/node1; correctly rejected by node2"
        )

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(
                create_block(tips[i], create_coinbase(2),
                             blocks_h2[i].nTime + 1))
            blocks_h2f[i].solve()
        test_node.send_message(MsgBlock(blocks_h2f[0]))
        white_node.send_message(MsgBlock(blocks_h2f[1]))

        for x in [test_node, white_node]:
            x.sync_with_ping()
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        self.log.info(
            "Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(
                create_block(blocks_h2f[i].sha256, create_coinbase(3),
                             blocks_h2f[i].nTime + 1))
            blocks_h3[i].solve()
        test_node.send_message(MsgBlock(blocks_h3[0]))
        white_node.send_message(MsgBlock(blocks_h3[1]))

        for x in [test_node, white_node]:
            x.sync_with_ping()
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        self.nodes[0].getblock(blocks_h3[0].hash)
        self.log.info(
            "Unrequested more-work block accepted from non-whitelisted peer")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        self.log.info(
            "Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = MsgHeaders()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256,
                                          create_coinbase(i + 4),
                                          tips[j].nTime + 1)
                next_block.solve()
                if j == 0:
                    test_node.send_message(MsgBlock(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk",
                                self.nodes[0].getblock, all_blocks[-1].hash)

        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(MsgBlock(tips[1]))  # Now deliver the tip
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        self.log.info(
            "Unrequested block far ahead of tip accepted from whitelisted peer"
        )

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(MsgBlock(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info(
            "Unrequested block that would complete more-work chain was ignored"
        )

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(MsgInv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(MsgBlock(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.log.info(
            "Successfully reorged to longer chain from non-whitelisted peer")

        # 8. Connect node2 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 2)
        sync_blocks([self.nodes[0], self.nodes[2]])
        self.log.info("Successfully synced nodes 2 and 0")

        [c.disconnect_node() for c in connections]
Example #19
0
    def test_BIP(self, bipName, activated_version, invalidate,
                 invalidatePostSignature, bitno):
        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
        assert_equal(self.get_bip9_status(bipName)['since'], 0)

        # generate some coins for later
        self.coinbase_blocks = self.nodes[0].generate(2)
        self.height = 3  # height of the next block to build
        self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = int(time.time())

        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
        assert_equal(self.get_bip9_status(bipName)['since'], 0)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])
        assert (bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert_equal(tmpl['version'], 0x20000000)

        # Test 1
        # Advance from DEFINED to STARTED
        test_blocks = self.generate_blocks(141, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        assert_equal(self.get_bip9_status(bipName)['since'], 144)
        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert (tmpl['version'] & activated_version)

        # Test 1-A
        # check stats after the maximum number of "signalling not" blocks such that LOCKED_IN is still possible this period
        test_blocks = self.generate_blocks(
            36, 4, test_blocks)  # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(
            10, activated_version)  # 0x20000001 (signalling ready)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(
            self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
        assert_equal(
            self.get_bip9_status(bipName)['statistics']['possible'], True)

        # Test 1-B
        # check stats after one additional "signalling not" block -- LOCKED_IN is no longer possible this period
        test_blocks = self.generate_blocks(
            1, 4, test_blocks)  # 0x00000004 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(
            self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
        assert_equal(
            self.get_bip9_status(bipName)['statistics']['possible'], False)

        # Test 1-C
        # finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
        test_blocks = self.generate_blocks(
            97, activated_version)  # 0x20000001 (signalling ready)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
        assert_equal(
            self.get_bip9_status(bipName)['statistics']['possible'], True)
        assert_equal(self.get_bip9_status(bipName)['status'], 'started')

        # Test 2
        # Fail to achieve LOCKED_IN: only 100 out of 144 blocks signal bit 1
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(
            50, activated_version)  # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(
            20, 4, test_blocks)  # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(
            50, activated_version,
            test_blocks)  # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(
            24, 4, test_blocks)  # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        assert_equal(self.get_bip9_status(bipName)['since'], 144)
        assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert (tmpl['version'] & activated_version)

        # Test 3
        # 108 out of 144 blocks signal bit 1 to achieve LOCKED_IN
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(
            57, activated_version)  # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(
            26, 4, test_blocks)  # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(
            50, activated_version,
            test_blocks)  # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(
            10, 4, test_blocks)  # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        # check counting stats and "possible" flag before last block of this period achieves LOCKED_IN...
        assert_equal(
            self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
        assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
        assert_equal(
            self.get_bip9_status(bipName)['statistics']['possible'], True)
        assert_equal(self.get_bip9_status(bipName)['status'], 'started')

        # ...continue with Test 3
        test_blocks = self.generate_blocks(
            1, activated_version)  # 0x20000001 (signalling ready)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        assert_equal(self.get_bip9_status(bipName)['since'], 576)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])

        # Test 4
        # 143 more blocks (waiting period - 1)
        test_blocks = self.generate_blocks(143, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        assert_equal(self.get_bip9_status(bipName)['since'], 576)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])

        # Test 5
        # Check that the new rule is enforced
        spendtx = self.create_transaction(self.nodes[0],
                                          self.coinbase_blocks[0],
                                          self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(self.height),
                             self.last_block_time + 1)
        block.nVersion = activated_version
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.last_block_time += 1
        self.tip = block.sha256
        self.height += 1
        yield TestInstance([[block, True]])

        assert_equal(self.get_bip9_status(bipName)['status'], 'active')
        assert_equal(self.get_bip9_status(bipName)['since'], 720)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName in tmpl['rules'])
        assert (bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert (not (tmpl['version'] & (1 << bitno)))

        # Test 6
        # Check that the new sequence lock rules are enforced
        spendtx = self.create_transaction(self.nodes[0],
                                          self.coinbase_blocks[1],
                                          self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()

        block = create_block(self.tip, create_coinbase(self.height),
                             self.last_block_time + 1)
        block.nVersion = 5
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])

        # Restart all
        self.test.clear_all_connections()
        self.stop_nodes()
        self.nodes = []
        shutil.rmtree(self.options.tmpdir + "/node0")
        self.setup_chain()
        self.setup_network()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start()
        self.test.test_nodes[0].wait_for_verack()
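The generate_blocks helper called throughout test_BIP is not included in this snippet; it typically builds the requested number of solved blocks with the given nVersion on the current tip and appends them to a TestInstance-style list. A hedged sketch of such a method on the test class:

from test_framework.blocktools import create_block, create_coinbase  # assumed module path

def generate_blocks(self, number, version, test_blocks=None):
    # Build `number` solved blocks with nVersion == version on top of self.tip,
    # advancing the bookkeeping used elsewhere in the test.
    if test_blocks is None:
        test_blocks = []
    for _ in range(number):
        block = create_block(self.tip, create_coinbase(self.height),
                             self.last_block_time + 1)
        block.nVersion = version
        block.rehash()
        block.solve()
        test_blocks.append([block, True])
        self.last_block_time += 1
        self.tip = block.sha256
        self.height += 1
    return test_blocks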
    def run_test(self):
        block_count = 0

        # Create a P2P connection
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        node2 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2)
        node2.add_connection(connection)

        node3 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node3)
        node3.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()
        node2.wait_for_verack()
        node3.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        for i in range(100):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(100):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 101 via rpc")
        self.nodes[0].waitforblockheight(101)

        tip_block_num = block_count - 1

        block2 = self.chain.next_block(block_count, spend=out[0], extra_txns=8)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block3 = self.chain.next_block(block_count,
                                       spend=out[0],
                                       extra_txns=10)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block4 = self.chain.next_block(block_count,
                                       spend=out[0],
                                       extra_txns=12)
        block_count += 1

        self.chain.set_tip(tip_block_num)

        block5 = self.chain.next_block(block_count,
                                       spend=out[0],
                                       extra_txns=14)
        block5_num = block_count
        block_count += 1

        block6 = self.chain.next_block(block_count, spend=out[1], extra_txns=8)
        block_count += 1

        self.chain.set_tip(block5_num)

        block7 = self.chain.next_block(block_count,
                                       spend=out[1],
                                       extra_txns=10)

        self.log.info(f"block2 hash: {block2.hash}")
        self.nodes[0].waitaftervalidatingblock(block2.hash, "add")
        self.log.info(f"block3 hash: {block3.hash}")
        self.nodes[0].waitaftervalidatingblock(block3.hash, "add")
        self.log.info(f"block4 hash: {block4.hash}")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block2.hash, block3.hash, block4.hash},
                                self.nodes[0], self.log)

        node0.send_message(msg_block(block2))
        # make sure we started validating block2 first as we expect this one to
        # be terminated later on in the test before its validation is complete
        # (the algorithm for premature termination selects based on block height
        # and validation duration - blocks in validation with smaller height and
        # longer duration are terminated first)
        wait_for_validating_blocks({block2.hash}, self.nodes[0], self.log)

        node1.send_message(msg_block(block3))
        node2.send_message(msg_block(block4))
        # make sure we started validating blocks
        wait_for_validating_blocks({block2.hash, block3.hash, block4.hash},
                                   self.nodes[0], self.log)

        node3.send_message(msg_block(block5))
        self.log.info(f"block5 hash: {block5.hash}")

        # check the log file for the message saying block2 will not be considered by the current tip activation
        termination_log_found = False
        for line in open(
                glob.glob(self.options.tmpdir + "/node0" +
                          "/regtest/bitcoind.log")[0]):
            if f"Block {block2.hash} will not be considered by the current tip activation as the maximum parallel block" in line:
                termination_log_found = True
                self.log.info("Found line: %s", line.strip())
                break

        self.log.info(f"block6 hash: {block6.hash}")
        self.nodes[0].waitaftervalidatingblock(block6.hash, "add")
        self.log.info(f"block7 hash: {block7.hash}")
        self.nodes[0].waitaftervalidatingblock(block7.hash, "add")

        wait_for_waiting_blocks({block6.hash, block7.hash}, self.nodes[0],
                                self.log)

        node3.send_message(msg_block(block6))
        wait_for_validating_blocks({block6.hash}, self.nodes[0], self.log)

        node3.send_message(msg_block(block7))
        wait_for_validating_blocks({block7.hash}, self.nodes[0], self.log)

        self.nodes[0].waitaftervalidatingblock(block2.hash, "remove")
        # block2 should be canceled.
        wait_for_not_validating_blocks({block2.hash}, self.nodes[0], self.log)

        self.log.info("removing wait status from block7")
        self.nodes[0].waitaftervalidatingblock(block7.hash, "remove")

        # finish block7 validation
        wait_for_not_validating_blocks({block7.hash}, self.nodes[0], self.log)

        # remove wait status from the remaining blocks to finish their validation so the test exits properly
        self.nodes[0].waitaftervalidatingblock(block3.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "remove")
        self.nodes[0].waitaftervalidatingblock(block6.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block7 should be active in the end
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())

        # check log file for logging about which block validation was terminated
        termination_log_found = False
        for line in open(
                glob.glob(self.options.tmpdir + "/node0" +
                          "/regtest/bitcoind.log")[0]):
            if f"Block {block2.hash} validation was terminated before completion." in line:
                termination_log_found = True
                self.log.info("Found line: %s", line.strip())
                break

        assert_equal(termination_log_found, True)
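The wait_for_waiting_blocks / wait_for_validating_blocks / wait_for_not_validating_blocks helpers used in these parallel-validation tests poll the node until the given block hashes appear in (or disappear from) its validation queues. A rough sketch under stated assumptions; the RPC name getcurrentlyvalidatingblocks and the wait_until import path are assumptions, not confirmed by this snippet:

from test_framework.util import wait_until   # assumed module path

def wait_for_validating_blocks(block_hashes, node, log):
    # Poll until every hash is reported as currently being validated.
    def check():
        validating = set(node.getcurrentlyvalidatingblocks())  # assumed RPC name
        return set(block_hashes).issubset(validating)
    wait_until(check, timeout=60)
    log.info("blocks %s are being validated", block_hashes)

def wait_for_not_validating_blocks(block_hashes, node, log):
    # Poll until none of the hashes are still being validated.
    def check():
        validating = set(node.getcurrentlyvalidatingblocks())  # assumed RPC name
        return set(block_hashes).isdisjoint(validating)
    wait_until(check, timeout=60)
    log.info("blocks %s finished validating", block_hashes)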
    def run_test(self):
        block_count = 0

        # Create the P2P connections
        node0 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        node0.add_connection(connection)

        node1 = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
        node1.add_connection(connection)

        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()
        node1.wait_for_verack()

        self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
        block = self.chain.next_block(block_count)
        block_count += 1
        self.chain.save_spendable_output()
        node0.send_message(msg_block(block))

        num_blocks = 150
        for i in range(num_blocks):
            block = self.chain.next_block(block_count)
            block_count += 1
            self.chain.save_spendable_output()
            node0.send_message(msg_block(block))

        out = []
        for i in range(num_blocks):
            out.append(self.chain.get_spendable_output())

        self.log.info("waiting for block height 151 via rpc")
        self.nodes[0].waitforblockheight(num_blocks + 1)

        tip_block_num = block_count - 1

        # left branch
        block2 = self.chain.next_block(block_count,
                                       spend=out[0:9],
                                       extra_txns=8)
        block_count += 1
        node0.send_message(msg_block(block2))
        self.log.info(f"block2 hash: {block2.hash}")

        self.nodes[0].waitforblockheight(num_blocks + 2)

        # send blocks 3,4 for parallel validation on left branch
        self.chain.set_tip(tip_block_num)
        block3 = self.chain.next_block(block_count,
                                       spend=out[10:19],
                                       extra_txns=10)
        block_count += 1

        block4 = self.chain.next_block(block_count,
                                       spend=out[20:29],
                                       extra_txns=8)
        block_count += 1

        # send two "hard" blocks, with waitaftervalidatingblock we artificially
        # extend validation time.
        self.log.info(f"block3 hash: {block3.hash}")
        self.log.info(f"block4 hash: {block4.hash}")
        self.nodes[0].waitaftervalidatingblock(block4.hash, "add")

        # make sure block hashes are in waiting list
        wait_for_waiting_blocks({block4.hash}, self.nodes[0], self.log)

        node1.send_message(msg_block(block3))
        node1.send_message(msg_block(block4))

        # make sure we started validating blocks
        wait_for_validating_blocks({block4.hash}, self.nodes[0], self.log)

        # right branch
        self.chain.set_tip(tip_block_num)
        block5 = self.chain.next_block(block_count)
        # Add some txns from block2 & block3 to block5, just to check that they get
        # filtered from the mempool and not re-added
        block5_duplicated_txns = block3.vtx[1:3] + block2.vtx[1:3]
        self.chain.update_block(block_count, block5_duplicated_txns)
        block_count += 1
        node0.send_message(msg_block(block5))
        self.log.info(f"block5 hash: {block5.hash}")

        # and two blocks to extend second branch to cause reorg
        # - they must be sent from the same node as otherwise they will be
        #   rejected with "prev block not found" as we don't wait for the first
        #   block to arrive so there is a race condition which block is seen
        #   first when using multiple connections
        block6 = self.chain.next_block(block_count)
        node0.send_message(msg_block(block6))
        self.log.info(f"block6 hash: {block6.hash}")
        block_count += 1
        block7 = self.chain.next_block(block_count)
        node0.send_message(msg_block(block7))
        self.log.info(f"block7 hash: {block7.hash}")
        block_count += 1

        self.nodes[0].waitforblockheight(num_blocks + 4)
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())

        self.log.info(
            "releasing wait status on parallel blocks to finish their validation"
        )
        self.nodes[0].waitaftervalidatingblock(block4.hash, "remove")

        # wait till validation of block or blocks finishes
        node0.sync_with_ping()

        # block that arrived last on competing chain should be active
        assert_equal(block7.hash, self.nodes[0].getbestblockhash())

        # make sure that transactions from blocks 2 and 3 (except coinbases, and those
        # also in block 5) are in the mempool
        not_expected_in_mempool = set()
        for txn in block5_duplicated_txns:
            not_expected_in_mempool.add(txn.hash)
        expected_in_mempool = set()
        for txn in block2.vtx[1:] + block3.vtx[1:]:
            expected_in_mempool.add(txn.hash)
        expected_in_mempool = expected_in_mempool.difference(
            not_expected_in_mempool)

        mempool = self.nodes[0].getrawmempool()
        assert_equal(expected_in_mempool, set(mempool))
    def run_test(self):
        # Create all the connections we will need to node0 at the start because they all need to be
        # set up before we call NetworkThread().start()

        # Create a P2P connection with no association ID (old style)
        oldStyleConnCB = TestNode()
        oldStyleConn = NodeConn('127.0.0.1',
                                p2p_port(0),
                                self.nodes[0],
                                oldStyleConnCB,
                                nullAssocID=True)
        oldStyleConnCB.add_connection(oldStyleConn)

        # Create a P2P connection with a new association ID
        newStyleConnCB = TestNode()
        newStyleConn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                newStyleConnCB)
        newStyleConnCB.add_connection(newStyleConn)

        # Create a P2P connection with a new association ID and another connection that uses the same ID
        newStyleFirstConnCB = TestNode()
        newStyleFirstConn = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                                     newStyleFirstConnCB)
        newStyleFirstConnCB.add_connection(newStyleFirstConn)
        # By setting the assocID on this second NodeConn we prevent it from sending a version message
        newStyleSecondConnCB = TestNode()
        newStyleSecondConn = NodeConn('127.0.0.1',
                                      p2p_port(0),
                                      self.nodes[0],
                                      newStyleSecondConnCB,
                                      assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB.add_connection(newStyleSecondConn)

        # Some connections we will use to test setup of DATA2, DATA3, DATA4 streams
        newStyleSecondConnCB_Data2 = TestNode()
        newStyleSecondConn_Data2 = NodeConn('127.0.0.1',
                                            p2p_port(0),
                                            self.nodes[0],
                                            newStyleSecondConnCB_Data2,
                                            assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB_Data2.add_connection(newStyleSecondConn_Data2)
        newStyleSecondConnCB_Data3 = TestNode()
        newStyleSecondConn_Data3 = NodeConn('127.0.0.1',
                                            p2p_port(0),
                                            self.nodes[0],
                                            newStyleSecondConnCB_Data3,
                                            assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB_Data3.add_connection(newStyleSecondConn_Data3)
        newStyleSecondConnCB_Data4 = TestNode()
        newStyleSecondConn_Data4 = NodeConn('127.0.0.1',
                                            p2p_port(0),
                                            self.nodes[0],
                                            newStyleSecondConnCB_Data4,
                                            assocID=newStyleFirstConn.assocID)
        newStyleSecondConnCB_Data4.add_connection(newStyleSecondConn_Data4)

        # Some connections we will use to test error scenarios
        newStyleThirdConnCB = TestNode()
        badStreamConn1 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleThirdConnCB,
                                  assocID=create_association_id())
        newStyleThirdConnCB.add_connection(badStreamConn1)
        newStyleFourthConnCB = TestNode()
        badStreamConn2 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleFourthConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleFourthConnCB.add_connection(badStreamConn2)
        newStyleFifthConnCB = TestNode()
        badStreamConn3 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleFifthConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleFifthConnCB.add_connection(badStreamConn3)
        newStyleSixthConnCB = TestNode()
        badStreamConn4 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleSixthConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleSixthConnCB.add_connection(badStreamConn4)
        newStyleSeventhConnCB = TestNode()
        badStreamConn5 = NodeConn('127.0.0.1',
                                  p2p_port(0),
                                  self.nodes[0],
                                  newStyleSeventhConnCB,
                                  assocID=newStyleFirstConn.assocID)
        newStyleSeventhConnCB.add_connection(badStreamConn5)

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()

        # Wait for all connections to come up to the required initial state
        oldStyleConnCB.wait_for_protoconf()
        newStyleConnCB.wait_for_protoconf()
        newStyleFirstConnCB.wait_for_protoconf()

        # Check initial state
        with mininode_lock:
            assert_equal(oldStyleConnCB.recvAssocID, None)
        with mininode_lock:
            assert_equal(oldStyleConnCB.recvStreamPolicies,
                         b'BlockPriority,Default')
        with mininode_lock:
            assert_equal(newStyleConnCB.recvAssocID, newStyleConn.assocID)
        with mininode_lock:
            assert_equal(newStyleConnCB.recvStreamPolicies,
                         b'BlockPriority,Default')
        with mininode_lock:
            assert_equal(newStyleFirstConnCB.recvAssocID,
                         newStyleFirstConn.assocID)
        with mininode_lock:
            assert_equal(newStyleFirstConnCB.recvStreamPolicies,
                         b'BlockPriority,Default')
        with mininode_lock:
            assert_equal(len(newStyleSecondConnCB.message_count), 0)
        with mininode_lock:
            assert_equal(len(newStyleSecondConnCB_Data2.message_count), 0)
        with mininode_lock:
            assert_equal(len(newStyleSecondConnCB_Data3.message_count), 0)
        with mininode_lock:
            assert_equal(len(newStyleSecondConnCB_Data4.message_count), 0)
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 2,  # newStyleFirstConn
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 3,  # newStyleSecondConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 4,  # newStyleSecondConn_Data2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 5,  # newStyleSecondConn_Data3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 6,  # newStyleSecondConn_Data4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 7,  # badStreamConn1
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 8,  # badStreamConn2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 9,  # badStreamConn3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 10,  # badStreamConn4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 11,  # badStreamConn5
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)

        # Check a new block is received by all connections
        self.nodes[0].generate(1)
        tip = self.nodes[0].getbestblockhash()
        wait_until(lambda: oldStyleConnCB.seen_block(tip),
                   lock=mininode_lock,
                   timeout=5)
        wait_until(lambda: newStyleConnCB.seen_block(tip),
                   lock=mininode_lock,
                   timeout=5)
        wait_until(lambda: newStyleFirstConnCB.seen_block(tip),
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert (not newStyleSecondConnCB.seen_block(tip))
        with mininode_lock:
            assert (not newStyleSecondConnCB_Data2.seen_block(tip))
        with mininode_lock:
            assert (not newStyleSecondConnCB_Data3.seen_block(tip))
        with mininode_lock:
            assert (not newStyleSecondConnCB_Data4.seen_block(tip))

        # Send create new stream message
        newStyleSecondConn.send_message(
            msg_createstream(stream_type=StreamType.DATA1.value,
                             stream_policy=b"BlockPriority",
                             assocID=newStyleFirstConn.assocID))
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,
                'associd': str(newStyleConn.assocID),  # newStyleConn
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 2,
                'associd': str(newStyleFirstConn.assocID
                               ),  # newStyleFirstConn & newStyleSecondConn
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 4,  # newStyleSecondConn_Data2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 5,  # newStyleSecondConn_Data3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 6,  # newStyleSecondConn_Data4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 7,  # badStreamConn1
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 8,  # badStreamConn2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 9,  # badStreamConn3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 10,  # badStreamConn4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 11,  # badStreamConn5
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)
        with mininode_lock:
            assert (newStyleSecondConnCB.last_streamack is not None)

        # Send create stream with wrong association ID
        badStreamConn1.send_message(
            msg_createstream(stream_type=StreamType.DATA2.value,
                             assocID=badStreamConn1.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleThirdConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert (newStyleThirdConnCB.last_streamack is None)
        assert ("No node found with association ID"
                in str(newStyleThirdConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn1.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream with missing association ID
        badStreamConn5.send_message(
            msg_createstream(stream_type=StreamType.DATA2.value, assocID=""))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleSeventhConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert (newStyleSeventhConnCB.last_streamack is None)
        assert ("Badly formatted message"
                in str(newStyleSeventhConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn5.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream for unknown stream type
        badStreamConn2.send_message(
            msg_createstream(stream_type=9, assocID=badStreamConn2.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleFourthConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert (newStyleFourthConnCB.last_streamack is None)
        assert ("StreamType out of range"
                in str(newStyleFourthConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn2.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream for existing stream type
        badStreamConn3.send_message(
            msg_createstream(stream_type=StreamType.GENERAL.value,
                             assocID=badStreamConn3.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleFifthConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert (newStyleFifthConnCB.last_streamack is None)
        assert ("Attempt to overwrite existing stream"
                in str(newStyleFifthConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn3.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Send create stream with unknown stream policy specified
        badStreamConn4.send_message(
            msg_createstream(stream_type=StreamType.GENERAL.value,
                             stream_policy=b"UnknownPolicy",
                             assocID=badStreamConn3.assocID))
        # Should receive reject, no streamack
        wait_until(lambda: newStyleSixthConnCB.last_reject is not None,
                   lock=mininode_lock,
                   timeout=5)
        with mininode_lock:
            assert (newStyleSixthConnCB.last_streamack is None)
        assert ("Unknown stream policy name"
                in str(newStyleSixthConnCB.last_reject.reason))
        # Connection will be closed
        wait_until(lambda: badStreamConn4.state == "closed",
                   lock=mininode_lock,
                   timeout=5)

        # Check streams are in the expected state after all those errors
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 2,  # newStyleFirstConn & newStyleSecondConn
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 4,  # newStyleSecondConn_Data2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 5,  # newStyleSecondConn_Data3
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 6,  # newStyleSecondConn_Data4
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)
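
        # check_peer_info() is a helper defined elsewhere in this test. A
        # minimal sketch of what it presumably does (illustrative only, not
        # the actual implementation) is to project getpeerinfo() down to the
        # fields of interest and compare against the expected list:
        #
        #   def check_peer_info(self, node, expected):
        #       actual = [{'id': p['id'],
        #                  'associd': p['associd'],
        #                  'streampolicy': p['streampolicy'],
        #                  'streams': p['streams']} for p in node.getpeerinfo()]
        #       return actual == expected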

        # See if we can establish all the possible stream types
        newStyleSecondConn_Data2.send_message(
            msg_createstream(stream_type=StreamType.DATA2.value,
                             assocID=newStyleFirstConn.assocID))
        newStyleSecondConn_Data3.send_message(
            msg_createstream(stream_type=StreamType.DATA3.value,
                             assocID=newStyleFirstConn.assocID))
        newStyleSecondConn_Data4.send_message(
            msg_createstream(stream_type=StreamType.DATA4.value,
                             assocID=newStyleFirstConn.assocID))
        expected = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,
                'associd': str(newStyleConn.assocID),  # newStyleConn
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                # newStyleFirstConn, newStyleSecondConn, newStyleSecondConn_Data2,
                # newStyleSecondConn_Data3, newStyleSecondConn_Data4
                'id': 2,
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1', 'DATA2', 'DATA3', 'DATA4']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected),
                   timeout=5)

        # Connect 2 nodes and check they establish the expected streams
        connect_nodes(self.nodes[0], 1)
        expected0 = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                # newStyleFirstConn, newStyleSecondConn, newStyleSecondConn_Data2,
                # newStyleSecondConn_Data3, newStyleSecondConn_Data4
                'id': 2,
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1', 'DATA2', 'DATA3', 'DATA4']
            },
            {
                'id': 12,  # A new association established to node1
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected0),
                   timeout=5)
        expected1 = [
            {
                'id': 0,  # An association to node0
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[1], expected1),
                   timeout=5)

        # Connect 2 nodes, one of which has streams disabled, and check they establish the expected streams
        connect_nodes(self.nodes[0], 2)
        expected0 = [
            {
                'id': 0,  # oldStyleConn
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                'id': 1,  # newStyleConn
                'associd': str(newStyleConn.assocID),
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
            {
                # newStyleFirstConn, newStyleSecondConn, newStyleSecondConn_Data2,
                # newStyleSecondConn_Data3, newStyleSecondConn_Data4
                'id': 2,
                'associd': str(newStyleFirstConn.assocID),
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1', 'DATA2', 'DATA3', 'DATA4']
            },
            {
                'id': 12,  # Association to node 1
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 14,  # Old style association to node 2
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[0], expected0),
                   timeout=5)
        expected2 = [
            {
                'id': 0,  # An association to node0
                'associd': 'Not-Set',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[2], expected2),
                   timeout=5)

        # Make sure everyone sees all blocks over whatever stream
        self.nodes[0].generate(1)
        tip = self.nodes[0].getbestblockhash()
        wait_until(lambda: self.nodes[1].getbestblockhash() == tip, timeout=5)
        wait_until(lambda: self.nodes[2].getbestblockhash() == tip, timeout=5)

        self.nodes[1].generate(1)
        tip = self.nodes[1].getbestblockhash()
        wait_until(lambda: self.nodes[0].getbestblockhash() == tip, timeout=5)
        wait_until(lambda: self.nodes[2].getbestblockhash() == tip, timeout=5)

        self.nodes[2].generate(1)
        tip = self.nodes[2].getbestblockhash()
        wait_until(lambda: self.nodes[0].getbestblockhash() == tip, timeout=5)
        wait_until(lambda: self.nodes[1].getbestblockhash() == tip, timeout=5)

        # Add another node, configured to only support the Default stream policy
        self.add_node(3,
                      extra_args=[
                          '-whitelist=127.0.0.1',
                          '-multistreampolicies=Default'
                      ],
                      init_data_dir=True)
        self.start_node(3)

        # Check streampolicies field from getnetworkinfo
        assert_equal(self.nodes[0].getnetworkinfo()["streampolicies"],
                     "BlockPriority,Default")
        assert_equal(self.nodes[1].getnetworkinfo()["streampolicies"],
                     "BlockPriority,Default")
        assert_equal(self.nodes[2].getnetworkinfo()["streampolicies"],
                     "BlockPriority,Default")
        assert_equal(self.nodes[3].getnetworkinfo()["streampolicies"],
                     "Default")

        # Connect the new node to one of the existing nodes and check that they establish a Default association
        connect_nodes(self.nodes[1], 3)
        expected1 = [
            {
                'id': 0,  # An association to node0
                'associd': '<UNKNOWN>',
                'streampolicy': 'BlockPriority',
                'streams': ['GENERAL', 'DATA1']
            },
            {
                'id': 2,  # An association to node3
                'associd': '<UNKNOWN>',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[1], expected1),
                   timeout=5)
        expected3 = [
            {
                'id': 0,  # An association to node1
                'associd': '<UNKNOWN>',
                'streampolicy': 'Default',
                'streams': ['GENERAL']
            },
        ]
        wait_until(lambda: self.check_peer_info(self.nodes[3], expected3),
                   timeout=5)
Example #23
    def run_test(self):
        (node, ) = self.nodes
        self.pynode = P2PDataStore()
        self.connection = NodeConn('127.0.0.1', p2p_port(0), node, self.pynode)
        self.pynode.add_connection(self.connection)
        NetworkThread().start()
        self.pynode.wait_for_verack()
        # Get out of IBD
        node.generate(1)

        tip = self.getbestblock(node)

        logging.info("Create some blocks with OP_1 coinbase for spending.")
        blocks = []
        for _ in range(20):
            tip = self.build_block(tip)
            blocks.append(tip)
        self.pynode.send_blocks_and_test(blocks, node, timeout=10)
        self.spendable_outputs = deque(block.vtx[0] for block in blocks)

        logging.info("Mature the blocks.")
        node.generate(100)

        tip = self.getbestblock(node)

        # To make compact and fast-to-verify transactions, we'll use
        # CHECKDATASIG over and over with the same data.
        # (Using the same stuff over and over again means we get to hit the
        # node's signature cache and don't need to make new signatures every
        # time.)
        cds_message = b''
        # r=1 and s=1 ecdsa, the minimum values.
        cds_signature = bytes.fromhex('3006020101020101')
        # Recovered pubkey
        cds_pubkey = bytes.fromhex(
            '03089b476b570d66fad5a20ae6188ebbaf793a4c2a228c65f3d79ee8111d56c932'
        )

        fundings = []

        def make_spend(scriptpubkey, scriptsig):
            # Add a funding tx to fundings, and return a tx spending that using
            # scriptsig.
            logging.debug(
                "Gen tx with locking script {} unlocking script {} .".format(
                    scriptpubkey.hex(), scriptsig.hex()))

            # get funds locked with OP_1
            sourcetx = self.spendable_outputs.popleft()
            # make funding that forwards to scriptpubkey
            fundtx = create_transaction(sourcetx, scriptpubkey)
            fundings.append(fundtx)

            # make the spending
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(fundtx.sha256, 1), scriptsig))
            tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
            pad_tx(tx)
            tx.rehash()
            return tx

        logging.info("Generating txes used in this test")

        # "Good" txns that pass our rule:

        goodtxes = [
            # most dense allowed input -- 2 sigchecks with a 26-byte scriptsig.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 16, cds_signature])),

            # 4 sigchecks with a 112-byte scriptsig, just at the limit for this
            # sigchecks count.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_3DUP, OP_CHECKDATASIGVERIFY, OP_3DUP,
                    OP_CHECKDATASIGVERIFY, OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 101, cds_signature])),

            # "nice" transaction - 1 sigcheck with 9-byte scriptsig.
            make_spend(CScript([cds_message, cds_pubkey, OP_CHECKDATASIG]),
                       CScript([cds_signature])),

            # 1 sigcheck with 0-byte scriptsig.
            make_spend(
                CScript(
                    [cds_signature, cds_message, cds_pubkey, OP_CHECKDATASIG]),
                CScript([])),
        ]

        badtxes = [
            # "Bad" txns:
            # 2 sigchecks with a 25-byte scriptsig, just 1 byte too short.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 15, cds_signature])),

            # 4 sigchecks with a 111-byte scriptsig, just 1 byte too short.
            make_spend(
                CScript([
                    cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY,
                    OP_3DUP, OP_CHECKDATASIGVERIFY, OP_3DUP,
                    OP_CHECKDATASIGVERIFY, OP_CHECKDATASIGVERIFY
                ]), CScript([b'x' * 100, cds_signature])),
        ]
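
        # The good/bad split above is consistent with a per-input density rule
        # of roughly: sigchecks <= (scriptsig_length + 60) // 43. The exact
        # constants are an assumption inferred from the sizes used in this
        # test rather than taken from the node's source:
        def scriptsig_dense_enough(scriptsig_len, sigchecks):
            return sigchecks <= (scriptsig_len + 60) // 43
        assert scriptsig_dense_enough(26, 2) and not scriptsig_dense_enough(25, 2)
        assert scriptsig_dense_enough(112, 4) and not scriptsig_dense_enough(111, 4)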

        goodtxids = set(t.hash for t in goodtxes)
        badtxids = set(t.hash for t in badtxes)

        logging.info("Funding the txes")
        tip = self.build_block(tip, fundings)
        self.pynode.send_blocks_and_test([tip], node, timeout=10)

        # Activation tests

        logging.info("Approach to just before upgrade activation")
        # Move our clock to the upgrade time so we will accept such
        # future-timestamped blocks.
        node.setmocktime(MAY2020_START_TIME + 10)
        # Mine six blocks with timestamps starting at
        # MAY2020_START_TIME - 1
        blocks = []
        for i in range(-1, 5):
            tip = self.build_block(tip, nTime=MAY2020_START_TIME + i)
            blocks.append(tip)
        self.pynode.send_blocks_and_test(blocks, node, timeout=10)
        assert_equal(node.getblockchaininfo()['mediantime'],
                     MAY2020_START_TIME - 1)
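
        # Sanity-check that 'mediantime' really is the median of the last 11
        # block timestamps (assuming the usual 11-block median-time-past
        # window; purely illustrative):
        times = []
        hdr_hash = node.getbestblockhash()
        for _ in range(11):
            hdr = node.getblockheader(hdr_hash)
            times.append(hdr['time'])
            hdr_hash = hdr['previousblockhash']
        assert_equal(sorted(times)[5], node.getblockchaininfo()['mediantime'])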

        logging.info(
            "The next block will activate, but the activation block itself must follow old rules"
        )

        logging.info("Send all the transactions just before upgrade")

        self.pynode.send_txs_and_test(goodtxes, node)
        self.pynode.send_txs_and_test(badtxes, node)

        assert_equal(set(node.getrawmempool()), goodtxids | badtxids)

        # Ask the node to mine a block; the new rules are not yet active, so
        # it should include both the good and the bad txes.
        [blockhash] = node.generate(1)
        assert_equal(set(node.getblock(blockhash, 1)['tx'][1:]),
                     goodtxids | badtxids)
        assert_equal(node.getrawmempool(), [])

        # discard that block
        node.invalidateblock(blockhash)
        waitFor(30, lambda: set(node.getrawmempool()) == goodtxids | badtxids)

        logging.info("Mine the activation block itself")
        tip = self.build_block(tip)
        self.pynode.send_blocks_and_test([tip], node, timeout=10)

        logging.info("We have activated!")
        assert_equal(node.getblockchaininfo()['mediantime'],
                     MAY2020_START_TIME)

        logging.info(
            "The high-sigchecks transactions got evicted but the good ones are still around"
        )
        waitFor(10, lambda: set(node.getrawmempool()) == goodtxids)

        logging.info(
            "Now the high-sigchecks transactions are rejected from mempool.")
        # try sending some of the bad txes again after the upgrade
        for tx in badtxes:
            self.check_for_no_ban_on_rejected_tx(
                node, tx,
                None)  # No reject reason because we don't log on rejection
            assert_raises_rpc_error(-26, TX_INPUT_SIGCHECKS_ERROR,
                                    node.sendrawtransaction, ToHex(tx))

        logging.info("But they can still be mined!")

        # Now make a block with all the txes, they still are accepted in blocks!
        tip = self.build_block(tip, goodtxes + badtxes)
        self.pynode.send_blocks_and_test([tip], node, timeout=10)

        assert_equal(node.getbestblockhash(), tip.hash)
Example #24
    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2 * 60 * 60 * 24 * 7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []

        for i in range(3):
            test_nodes.append(TestNode())
            connections.append(
                NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
                         test_nodes[i]))
            test_nodes[i].add_connection(connections[i])

        NetworkThread().start()  # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]

        # Test logic begins here

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # test_nodes[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)

        getdata_request = MsgGetdata()
        getdata_request.inv.append(CInv(2, big_old_block))

        block_rate_minutes = 1
        blocks_per_day = 24 * 60 / block_rate_minutes
        max_block_serialized_size = 8000000  # This is MAX_BLOCK_SERIALIZED_SIZE_RIP2
        max_bytes_per_day = self.maxuploadtarget * 1024 * 1024
        daily_buffer = blocks_per_day * max_block_serialized_size
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # daily_buffer bytes are reserved for relaying new blocks, so expect
        # roughly success_count requests for the old block to succeed before
        # the upload target is hit.
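
        # Note the asymmetry this test relies on: only "historical" blocks
        # (older than roughly a week) are metered against -maxuploadtarget,
        # while blocks close to the tip are always served. That is why
        # test_nodes[1] below can fetch big_new_block indefinitely but is
        # disconnected as soon as it asks for big_old_block.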
        for i in range(int(success_count)):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info(
            "Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try lots of times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(500):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if test_nodes[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and test_nodes[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        [c.disconnect_node() for c in connections]

        # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, [
            "-whitelist=127.0.0.1", "-maxuploadtarget=1",
            "-blockmaxsize=999000"
        ])

        # recreate/reconnect a test node
        test_nodes = [TestNode()]
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[0])
        ]
        test_nodes[0].add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        test_nodes[0].wait_for_verack()

        # retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_new_block], i + 1)

        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[0].send_and_ping(getdata_request)
        assert_equal(len(self.nodes[0].getpeerinfo()),
                     1)  # node is still connected because of the whitelist

        self.log.info(
            "Peer still connected after trying to download old block (whitelisted)"
        )
Example #25
    def run_test(self):
        test = TestManager()

        # Launch Sprout, Overwinter, and Sapling mininodes
        nodes = []
        for x in range(10):
            nodes.append(
                NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test,
                         "regtest", SPROUT_PROTO_VERSION))
            nodes.append(
                NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test,
                         "regtest", OVERWINTER_PROTO_VERSION))
            nodes.append(
                NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test,
                         "regtest", SAPLING_PROTO_VERSION))

        # Start up network handling in another thread
        NetworkThread().start()

        # Sprout consensus rules apply at block height 9
        self.nodes[0].generate(9)
        assert_equal(9, self.nodes[0].getblockcount())

        # Verify mininodes are still connected to resistanced node
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(10, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(10, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(10, versions.count(SAPLING_PROTO_VERSION))
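
        # The minimum peer protocol version rises with each network upgrade.
        # A rough sketch of the policy this test exercises (heights and
        # version numbers are taken from the assertions and reject messages
        # below, not from the node's source):
        def min_peer_version(height):
            if height >= 15:
                return 170006  # Sapling
            if height >= 10:
                return 170003  # Overwinter
            return SPROUT_PROTO_VERSION
        assert min_peer_version(self.nodes[0].getblockcount()) == SPROUT_PROTO_VERSION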

        # Overwinter consensus rules activate at block height 10
        self.nodes[0].generate(1)
        assert_equal(10, self.nodes[0].getblockcount())
        print('Overwinter active')

        # Mininodes send ping message to resistanced node.
        pingCounter = 1
        for node in nodes:
            node.send_message(msg_ping(pingCounter))
            pingCounter = pingCounter + 1

        time.sleep(3)

        # Verify Sprout mininodes have been dropped, while Overwinter and
        # Sapling mininodes are still connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(10, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(10, versions.count(SAPLING_PROTO_VERSION))

        # Extend the Overwinter chain with another block.
        self.nodes[0].generate(1)

        # Connect a new Overwinter mininode to the resistanced node, which is accepted.
        nodes.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest",
                     OVERWINTER_PROTO_VERSION))
        time.sleep(3)
        assert_equal(21, len(self.nodes[0].getpeerinfo()))

        # Connect a new Sapling mininode to the resistanced node, which is accepted.
        nodes.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest",
                     SAPLING_PROTO_VERSION))
        time.sleep(3)
        assert_equal(22, len(self.nodes[0].getpeerinfo()))

        # Try to connect a new Sprout mininode to the resistanced node, which is rejected.
        sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test,
                          "regtest", SPROUT_PROTO_VERSION)
        nodes.append(sprout)
        time.sleep(3)
        assert ("Version must be 170003 or greater"
                in str(sprout.rejectMessage))

        # Verify that only Overwinter and Sapling mininodes are connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(11, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(11, versions.count(SAPLING_PROTO_VERSION))

        # Sapling consensus rules activate at block height 15
        self.nodes[0].generate(4)
        assert_equal(15, self.nodes[0].getblockcount())
        print('Sapling active')

        # Mininodes send ping message to resistanced node.
        pingCounter = 1
        for node in nodes:
            node.send_message(msg_ping(pingCounter))
            pingCounter = pingCounter + 1

        time.sleep(3)

        # Verify Sprout and Overwinter mininodes have been dropped, while
        # Sapling mininodes are still connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(0, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(11, versions.count(SAPLING_PROTO_VERSION))

        # Extend the Sapling chain with another block.
        self.nodes[0].generate(1)

        # Connect a new Sapling mininode to the resistanced node, which is accepted.
        nodes.append(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest",
                     SAPLING_PROTO_VERSION))
        time.sleep(3)
        assert_equal(12, len(self.nodes[0].getpeerinfo()))

        # Try to connect a new Sprout mininode to the resistanced node, which is rejected.
        sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test,
                          "regtest", SPROUT_PROTO_VERSION)
        nodes.append(sprout)
        time.sleep(3)
        assert ("Version must be 170006 or greater"
                in str(sprout.rejectMessage))

        # Try to connect a new Overwinter mininode to the resistanced node, which is rejected.
        sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test,
                          "regtest", OVERWINTER_PROTO_VERSION)
        nodes.append(sprout)
        time.sleep(3)
        assert ("Version must be 170006 or greater"
                in str(sprout.rejectMessage))

        # Verify that only Sapling mininodes are connected.
        peerinfo = self.nodes[0].getpeerinfo()
        versions = [x["version"] for x in peerinfo]
        assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
        assert_equal(0, versions.count(OVERWINTER_PROTO_VERSION))
        assert_equal(12, versions.count(SAPLING_PROTO_VERSION))

        for node in nodes:
            node.disconnect_node()
Example #26
    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        inv_node = TestNode()
        test_node = TestNode()

        self.p2p_connections = [inv_node, test_node]

        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node),
            NodeConn('127.0.0.1',
                     p2p_port(0),
                     self.nodes[0],
                     test_node,
                     services=0)
        ]
        # Set nServices to 0 for test_node, so no block download will occur outside of
        # direct fetching
        inv_node.add_connection(connections[0])
        test_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        inv_node.wait_for_verack()
        test_node.wait_for_verack()

        # Ensure verack's have been processed by our peer
        inv_node.sync_with_ping()
        test_node.sync_with_ping()

        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info(
            "Part 1: headers don't start before sendheaders message...")
        block_time = 0
        for i in range(4):
            old_tip = tip
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # next try requesting header and block
                test_node.get_headers(locator=[old_tip], hashstop=tip)
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_last_announcement(
                )  # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(
                    self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height + 1),
                                         block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256])
                test_node.send_message(MsgBlock(new_block))
                test_node.sync_with_ping()  # make sure this block is processed
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()

        self.log.info("Part 1: success!")
        self.log.info(
            "Part 2: announce blocks with headers after sendheaders message..."
        )
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(MsgSendHeaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()

        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=[tip]), True)

        height = self.nodes[0].getblockcount() + 1
        block_time += 10  # Advance far enough ahead
        for i in range(10):
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                blocks = []
                for _ in range(i + 1):
                    blocks.append(
                        create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders()
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    [inv_node.send_block_inv(x.sha256) for x in blocks]
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [test_node.send_message(MsgBlock(x)) for x in blocks]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert "inv" not in inv_node.last_message
                assert "headers" not in inv_node.last_message
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(headers=[tip]),
                             True)
                height += 1
                block_time += 1

        self.log.info("Part 2: success!")

        self.log.info(
            "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..."
        )

        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(
                test_node.check_last_announcement(headers=new_block_hashes),
                True)

            block_time += 8

            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)

            block_time += 9

            fork_point = self.nodes[0].getblock(
                "%064x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)

            # Use getblocks/getdata
            test_node.send_getblocks(locator=[fork_point])
            assert_equal(
                test_node.check_last_announcement(inv=new_block_hashes), True)
            test_node.get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])

            for i in range(3):
                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(inv=[tip]),
                             True)
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.get_headers(locator=[fork_point],
                                          hashstop=new_block_hashes[1])
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    if j == 0:
                        test_node.get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=[tip]),
                         True)

        self.log.info("Part 3: success!")

        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1

        # Create 2 blocks.  Send the blocks, then send the headers.
        blocks = []
        for _ in range(2):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(MsgBlock(blocks[-1]))

        inv_node.sync_with_ping()  # Make sure blocks are processed
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # This time, direct fetch should work
        blocks = []
        for _ in range(3):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks],
                                   timeout=int(direct_fetch_response_time))

        [test_node.send_message(MsgBlock(x)) for x in blocks]

        test_node.sync_with_ping()

        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 1
        blocks = []

        # Create extra blocks for later
        for _ in range(20):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]],
                                   timeout=int(direct_fetch_response_time))

        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]],
                                   timeout=int(direct_fetch_response_time))
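
        # (Only 14 of the 16 newly announced blocks are fetched here,
        # presumably because direct fetch is capped at 16 blocks in flight per
        # peer and the two fork blocks requested above have not been delivered
        # yet.)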

        # Announcing 1 more header should not trigger any response
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        self.log.info("Part 4: success!")

        # Now deliver all those blocks we announced.
        [test_node.send_message(MsgBlock(x)) for x in blocks]

        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            test_node.last_message.pop("getdata", None)
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(
                    create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders()
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            [test_node.send_message(MsgBlock(x)) for x in blocks]
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16),
                         blocks[1].sha256)

        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10
        for j in range(MAX_UNCONNECTING_HEADERS + 1):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders()

        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])

        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]

        # Now try to see how many unconnecting headers we can send
        # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
            test_node.wait_for_getheaders()

        # Eventually this stops working.
        test_node.send_header_for_blocks([blocks[-1]])

        # Should get disconnected
        test_node.wait_for_disconnect()
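
        # (The 5x factor presumably reflects DoS scoring: every
        # MAX_UNCONNECTING_HEADERS unconnecting headers earns a fixed
        # misbehaviour penalty, and the peer is disconnected once its score
        # reaches the ban threshold.)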

        self.log.info("Part 5: success!")

        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert "getdata" not in inv_node.last_message
Example #27
    def run_test(self):

        # Connect to node0
        node0 = BaseNode()
        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
        ]
        node0.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread
        node0.wait_for_verack()

        # Build the blockchain
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        self.blocks = []

        # Get a pubkey for the coinbase TXO
        coinbase_key = ECKey()
        coinbase_key.generate()
        coinbase_pubkey = coinbase_key.get_pubkey().get_bytes()

        # Create the first block with a coinbase output to our key
        height = 1
        block = create_block(self.tip, create_coinbase(height,
                                                       coinbase_pubkey),
                             self.block_time)
        self.blocks.append(block)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.x16r
        height += 1

        # Bury the block 100 deep so the coinbase output is spendable
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.solve()
            self.blocks.append(block)
            self.tip = block.x16r
            self.block_time += 1
            height += 1

        # Create a transaction spending the coinbase output with an invalid (null) signature
        tx = CTransaction()
        tx.vin.append(
            CTxIn(COutPoint(self.block1.vtx[0].x16r, 0), script_sig=b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
        tx.calc_x16r()

        block102 = create_block(self.tip, create_coinbase(height),
                                self.block_time)
        self.block_time += 1
        block102.vtx.extend([tx])
        block102.hashMerkleRoot = block102.calc_merkle_root()
        block102.rehash()
        block102.solve()
        self.blocks.append(block102)
        self.tip = block102.x16r
        self.block_time += 1
        height += 1

        # Bury the assumed valid block 2100 deep
        for i in range(2100):
            block = create_block(self.tip, create_coinbase(height),
                                 self.block_time)
            block.nVersion = 4
            block.solve()
            self.blocks.append(block)
            self.tip = block.x16r
            self.block_time += 1
            height += 1

        # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
        self.start_node(1, extra_args=["-assumevalid=" + hex(block102.x16r)])
        node1 = BaseNode()  # connects to node1
        connections.append(
            NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
        node1.add_connection(connections[1])
        node1.wait_for_verack()

        self.start_node(2, extra_args=["-assumevalid=" + hex(block102.x16r)])
        node2 = BaseNode()  # connects to node2
        connections.append(
            NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[2])
        node2.wait_for_verack()

        # send header lists to all three nodes
        node0.send_header_for_blocks(self.blocks[0:2000])
        node0.send_header_for_blocks(self.blocks[2000:])
        node1.send_header_for_blocks(self.blocks[0:2000])
        node1.send_header_for_blocks(self.blocks[2000:])
        node2.send_header_for_blocks(self.blocks[0:200])
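        # node2 only receives 200 headers, so the assumevalid block should not
        # be buried deeply enough in its known header chain for script
        # verification to be skipped; it is expected to validate (and reject)
        # block 102 just like node0, as asserted below.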

        # Send blocks to node0. Block 102 will be rejected.
        self.send_blocks_until_disconnected(node0)
        self.assert_blockchain_height(self.nodes[0], 101)

        # Send all blocks to node1. All blocks will be accepted.
        for i in range(2202):
            node1.send_message(MsgBlock(self.blocks[i]))
        # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
        node1.sync_with_ping(120)
        assert_equal(
            self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'],
            2202)

        # Send blocks to node2. Block 102 will be rejected.
        self.send_blocks_until_disconnected(node2)
        self.assert_blockchain_height(self.nodes[2], 101)
Example #28
    def run_test(self):
        # Setup the p2p connection and start up the network thread.
        test_node = TestNode()

        connections = [
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
        ]
        test_node.add_connection(connections[0])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()

        # 1. Have the node mine one period worth of blocks
        self.nodes[0].generate(VB_PERIOD)

        # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
        # blocks signaling some unknown bit.
        n_version = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
        self.send_blocks_with_version(test_node, VB_THRESHOLD - 1, n_version)

        # Fill rest of period with regular version blocks
        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
        # Check that we're not getting any versionbit-related errors in
        # get*info()
        assert (not VB_PATTERN.match(self.nodes[0].getinfo()["errors"]))
        assert (not VB_PATTERN.match(
            self.nodes[0].getmininginfo()["warnings"]))
        assert (not VB_PATTERN.match(
            self.nodes[0].getnetworkinfo()["warnings"]))

        # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
        # some unknown bit
        self.send_blocks_with_version(test_node, VB_THRESHOLD, n_version)
        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
        # Might not get a versionbits-related alert yet, as we should
        # have gotten a different alert due to more than 51/100 blocks
        # being of unexpected version.
        # Check that get*info() shows some kind of error.
        self.log.info(self.nodes[0].getinfo()["errors"])
        #assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getinfo()["errors"])
        #assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["warnings"])
        #assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])

        # Mine a period worth of expected blocks so the generic block-version warning
        # is cleared, and restart the node. This should move the versionbit state
        # to ACTIVE.
        self.nodes[0].generate(VB_PERIOD)
        self.stop_nodes()
        # Empty out the alert file
        with open(self.alert_filename, 'w', encoding='utf8') as _:
            pass
        self.start_nodes()

        # Connecting one block should be enough to generate an error.
        self.nodes[0].generate(1)
        assert (WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getinfo()["errors"])
        assert (WARN_UNKNOWN_RULES_ACTIVE
                in self.nodes[0].getmininginfo()["warnings"])
        assert (WARN_UNKNOWN_RULES_ACTIVE
                in self.nodes[0].getnetworkinfo()["warnings"])
        self.stop_nodes()
        self.test_versionbits_in_alert_file()

        # Test framework expects the node to still be running...
        self.start_nodes()
Example #29
    def test_BIP(self, bipName, activated_version, invalidate,
                 invalidatePostSignature, bitno):
        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
        assert_equal(self.get_bip9_status(bipName)['since'], 0)

        # generate some coins for later
        self.coinbase_blocks = self.nodes[0].generate(2)
        self.height = 3  # height of the next block to build
        self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = int(time.time())

        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
        assert_equal(self.get_bip9_status(bipName)['since'], 0)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])
        assert (bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert_equal(tmpl['version'], 0x20000000)

        # Test 1
        # Advance from DEFINED to STARTED
        test_blocks = self.generate_blocks(141, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        assert_equal(self.get_bip9_status(bipName)['since'], 144)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert (tmpl['version'] & activated_version)
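
        # (The arithmetic in this test assumes the regtest BIP9 parameters:
        # a 144-block period and a 108-block signalling threshold, which is
        # why the 'since' heights asserted here land on the period boundaries
        # 144, 432 and 576.)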

        # Test 2
        # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(
            50, activated_version)  # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(
            20, 4, test_blocks)  # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(
            50, activated_version,
            test_blocks)  # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(
            24, 4, test_blocks)  # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        assert_equal(self.get_bip9_status(bipName)['since'], 144)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert (tmpl['version'] & activated_version)

        # Test 3
        # 108 out of 144 signal bit 1 to achieve LOCKED_IN
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(
            58, activated_version)  # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(
            26, 4, test_blocks)  # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(
            50, activated_version,
            test_blocks)  # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(
            10, 4, test_blocks)  # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        assert_equal(self.get_bip9_status(bipName)['since'], 432)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])

        # Test 4
        # 143 more blocks (waiting period-1)
        test_blocks = self.generate_blocks(143, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        assert_equal(self.get_bip9_status(bipName)['since'], 432)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName not in tmpl['rules'])

        # Test 5
        # Check that the new rule is enforced
        spendtx = self.create_transaction(self.nodes[0],
                                          self.coinbase_blocks[0],
                                          self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(self.height),
                             self.last_block_time + 1)
        block.nVersion = activated_version
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.last_block_time += 1
        self.tip = block.sha256
        self.height += 1
        yield TestInstance([[block, True]])

        assert_equal(self.get_bip9_status(bipName)['status'], 'active')
        assert_equal(self.get_bip9_status(bipName)['since'], 576)
        tmpl = self.nodes[0].getblocktemplate({})
        assert (bipName in tmpl['rules'])
        assert (bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert (not (tmpl['version'] & (1 << bitno)))

        # Test 6
        # Check that the new rule is enforced now that the deployment is ACTIVE:
        # a block containing the rule-violating transaction must be rejected
        spendtx = self.create_transaction(self.nodes[0],
                                          self.coinbase_blocks[1],
                                          self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()

        block = create_block(self.tip, create_coinbase(self.height),
                             self.last_block_time + 1)
        block.nVersion = 5
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])

        # Restart all
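        # Wipe the datadir and restart the nodes so a subsequent run of this
        # test can start again from a fresh chain.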
        self.test.block_store.close()
        stop_nodes(self.nodes)
        shutil.rmtree(self.options.tmpdir)
        self.setup_chain()
        self.setup_network()
        self.test.block_store = BlockStore(self.options.tmpdir)
        self.test.clear_all_connections()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread
Example #30
    def run_test(self):
        node0 = NodeConnCB()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])
        NetworkThread().start() # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
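        # Mining DERSIG_HEIGHT - 2 blocks means the next block (height
        # DERSIG_HEIGHT - 1) is still exempt from BIP66, while the block after
        # it falls under the new rule.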
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info("Test that a transaction with non-DER signature can still appear in a block")

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                self.nodeaddress, 1.0)
        unDERify(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
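        # Use the tip's median-time-past + 1 so the new block's timestamp
        # satisfies the median-time-past rule.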
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
        block.nVersion = 2
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(msg_block(block))
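        # send_and_ping() waits for the node to answer a ping after the block,
        # so the block has been fully processed before we query the tip.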
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()
        node0.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000002)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
        block.nVersion = 3

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                self.nodeaddress, 1.0)
        unDERify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
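        # (-promiscuousmempoolflags is supplied in this test's startup
        # arguments, not shown here; it relaxes the script-verification flags
        # used for mempool acceptance.)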
        node0.send_and_ping(msg_tx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            # We can receive different reject messages depending on whether
            # ravend is running with multiple script check threads. If script
            # check threads are not in use, then transaction script validation
            # happens sequentially, and ravend produces more specific reject
            # reasons.
            assert node0.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason, b'block-validation-failed')
            else:
                assert b'Non-canonical DER signature' in node0.last_message["reject"].reason

        self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
        block.vtx[1] = create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        node0.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)