Example #1
    def send_txs_and_test(self, txs, rpc, success=True, reject_code=None, reject_reason=None):
        """Send txs to test node and test whether they're accepted to the mempool.

         - add all txs to our tx_store
         - send tx messages for all txs
         - if success is True: assert that the tx is accepted to the mempool
         - if success is False: assert that the tx is not accepted to the mempool
         - if reject_code and reject_reason are set: assert that the correct reject message is received."""

        with mininode_lock:
            self.reject_code_received = None
            self.reject_reason_received = None

            for tx in txs:
                self.tx_store[tx.sha256] = tx

        for tx in txs:
            self.send_message(msg_tx(tx))

        self.sync_with_ping()

        raw_mempool = rpc.getrawmempool()
        if success:
            # Check that all txs are now in the mempool
            for tx in txs:
                assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
        else:
            # Check that none of the txs are now in the mempool
            for tx in txs:
                assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)

        if reject_code is not None:
            wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
        if reject_reason is not None:
            wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
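For orientation, here is a hedged sketch of how a test might drive this helper. It assumes `peer` is the P2PDataStore-style connection that defines send_txs_and_test(), `node` is the TestNode under test, and the transactions and reject values are purely illustrative.

# Hypothetical usage sketch; `peer`, `node`, `txs` and `bad_tx` are assumed
# to exist in the calling test, and the reject values are illustrative.
peer.send_txs_and_test(txs, node)  # all txs expected in the mempool
peer.send_txs_and_test([bad_tx], node, success=False,
                       reject_code=16, reject_reason=b'bad-txns-vout-negative')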
Example #2
    def check_last_announcement(self, headers=None, inv=None):
        """Test whether the last announcement received had the right header or the right inv.

        inv and headers should be lists of block hashes."""

        test_function = lambda: self.block_announced
        wait_until(test_function, timeout=60, lock=mininode_lock)

        with mininode_lock:
            self.block_announced = False

            compare_inv = []
            if "inv" in self.last_message:
                compare_inv = [x.hash for x in self.last_message["inv"].inv]
            if inv is not None:
                assert_equal(compare_inv, inv)

            compare_headers = []
            if "headers" in self.last_message:
                compare_headers = [x.sha256 for x in self.last_message["headers"].headers]
            if headers is not None:
                assert_equal(compare_headers, headers)

            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
Example #3
    def run_test(self):
        node = self.nodes[0]  # alias

        node.add_p2p_connection(P2PStoreTxInvs())

        self.log.info("Create a new transaction and wait until it's broadcast")
        txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)

        # Can take a few seconds due to transaction trickling
        wait_until(lambda: node.p2p.tx_invs_received[txid] >= 1, lock=mininode_lock)

        # Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
        node.add_p2p_connection(P2PStoreTxInvs())

        self.log.info("Create a block")
        # Create and submit a block without the transaction.
        # Transactions are only rebroadcast if there has been a block at least five minutes
        # after the last time we tried to broadcast. Use mocktime and give an extra minute to be sure.
        block_time = int(time.time()) + 6 * 60
        node.setmocktime(block_time)
        block = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockchaininfo()['blocks']), block_time)
        block.nVersion = 3
        block.rehash()
        block.solve()
        node.submitblock(ToHex(block))

        # Transaction should not be rebroadcast
        node.p2ps[1].sync_with_ping()
        assert_equal(node.p2ps[1].tx_invs_received[txid], 0)

        self.log.info("Transaction should be rebroadcast after 30 minutes")
        # Use mocktime and give an extra 5 minutes to be sure.
        rebroadcast_time = int(time.time()) + 41 * 60
        node.setmocktime(rebroadcast_time)
        wait_until(lambda: node.p2ps[1].tx_invs_received[txid] >= 1, lock=mininode_lock)
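Every example here leans on the framework's wait_until() helper. As a rough mental model (a sketch, not the framework's actual implementation), it polls a predicate, optionally under a lock, until the predicate is truthy or the timeout expires:

import time

def wait_until_sketch(predicate, *, timeout=60, lock=None, poll_interval=0.05):
    # Poll `predicate` until it returns a truthy value, holding `lock` (if
    # given) around each call; raise on timeout, mirroring a test failure.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if lock is not None:
            with lock:
                if predicate():
                    return
        elif predicate():
            return
        time.sleep(poll_interval)
    raise AssertionError("predicate not satisfied within {}s".format(timeout))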
Example #4
    def send_await_disconnect(self, message, timeout=30):
        """Sends a message to the node and wait for disconnect.

        This is used when we want to send a message into the node that we expect
        will get us disconnected, eg an invalid block."""
        self.send_message(message)
        wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
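A hedged usage sketch: this helper is typically handed a message that should trigger a disconnect or ban. `peer` and `bad_block` are assumed names.

# Hypothetical usage sketch; `bad_block` is a solved but consensus-invalid
# block, so the node is expected to drop the connection after processing it.
peer.send_await_disconnect(msg_block(bad_block))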
Example #5
File: mininode.py Project: dgenr8/bitcoin
    def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
        """Send blocks to test node and test whether the tip advances.

         - add all blocks to our block_store
         - send a headers message for the final block
         - the on_getheaders handler will ensure that any getheaders are responded to
         - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
           ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
         - if success is True: assert that the node's tip advances to the most recent block
         - if success is False: assert that the node's tip doesn't advance
         - if reject_reason is set: assert that the correct reject message is logged"""

        with mininode_lock:
            for block in blocks:
                self.block_store[block.sha256] = block
                self.last_block_hash = block.sha256

        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            if force_send:
                for b in blocks:
                    self.send_message(msg_block(block=b))
            else:
                self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
                wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)

            if expect_disconnect:
                self.wait_for_disconnect(timeout=timeout)
            else:
                self.sync_with_ping(timeout=timeout)

            if success:
                wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
            else:
                assert node.getbestblockhash() != blocks[-1].hash
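A hedged usage sketch for the happy and unhappy paths. It assumes `peer` is the P2PDataStore connection and `blocks` is a list of connected, solved blocks extending the node's tip; the reject string is illustrative.

# Hypothetical usage sketch.
peer.send_blocks_and_test(blocks, node)                      # tip should advance
peer.send_blocks_and_test([bad_block], node, success=False,  # tip should not move
                          reject_reason='bad-txns-inputs-missingorspent')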
Example #6
    def wait_for_tx(self, txid, timeout=60):
        def test_function():
            if not self.last_message.get('tx'):
                return False
            return self.last_message['tx'].tx.rehash() == txid

        wait_until(test_function, timeout=timeout, lock=mininode_lock)
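A hedged usage sketch: wait until the node has relayed a freshly created wallet transaction to this peer. Note that test_function compares against the hex txid returned by rehash(), so the RPC's hex txid can be passed straight through. `node` and `peer` are assumed names.

# Hypothetical usage sketch.
txid = node.sendtoaddress(node.getnewaddress(), 1)
peer.wait_for_tx(txid)  # relay can take a few seconds due to trickling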
Example #7
    def _test_getnettotals(self):
        # getnettotals totalbytesrecv and totalbytessent should be
        # consistent with getpeerinfo. Since the RPC calls are not atomic,
        # and messages might have been recvd or sent between RPC calls, call
        # getnettotals before and after and verify that the returned values
        # from getpeerinfo are bounded by those values.
        net_totals_before = self.nodes[0].getnettotals()
        peer_info = self.nodes[0].getpeerinfo()
        net_totals_after = self.nodes[0].getnettotals()
        assert_equal(len(peer_info), 2)
        peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
        peers_sent = sum([peer['bytessent'] for peer in peer_info])

        assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
        assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
        assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
        assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)

        # test getnettotals and getpeerinfo by doing a ping
        # the bytes sent/received should change
        # note ping and pong are 32 bytes each
        self.nodes[0].ping()
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)

        peer_info_after_ping = self.nodes[0].getpeerinfo()
        for before, after in zip(peer_info, peer_info_after_ping):
            assert_greater_than_or_equal(after['bytesrecv_per_msg']['pong'], before['bytesrecv_per_msg']['pong'] + 32)
            assert_greater_than_or_equal(after['bytessent_per_msg']['ping'], before['bytessent_per_msg']['ping'] + 32)
Example #8
File: mininode.py Project: dgenr8/bitcoin
    def wait_for_header(self, blockhash, timeout=60):
        def test_function():
            last_headers = self.last_message.get('headers')
            if not last_headers:
                return False
            return last_headers.headers[0].rehash() == blockhash

        wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #9
File: mininode.py Project: dgenr8/bitcoin
 def wait_for_inv(self, expected_inv, timeout=60):
     """Waits for an INV message and checks that the first inv object in the message was as expected."""
     if len(expected_inv) > 1:
         raise NotImplementedError("wait_for_inv() will only verify the first inv object")
     test_function = lambda: self.last_message.get("inv") and \
                             self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                             self.last_message["inv"].inv[0].hash == expected_inv[0].hash
     wait_until(test_function, timeout=timeout, lock=mininode_lock)
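A hedged usage sketch, assuming the conventional P2P inv type constants (1 for a transaction, 2 for a block) and a hex txid from RPC:

# Hypothetical usage sketch; wait for the node to inv us a specific tx.
peer.wait_for_inv([CInv(1, int(txid, 16))])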
Example #10
 def reindex(self, justchainstate=False):
     self.nodes[0].generatetoaddress(3, self.nodes[0].get_deterministic_priv_key().address)
     blockcount = self.nodes[0].getblockcount()
     self.stop_nodes()
     extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
     self.start_nodes(extra_args)
     wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
     self.log.info("Success")
Example #11
 def reindex(self, justchainstate=False):
     self.nodes[0].generate(3)
     blockcount = self.nodes[0].getblockcount()
     self.stop_nodes()
     extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
     self.start_nodes(extra_args)
     wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
     self.log.info("Success")
Example #12
File: mininode.py Project: dgenr8/bitcoin
    def wait_for_getheaders(self, timeout=60):
        """Waits for a getheaders message.

        Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
        value must be explicitly cleared before calling this method, or it will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block header has been requested."""
        test_function = lambda: self.last_message.get("getheaders")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
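Because of the caveat in the docstring, callers are expected to clear any stale getheaders first; a hedged sketch of that pattern, where `peer` and the trigger function are assumed names:

# Clear a possibly stale message so the next wait observes a fresh one.
with mininode_lock:
    peer.last_message.pop("getheaders", None)
trigger_new_headers_request()  # hypothetical action that provokes a getheaders
peer.wait_for_getheaders()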
Example #13
 def check_last_headers_announcement(self, headers):
     """Test whether the last headers announcements received are right.
        Headers may be announced across more than one message."""
     test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
     wait_until(test_function, timeout=60, lock=mininode_lock)
     with mininode_lock:
         assert_equal(self.recent_headers_announced, headers)
         self.block_announced = False
         self.last_message.pop("headers", None)
         self.recent_headers_announced = []
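A hedged usage sketch: mine one block and assert it was announced via a headers message. `node` and `test_node` are assumed as in the other examples.

# Hypothetical usage sketch; tip is the sha256 of the freshly mined block.
tip = int(node.generate(1)[0], 16)
test_node.check_last_headers_announcement(headers=[tip])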
Example #14
 def reindex(self):
     self.nodes[0].generate(3)
     blockcount = self.nodes[0].getblockcount()
     self.stop_nodes()
     time.sleep(5)
     extra_args = [["-reindex", "-checkblockindex=1"]]
     self.start_nodes(extra_args)
     time.sleep(15)
     wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
     self.log.info("Success")
Example #15
    def test_incorrect_blocktxn_response(self, node, test_node, version):
        if (len(self.utxos) == 0):
            self.make_utxos()
        utxo = self.utxos.pop(0)

        block = self.build_block_with_transactions(node, utxo, 10)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        # Relay the first 5 transactions from the block in advance
        for tx in block.vtx[1:6]:
            test_node.send_message(msg_tx(tx))
        test_node.sync_with_ping()
        # Make sure all transactions were accepted.
        mempool = node.getrawmempool()
        for tx in block.vtx[1:6]:
            assert(tx.hash in mempool)

        # Send compact block
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        absolute_indexes = []
        with mininode_lock:
            assert("getblocktxn" in test_node.last_message)
            absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [6, 7, 8, 9, 10])

        # Now give an incorrect response.
        # Note that it's possible for bitcoind to be smart enough to know we're
        # lying, since it could check to see if the shortid matches what we're
        # sending, and eg disconnect us for misbehavior.  If that behavior
        # change was made, we could just modify this test by having a
        # different peer provide the block further down, so that we're still
        # verifying that the block isn't marked bad permanently. This is good
        # enough for now.
        msg = msg_blocktxn()
        if version == 2:
            msg = msg_witness_blocktxn()
        msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
        test_node.send_and_ping(msg)

        # Tip should not have updated
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

        # We should receive a getdata request
        wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
        assert_equal(len(test_node.last_message["getdata"].inv), 1)
        assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
        assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

        # Deliver the block
        if version == 2:
            test_node.send_and_ping(msg_witness_block(block))
        else:
            test_node.send_and_ping(msg_block(block))
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
Example #16
    def _test_getnetworkinginfo(self):
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)

        self.nodes[0].setnetworkactive(False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Wait a bit for all sockets to close
        wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)

        self.nodes[0].setnetworkactive(True)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
Example #17
    def check_last_inv_announcement(self, inv):
        """Test whether the last announcement received had the right inv.
        inv should be a list of block hashes."""

        test_function = lambda: self.block_announced
        wait_until(test_function, timeout=60, lock=mininode_lock)
        with mininode_lock:
            compare_inv = []
            if "inv" in self.last_message:
                compare_inv = [x.hash for x in self.last_message["inv"].inv]
            assert_equal(compare_inv, inv)
            self.block_announced = False
            self.last_message.pop("inv", None)
Example #18
    def send_blocks_and_test(self, blocks, rpc, success=True, request_block=True, reject_code=None, reject_reason=None, timeout=60):
        """Send blocks to test node and test whether the tip advances.

         - add all blocks to our block_store
         - send a headers message for the final block
         - the on_getheaders handler will ensure that any getheaders are responded to
         - if request_block is True: wait for getdata for each of the blocks. The on_getdata handler will
           ensure that any getdata messages are responded to
         - if success is True: assert that the node's tip advances to the most recent block
         - if success is False: assert that the node's tip doesn't advance
         - if reject_code and reject_reason are set: assert that the correct reject message is received"""

        with mininode_lock:
            self.reject_code_received = None
            self.reject_reason_received = None

            for block in blocks:
                self.block_store[block.sha256] = block
                self.last_block_hash = block.sha256

        self.send_message(msg_headers([CBlockHeader(blocks[-1])]))

        if request_block:
            wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)

        if success:
            wait_until(lambda: rpc.getbestblockhash() == blocks[-1].hash, timeout=timeout)
        else:
            assert rpc.getbestblockhash() != blocks[-1].hash

        if reject_code is not None:
            wait_until(lambda: self.reject_code_received == reject_code, lock=mininode_lock)
        if reject_reason is not None:
            wait_until(lambda: self.reject_reason_received == reject_reason, lock=mininode_lock)
Example #19
    def run_test(self):
        no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False, wait_for_verack=False)
        no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False, wait_for_verack=False)
        no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())

        wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock)
        wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock)
        wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock)

        # Mine a block and make sure that it's not sent to the connected nodes
        self.nodes[0].generate(1)

        # Give the node enough time to possibly leak out a message
        time.sleep(5)

        # This node should have been banned
        assert not no_version_bannode.is_connected

        self.nodes[0].disconnect_p2ps()

        # Wait until all connections are closed
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)

        # Make sure no unexpected messages came in
        assert(no_version_bannode.unexpected_msg == False)
        assert(no_version_idlenode.unexpected_msg == False)
        assert(no_verack_idlenode.unexpected_msg == False)
Example #20
    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(100)
        self.sync_all()

        # This transaction will be confirmed
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)

        self.nodes[0].generate(1)
        self.sync_all()

        # This transaction will not be confirmed
        txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)

        # Confirmed and unconfirmed transactions are now in the wallet.
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)

        # Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
        self.stop_node(0)
        self.start_node(0)

        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)

        # Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
        # transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
        self.stop_node(0)
        self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])

        wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)

        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
        assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)

        # Stop node0 and restart with zapwallettxes, but not persistmempool.
        # The unconfirmed transaction is zapped and is no longer in the wallet.
        self.stop_node(0)
        self.start_node(0, ["-zapwallettxes=2"])

        # tx1 is still available because it was confirmed
        assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)

        # This will raise an exception because the unconfirmed transaction has been zapped
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id',
                                self.nodes[0].gettransaction, txid2)
Example #21
    def test_height_min(self):
        assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
        self.log.info("Success")
        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
        self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        mine_large_blocks(self.nodes[0], 25)

        # Wait for blk00000.dat to be pruned
        wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)

        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        assert_greater_than(550, usage)
Example #22
    def test_getblocktxn_handler(self, test_node):
        version = test_node.cmpct_version
        node = self.nodes[0]
        # bitcoind will not send blocktxn responses for blocks whose height is
        # more than 10 blocks deep.
        MAX_GETBLOCKTXN_DEPTH = 10
        chain_height = node.getblockcount()
        current_height = chain_height
        while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
            block_hash = node.getblockhash(current_height)
            block = FromHex(CBlock(), node.getblock(block_hash, False))

            msg = msg_getblocktxn()
            msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
            num_to_request = random.randint(1, len(block.vtx))
            msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
            test_node.send_message(msg)
            wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)

            [tx.calc_sha256() for tx in block.vtx]
            with mininode_lock:
                assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
                all_indices = msg.block_txn_request.to_absolute()
                for index in all_indices:
                    tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
                    tx.calc_sha256()
                    assert_equal(tx.sha256, block.vtx[index].sha256)
                    if version == 1:
                        # Witnesses should have been stripped
                        assert tx.wit.is_null()
                    else:
                        # Check that the witness matches
                        assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
                test_node.last_message.pop("blocktxn", None)
            current_height -= 1

        # Next request should send a full block response, as we're past the
        # allowed depth for a blocktxn response.
        block_hash = node.getblockhash(current_height)
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
        with mininode_lock:
            test_node.last_message.pop("block", None)
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            test_node.last_message["block"].block.calc_sha256()
            assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
            assert "blocktxn" not in test_node.last_message
Example #23
    def test_compactblock_requests(self, test_node, segwit=True):
        version = test_node.cmpct_version
        node = self.nodes[0]
        # Try announcing a block with an inv or header, expect a compactblock
        # request
        for announce in ["inv", "header"]:
            block = self.build_block_on_tip(node, segwit=segwit)
            with mininode_lock:
                test_node.last_message.pop("getdata", None)

            if announce == "inv":
                test_node.send_message(msg_inv([CInv(2, block.sha256)]))
                wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
                test_node.send_header_for_blocks([block])
            else:
                test_node.send_header_for_blocks([block])
            wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
            assert_equal(len(test_node.last_message["getdata"].inv), 1)
            assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
            assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

            # Send back a compactblock message that omits the coinbase
            comp_block = HeaderAndShortIDs()
            comp_block.header = CBlockHeader(block)
            comp_block.nonce = 0
            [k0, k1] = comp_block.get_siphash_keys()
            coinbase_hash = block.vtx[0].sha256
            if version == 2:
                coinbase_hash = block.vtx[0].calc_sha256(True)
            comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
            test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
            assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
            # Expect a getblocktxn message.
            with mininode_lock:
                assert "getblocktxn" in test_node.last_message
                absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
            assert_equal(absolute_indexes, [0])  # should be a coinbase request

            # Send the coinbase, and verify that the tip advances.
            if version == 2:
                msg = msg_witness_blocktxn()
            else:
                msg = msg_blocktxn()
            msg.block_transactions.blockhash = block.sha256
            msg.block_transactions.transactions = [block.vtx[0]]
            test_node.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
Example #24
    def run_test(self):
        # Handy alias
        node = self.nodes[0]
        node.add_p2p_connection(P2PInterface())
        network_thread_start()
        node.p2p.wait_for_verack()

        # Mine one period worth of blocks
        node.generate(VB_PERIOD)

        self.log.info("Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version.")
        # Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(node.p2p, VB_THRESHOLD - 1, VB_UNKNOWN_VERSION)
        node.generate(VB_PERIOD - VB_THRESHOLD + 1)

        # Check that we're not getting any versionbit-related errors in get*info()
        assert(not VB_PATTERN.match(node.getmininginfo()["warnings"]))
        assert(not VB_PATTERN.match(node.getnetworkinfo()["warnings"]))

        self.log.info("Check that there is a warning if >50 blocks in the last 100 were an unknown version")
        # Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(node.p2p, VB_THRESHOLD, VB_UNKNOWN_VERSION)
        node.generate(VB_PERIOD - VB_THRESHOLD)

        # Check that get*info() shows the 51/100 unknown block version error.
        assert(WARN_UNKNOWN_RULES_MINED in node.getmininginfo()["warnings"])
        assert(WARN_UNKNOWN_RULES_MINED in node.getnetworkinfo()["warnings"])

        self.log.info("Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version.")
        # Mine a period worth of expected blocks so the generic block-version warning
        # is cleared. This will move the versionbit state to ACTIVE.
        node.generate(VB_PERIOD)

        # Stop-start the node. This is required because bitcoind will only warn once about unknown versions or unknown rules activating.
        self.restart_node(0)

        # Generating one block guarantees that we'll get out of IBD
        node.generate(1)
        wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'], timeout=10, lock=mininode_lock)
        # Generating one more block will be enough to generate an error.
        node.generate(1)
        # Check that get*info() shows the versionbits unknown rules warning
        assert(WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"])
        assert(WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"])
        # Check that the alert file shows the versionbits unknown rules warning
        wait_until(lambda: self.versionbits_in_alert_file(), timeout=60)
Example #25
    def test_compactblocks_not_at_tip(self, test_node):
        node = self.nodes[0]
        # Test that requesting old compactblocks doesn't work.
        MAX_CMPCTBLOCK_DEPTH = 5
        new_blocks = []
        for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
            test_node.clear_block_announcement()
            new_blocks.append(node.generate(1)[0])
            wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)

        test_node.clear_block_announcement()
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)

        test_node.clear_block_announcement()
        node.generate(1)
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
        test_node.clear_block_announcement()
        with mininode_lock:
            test_node.last_message.pop("block", None)
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
        with mininode_lock:
            test_node.last_message["block"].block.calc_sha256()
            assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))

        # Generate an old compactblock, and verify that it's not accepted.
        cur_height = node.getblockcount()
        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
        block = self.build_block_on_tip(node)
        block.hashPrevBlock = hashPrevBlock
        block.solve()

        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block)
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

        tips = node.getchaintips()
        found = False
        for x in tips:
            if x["hash"] == block.hash:
                assert_equal(x["status"], "headers-only")
                found = True
                break
        assert found

        # Requesting this block via getblocktxn should silently fail
        # (to avoid fingerprinting attacks).
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
        with mininode_lock:
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            assert "blocktxn" not in test_node.last_message
Example #26
    def test_height_min(self):
        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        self.log.info("Success")
        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
        self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        for i in range(25):
            mine_large_block(self.nodes[0], self.utxo_cache_0)

        # Wait for blk00000.dat to be pruned
        wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)

        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")
Example #27
    def test_end_to_end_block_relay(self, node, listeners):
        utxo = self.utxos.pop(0)

        block = self.build_block_with_transactions(node, utxo, 10)

        [l.clear_block_announcement() for l in listeners]

        # ToHex() won't serialize with witness, but this block has no witnesses
        # anyway. TODO: repeat this test with witness tx's to a segwit node.
        node.submitblock(ToHex(block))

        for l in listeners:
            wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
        with mininode_lock:
            for l in listeners:
                assert "cmpctblock" in l.last_message
                l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
                assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
Example #28
    def run_test(self):
        self.log.info("test -blocknotify")
        block_count = 10
        blocks = self.nodes[1].generate(block_count)

        # wait at most 10 seconds for expected file size before reading the content
        wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)

        # file content should equal the generated blocks hashes
        with open(self.block_filename, 'r', encoding="utf-8") as f:
            assert_equal(sorted(blocks), sorted(l.strip() for l in f.read().splitlines()))

        self.log.info("test -walletnotify")
        # wait at most 10 seconds for expected file size before reading the content
        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)

        # file content should equal the generated transaction hashes
        txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r', encoding="ascii") as f:
            assert_equal(sorted(txids_rpc), sorted(l.strip() for l in f.read().splitlines()))
        os.remove(self.tx_filename)

        self.log.info("test -walletnotify after rescan")
        # restart node to rescan to force wallet notifications
        self.restart_node(1)
        connect_nodes_bi(self.nodes, 0, 1)

        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)

        # file content should equal the generated transaction hashes
        txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r', encoding="ascii") as f:
            assert_equal(sorted(txids_rpc), sorted(l.strip() for l in f.read().splitlines()))

        # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
        self.log.info("test -alertnotify")
        self.nodes[1].generate(41)
        self.sync_all()

        # Give bitcoind 10 seconds to write the alert notification
        wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)

        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text = f.read()

        # Mine more up-version blocks, should not get more alerts:
        self.nodes[1].generate(2)
        self.sync_all()

        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text2 = f.read()

        self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
        assert_equal(alert_text, alert_text2)
Example #29
    def reorg_back(self):
        # Verify that a block on the old main chain fork has been pruned away
        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
        with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
            self.nodes[2].verifychain(checklevel=4, nblocks=0)
        self.log.info("Will need to redownload block %d" % self.forkheight)

        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it is expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2

        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1

        self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        # Wait for Node 2 to reorg to proper height
        wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
        assert(self.nodes[2].getbestblockhash() == goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
Example #30
    def run_test(self):
        self.log.info("test -blocknotify")
        block_count = 10
        blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)

        # wait at most 10 seconds for expected number of files before reading the content
        wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)

        # directory content should equal the generated blocks hashes
        assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))

        if self.is_wallet_compiled():
            self.log.info("test -walletnotify")
            # wait at most 10 seconds for expected number of files before reading the content
            wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)

            # directory content should equal the generated transaction hashes
            txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
            assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
            self.stop_node(1)
            for tx_file in os.listdir(self.walletnotify_dir):
                os.remove(os.path.join(self.walletnotify_dir, tx_file))

            self.log.info("test -walletnotify after rescan")
            # restart node to rescan to force wallet notifications
            self.start_node(1)
            connect_nodes_bi(self.nodes, 0, 1)

            wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)

            # directory content should equal the generated transaction hashes
            txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
            assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))

        # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
        self.log.info("test -alertnotify")
        self.nodes[1].generatetoaddress(41, ADDRESS_BCRT1_UNSPENDABLE)
        self.sync_all()

        # Give bitcoind 10 seconds to write the alert notification
        wait_until(lambda: len(os.listdir(self.alertnotify_dir)), timeout=10)

        for notify_file in os.listdir(self.alertnotify_dir):
            os.remove(os.path.join(self.alertnotify_dir, notify_file))

        # Mine more up-version blocks, should not get more alerts:
        self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_UNSPENDABLE)
        self.sync_all()

        self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
        assert_equal(len(os.listdir(self.alertnotify_dir)), 0)
Example #31
 def wait_for_disconnect(self, timeout=60):
     test_function = lambda: not self.is_connected
     wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #32
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        self.coinbase_txids = [
            self.nodes[0].getblock(b)['tx'][0]
            for b in self.nodes[0].generate(CLTV_HEIGHT - 2)
        ]
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that an invalid-according-to-CLTV transaction can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0],
                                     self.coinbase_txids[0],
                                     self.nodeaddress,
                                     amount=1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1),
                             block_time)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
                   lock=mininode_lock)
        with mininode_lock:
            assert_equal(self.nodes[0].p2p.last_message["reject"].code,
                         REJECT_OBSOLETE)
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                         b'bad-version(0x00000003)')
            assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                         block.sha256)
            del self.nodes[0].p2p.last_message["reject"]

        self.log.info(
            "Test that invalid-according-to-cltv transactions cannot appear in a block"
        )
        block.nVersion = 4

        spendtx = create_transaction(self.nodes[0],
                                     self.coinbase_txids[1],
                                     self.nodeaddress,
                                     amount=1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for CLTV by getting it
        # rejected from the mempool for exactly that reason.
        assert_equal(
            [{'txid': spendtx.hash,
              'allowed': False,
              'reject-reason': '64: non-mandatory-script-verify-flag (Negative locktime)'}],
            self.nodes[0].testmempoolaccept(
                rawtxs=[bytes_to_hex_str(spendtx.serialize())],
                allowhighfees=True))

        # Now we verify that a block with this transaction is also invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
                   lock=mininode_lock)
        with mininode_lock:
            assert self.nodes[0].p2p.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                         block.sha256)
            if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(self.nodes[0].p2p.last_message["reject"].reason,
                             b'block-validation-failed')
            else:
                assert b'Negative locktime' in self.nodes[0].p2p.last_message[
                    "reject"].reason

        self.log.info(
            "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted"
        )
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
        spendtx.rehash()

        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
Example #33
 def getBlock(self, blkHash):
     self.block = None
     inv = CInv(t=2, h=int(blkHash, 16))
     self.send_message(msg_getdata(inv=[inv]))
     wait_until(lambda: self.block is not None)
     return self.block
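A hedged usage sketch, assuming the peer's on_block handler stores the received block in self.block (which is what the None sentinel above implies):

# Hypothetical usage sketch; fetch the node's current tip over P2P.
block = peer.getBlock(node.getbestblockhash())  # returns the deserialized block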
Example #34
    def run_test(self):
        node = self.nodes[0]

        self.log.info('Start with empty mempool, and 200 blocks')
        self.mempool_size = 0
        wait_until(lambda: node.getblockcount() == 200)
        assert_equal(node.getmempoolinfo()['size'], self.mempool_size)

        self.log.info('Should not accept garbage to testmempoolaccept')
        assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
        assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
        assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))

        self.log.info('A transaction already in the blockchain')
        coin = node.listunspent()[0]  # Pick a random coin(base) to spend
        raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
            inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
            outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
        ))['hex']
        txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, allowhighfees=True)
        node.generate(1)
        self.check_mempool_result(
            result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': '18: txn-already-known'}],
            rawtxs=[raw_tx_in_block],
        )

        self.log.info('A transaction not in the mempool')
        fee = 0.00000700
        raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
            inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}],  # RBF is used later
            outputs=[{node.getnewaddress(): 0.3 - fee}],
        ))['hex']
        tx = CTransaction()
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
        txid_0 = tx.rehash()
        self.check_mempool_result(
            result_expected=[{'txid': txid_0, 'allowed': True}],
            rawtxs=[raw_tx_0],
        )

        self.log.info('A transaction in the mempool')
        node.sendrawtransaction(hexstring=raw_tx_0)
        self.mempool_size = 1
        self.check_mempool_result(
            result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': '18: txn-already-in-mempool'}],
            rawtxs=[raw_tx_0],
        )

        self.log.info('A transaction that replaces a mempool transaction')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
        tx.vout[0].nValue -= int(fee * COIN)  # Double the fee
        tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1  # Now, opt out of RBF
        raw_tx_0 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
        txid_0 = tx.rehash()
        self.check_mempool_result(
            result_expected=[{'txid': txid_0, 'allowed': True}],
            rawtxs=[raw_tx_0],
        )

        self.log.info('A transaction that conflicts with an unconfirmed tx')
        # Send the transaction that replaces the mempool transaction and opts out of replaceability
        node.sendrawtransaction(hexstring=bytes_to_hex_str(tx.serialize()), allowhighfees=True)
        # take original raw_tx_0
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
        tx.vout[0].nValue -= int(4 * fee * COIN)  # Set more fee
        # skip re-signing the tx
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '18: txn-mempool-conflict'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
            allowhighfees=True,
        )

        self.log.info('A transaction with missing inputs, that never existed')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
        tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
        # skip re-signing the tx
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A transaction with missing inputs, that existed once in the past')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
        tx.vin[0].prevout.n = 1  # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
        raw_tx_1 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
        txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, allowhighfees=True)
        # Now spend both to "clearly hide" the outputs, i.e. remove the coins from the utxo set by spending them
        raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
            inputs=[
                {'txid': txid_0, 'vout': 0},
                {'txid': txid_1, 'vout': 0},
            ],
            outputs=[{node.getnewaddress(): 0.1}]
        ))['hex']
        txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, allowhighfees=True)
        node.generate(1)
        self.mempool_size = 0
        # Now see if we can add the coins back to the utxo set by sending the exact txs again
        self.check_mempool_result(
            result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
            rawtxs=[raw_tx_0],
        )
        self.check_mempool_result(
            result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
            rawtxs=[raw_tx_1],
        )

        self.log.info('Create a signed "reference" tx for later use')
        raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
            inputs=[{'txid': txid_spend_both, 'vout': 0}],
            outputs=[{node.getnewaddress(): 0.05}],
        ))['hex']
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        # Reference tx should be valid on itself
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': True}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A transaction with no outputs')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vout = []
        # Skip re-signing the transaction for context independent checks from now on
        # tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex'])))
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-empty'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A really large transaction')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vin = [tx.vin[0]] * (MAX_BLOCK_BASE_SIZE // len(tx.vin[0].serialize()))
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-oversize'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A transaction with negative output value')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vout[0].nValue *= -1
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-negative'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A transaction with too large output value')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vout[0].nValue = 84000000 * COIN + 1
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-toolarge'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A transaction with too large sum of output values')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vout = [tx.vout[0]] * 2
        tx.vout[0].nValue = 84000000 * COIN
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-txouttotal-toolarge'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A transaction with duplicate inputs')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vin = [tx.vin[0]] * 2
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-inputs-duplicate'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A coinbase transaction')
        # Pick the input of the first tx we signed, so it has to be a coinbase tx
        raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: coinbase'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('Some nonstandard transactions')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.nVersion = 3  # A version currently non-standard
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: version'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vout[0].scriptPubKey = CScript([OP_0])  # Some non-standard script
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptpubkey'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vin[0].scriptSig = CScript([OP_HASH160])  # Some not-pushonly scriptSig
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptsig-not-pushonly'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
        num_scripts = 100000 // len(output_p2sh_burn.serialize())  # Use enough outputs to make the tx too large for our policy
        tx.vout = [output_p2sh_burn] * num_scripts
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: tx-size'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vout[0] = output_p2sh_burn
        tx.vout[0].nValue -= 1  # Make output smaller, such that it is dust for our policy
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: dust'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
        tx.vout = [tx.vout[0]] * 2
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: multi-op-return'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A timelocked transaction')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vin[0].nSequence -= 1  # Should be non-max, so locktime is not ignored
        tx.nLockTime = node.getblockcount() + 1
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-final'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
        )

        self.log.info('A transaction that is locked by BIP68 sequence logic')
        tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
        tx.vin[0].nSequence = 2  # We could include it in the second block mined from now, but not the very next one
        # Can skip re-signing the tx because of early rejection
        self.check_mempool_result(
            result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-BIP68-final'}],
            rawtxs=[bytes_to_hex_str(tx.serialize())],
            allowhighfees=True,
        )
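
A note on the helper used throughout this example: check_mempool_result() is not shown on this page. A minimal sketch consistent with how it is called (the self.mempool_size counter is an assumption carried over from the test's setup):

    def check_mempool_result(self, result_expected, *args, **kwargs):
        """Wrapper around node.testmempoolaccept; also asserts that the
        actual mempool was left untouched by the dry-run."""
        result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
        assert_equal(result_expected, result_test)
        # testmempoolaccept must not have side effects on the mempool
        assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size)
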
Example #35
File: util.py Project: foxbetdev/fxb-core
    def request_headers_and_sync(self, locator, hashstop=0):
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
        self.clear_block_announcement()
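
The get_headers() call above is glue on the test's P2P connection class rather than a framework built-in; a plausible sketch, assuming msg_getheaders from test_framework.messages (locator entries are block hashes as ints):

    def get_headers(self, locator, hashstop):
        # Build and send a getheaders message; the node replies with up to
        # 2000 headers after the locator, stopping at hashstop.
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.send_message(msg)
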
Example #36
    def wait_for_verack(self, timeout=60):
        def test_function():
            return self.message_count["verack"]

        wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #37
    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: self.state != "connected"
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
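
Almost every example on this page polls with wait_until() from test_framework.util. A simplified sketch of its behavior (poll a predicate, optionally under a lock, until it is true or the timeout expires):

import time

def wait_until(predicate, *, timeout=60, lock=None):
    # Evaluate `predicate` every 50 ms; hold `lock` during evaluation when
    # given (e.g. mininode_lock protects the P2P message state).
    time_end = time.time() + timeout
    while time.time() < time_end:
        if lock:
            with lock:
                if predicate():
                    return
        elif predicate():
            return
        time.sleep(0.05)
    raise AssertionError("wait_until() timed out")
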
Example #38
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.announced_blockhashes)

        wait_until(received_hash, timeout=timeout, lock=mininode_lock)
Example #39
    def wait_for_verack(self, timeout=60):
        test_function = lambda: self.message_count["verack"]
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #40
    def run_test(self):
        node = self.nodes[0]  # convenience reference to the node

        self.bootstrap_p2p()  # Add one p2p connection to the node

        best_block = self.nodes[0].getbestblockhash()
        tip = int(best_block, 16)
        best_block_time = self.nodes[0].getblock(best_block)['time']
        block_time = best_block_time + 1

        self.log.info("Create a new block with an anyone-can-spend coinbase.")
        height = 1
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()
        # Save the coinbase for later
        block1 = block
        tip = block.sha256
        node.p2p.send_blocks_and_test([block], node, success=True)

        self.log.info("Mature the block.")
        self.nodes[0].generate(100)

        # b'\x64' is OP_NOTIF
        # Transaction will be rejected with code 16 (REJECT_INVALID)
        # and we get disconnected immediately
        self.log.info('Test a transaction that is rejected')
        tx1 = create_tx_with_script(block1.vtx[0],
                                    0,
                                    script_sig=b'\x64' * 35,
                                    amount=50 * COIN - 12000)
        node.p2p.send_txs_and_test([tx1],
                                   node,
                                   success=False,
                                   expect_disconnect=True)

        # Make two p2p connections to provide the node with orphans
        # * p2ps[0] will send valid orphan txs (one with low fee)
        # * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
        self.reconnect_p2p(num_connections=2)

        self.log.info('Test orphan transaction handling ... ')
        # Create a root transaction that we withhold until all dependent transactions
        # are sent out and in the orphan cache
        SCRIPT_PUB_KEY_OP_TRUE = b'\x51\x75' * 15 + b'\x51'  # OP_TRUE OP_DROP (15x), then OP_TRUE
        tx_withhold = CTransaction()
        tx_withhold.vin.append(
            CTxIn(outpoint=COutPoint(block1.vtx[0].sha256, 0)))
        tx_withhold.vout.append(
            CTxOut(nValue=50 * COIN - 12000,
                   scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
        tx_withhold.calc_sha256()

        # Our first orphan tx with some outputs to create further orphan txs
        tx_orphan_1 = CTransaction()
        tx_orphan_1.vin.append(
            CTxIn(outpoint=COutPoint(tx_withhold.sha256, 0)))
        tx_orphan_1.vout = [
            CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)
        ] * 3
        tx_orphan_1.calc_sha256()

        # A valid transaction with low fee
        tx_orphan_2_no_fee = CTransaction()
        tx_orphan_2_no_fee.vin.append(
            CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 0)))
        tx_orphan_2_no_fee.vout.append(
            CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))

        # A valid transaction with sufficient fee
        tx_orphan_2_valid = CTransaction()
        tx_orphan_2_valid.vin.append(
            CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 1)))
        tx_orphan_2_valid.vout.append(
            CTxOut(nValue=10 * COIN - 12000,
                   scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
        tx_orphan_2_valid.calc_sha256()

        # An invalid transaction with negative fee
        tx_orphan_2_invalid = CTransaction()
        tx_orphan_2_invalid.vin.append(
            CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 2)))
        tx_orphan_2_invalid.vout.append(
            CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))

        self.log.info('Send the orphans ... ')
        # Send valid orphan txs from p2ps[0]
        node.p2p.send_txs_and_test(
            [tx_orphan_1, tx_orphan_2_no_fee, tx_orphan_2_valid],
            node,
            success=False)
        # Send invalid tx from p2ps[1]
        node.p2ps[1].send_txs_and_test([tx_orphan_2_invalid],
                                       node,
                                       success=False)

        assert_equal(0,
                     node.getmempoolinfo()['size'])  # Mempool should be empty
        assert_equal(2, len(node.getpeerinfo()))  # p2ps[1] is still connected

        self.log.info('Send the withhold tx ... ')
        node.p2p.send_txs_and_test([tx_withhold], node, success=True)

        # Transactions that should end up in the mempool
        expected_mempool = {
            t.hash
            for t in [
                tx_withhold,  # The transaction that is the root for all orphans
                tx_orphan_1,  # The orphan transaction that splits the coins
                tx_orphan_2_valid,  # The valid transaction (with sufficient fee)
            ]
        }
        # Transactions that do not end up in the mempool
        # tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
        # tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)

        wait_until(lambda: 1 == len(node.getpeerinfo()),
                   timeout=12)  # p2ps[1] is no longer connected
        assert_equal(expected_mempool, set(node.getrawmempool()))

        # restart node with sending BIP61 messages disabled, check that it disconnects without sending the reject message
        self.log.info(
            'Test a transaction that is rejected, with BIP61 disabled')
        self.restart_node(0, ['-enablebip61=0', '-persistmempool=0'])
        self.reconnect_p2p(num_connections=1)
        node.p2p.send_txs_and_test([tx1],
                                   node,
                                   success=False,
                                   expect_disconnect=True)
        # send_txs_and_test will have waited for disconnect, so we can safely check that no reject has been received
        assert_equal(node.p2p.reject_code_received, None)
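
bootstrap_p2p() and reconnect_p2p() are helpers of this test class, not framework built-ins. Sketches consistent with their use above, assuming P2PDataStore as the connection class:

    def bootstrap_p2p(self, num_connections=1):
        """Add P2P connection(s) to node 0 and wait for the version handshake."""
        for _ in range(num_connections):
            self.nodes[0].add_p2p_connection(P2PDataStore())
        self.nodes[0].p2p.wait_for_verack()

    def reconnect_p2p(self, num_connections=1):
        """Tear down all P2P connections and bootstrap again."""
        self.nodes[0].disconnect_p2ps()
        self.bootstrap_p2p(num_connections=num_connections)
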
Example #41
    def run_test(self):
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())

        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)

        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generate(nblocks=10)

        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)

        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata()
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))

        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)

        stale_hash = int(block_hashes[-1], 16)

        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        test_function = lambda: self.last_block_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        test_function = lambda: self.last_header_equals(stale_hash, node0)
        wait_until(test_function, timeout=3)

        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        tip = self.nodes[0].generate(nblocks=1)[0]
        assert_equal(self.nodes[0].getblockcount(), 14)

        # Send getdata & getheaders to refresh last received getheader message
        block_hash = int(tip, 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        # Request for very old stale block should now fail
        self.send_block_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_block_equals(stale_hash, node0)

        # Request for very old stale block header should now fail
        self.send_header_request(stale_hash, node0)
        time.sleep(3)
        assert not self.last_header_equals(stale_hash, node0)

        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()

        self.send_block_request(block_hash, node0)
        test_function = lambda: self.last_block_equals(block_hash, node0)
        wait_until(test_function, timeout=3)

        self.send_header_request(block_hash, node0)
        test_function = lambda: self.last_header_equals(block_hash, node0)
        wait_until(test_function, timeout=3)
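
The send_block_request/send_header_request and last_block_equals/last_header_equals helpers this test relies on are not shown; sketches consistent with their usage (CInv type 2 denotes a block in a getdata request):

    def send_block_request(self, block_hash, node):
        msg = msg_getdata()
        msg.inv.append(CInv(2, block_hash))  # 2 == "Block"
        node.send_message(msg)

    def send_header_request(self, block_hash, node):
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)

    def last_block_equals(self, expected_hash, node):
        block_msg = node.last_message.get("block")
        return block_msg and block_msg.block.rehash() == expected_hash

    def last_header_equals(self, expected_hash, node):
        headers_msg = node.last_message.get("headers")
        return (headers_msg and headers_msg.headers
                and headers_msg.headers[0].rehash() == expected_hash)
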
Example #42
    def run_test(self):
        self.log.info(
            "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:"
        )
        self.log.info("Mine 4 blocks on Node 0")
        self.nodes[0].generatetoaddress(
            4, self.nodes[0].get_deterministic_priv_key().address)
        assert_equal(self.nodes[0].getblockcount(), 4)
        besthash_n0 = self.nodes[0].getbestblockhash()

        self.log.info("Mine competing 6 blocks on Node 1")
        self.nodes[1].generatetoaddress(
            6, self.nodes[1].get_deterministic_priv_key().address)
        assert_equal(self.nodes[1].getblockcount(), 6)

        self.log.info("Connect nodes to force a reorg")
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes[0:2])
        assert_equal(self.nodes[0].getblockcount(), 6)
        badhash = self.nodes[1].getblockhash(2)

        self.log.info(
            "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
        )
        self.nodes[0].invalidateblock(badhash)
        assert_equal(self.nodes[0].getblockcount(), 4)
        assert_equal(self.nodes[0].getbestblockhash(), besthash_n0)

        self.log.info("Make sure we won't reorg to a lower work chain:")
        connect_nodes_bi(self.nodes, 1, 2)
        self.log.info("Sync node 2 to node 1 so both have 6 blocks")
        sync_blocks(self.nodes[1:3])
        assert_equal(self.nodes[2].getblockcount(), 6)
        self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
        self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
        assert_equal(self.nodes[1].getblockcount(), 4)
        self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
        self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
        assert_equal(self.nodes[2].getblockcount(), 2)
        self.log.info("..and then mine a block")
        self.nodes[2].generatetoaddress(
            1, self.nodes[2].get_deterministic_priv_key().address)
        self.log.info("Verify all nodes are at the right height")
        wait_until(lambda: self.nodes[2].getblockcount() == 3, timeout=5)
        wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5)
        wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5)

        self.log.info("Verify that we reconsider all ancestors as well")
        blocks = self.nodes[1].generatetoaddress(10, ADDRESS_BCRT1_UNSPENDABLE)
        assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
        # Invalidate the two blocks at the tip
        self.nodes[1].invalidateblock(blocks[-1])
        self.nodes[1].invalidateblock(blocks[-2])
        assert_equal(self.nodes[1].getbestblockhash(), blocks[-3])
        # Reconsider only the previous tip
        self.nodes[1].reconsiderblock(blocks[-1])
        # Should be back at the tip by now
        assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])

        self.log.info("Verify that we reconsider all descendants")
        blocks = self.nodes[1].generatetoaddress(10, ADDRESS_BCRT1_UNSPENDABLE)
        assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
        # Invalidate the two blocks at the tip
        self.nodes[1].invalidateblock(blocks[-2])
        self.nodes[1].invalidateblock(blocks[-4])
        assert_equal(self.nodes[1].getbestblockhash(), blocks[-5])
        # Reconsider only the previous tip
        self.nodes[1].reconsiderblock(blocks[-4])
        # Should be back at the tip by now
        assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
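
connect_nodes_bi() used above is just connect_nodes applied in both directions, roughly:

def connect_nodes_bi(nodes, a, b):
    # Connect a -> b and b -> a, so each side has both an inbound and an
    # outbound connection to the other.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
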
Example #43
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_txids = [
            self.nodes[0].getblock(b)['tx'][0]
            for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)
        ]
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info(
            "Test that a transaction with non-DER signature can still appear in a block"
        )

        spendtx = create_transaction(self.nodes[0],
                                     self.coinbase_txids[0],
                                     self.nodeaddress,
                                     amount=1.0)
        unDERify(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                             block_time)
        block.set_base_version(2)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.set_base_version(2)
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                '{}, bad-version(0x00010002)'.format(block.hash)
        ]):
            self.nodes[0].p2p.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            self.nodes[0].p2p.sync_with_ping()

        self.log.info(
            "Test that transactions with non-DER signatures cannot appear in a block"
        )
        block.set_base_version(3)

        spendtx = create_transaction(self.nodes[0],
                                     self.coinbase_txids[1],
                                     self.nodeaddress,
                                     amount=1.0)
        unDERify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # rejected from the mempool for exactly that reason.
        assert_equal(
            [{
                'txid': spendtx.hash,
                'allowed': False,
                'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'
            }],
            self.nodes[0].testmempoolaccept(
                rawtxs=[bytes_to_hex_str(spendtx.serialize())],
                allowhighfees=True))

        # Now we verify that a block with this transaction is also invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                'CheckInputs on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'
                .format(block.vtx[-1].hash)
        ]):
            self.nodes[0].p2p.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            self.nodes[0].p2p.sync_with_ping()

        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(),
                   lock=mininode_lock)
        with mininode_lock:
            assert self.nodes[0].p2p.last_message["reject"].code in [
                REJECT_INVALID, REJECT_NONSTANDARD
            ]
            assert_equal(self.nodes[0].p2p.last_message["reject"].data,
                         block.sha256)
            assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason

        self.log.info(
            "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
        )
        block.vtx[1] = create_transaction(self.nodes[0],
                                          self.coinbase_txids[1],
                                          self.nodeaddress,
                                          amount=1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
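
unDERify() is the module-level helper that makes this test's signatures non-canonical; a sketch matching the upstream feature_dersig helper (pad the signature push in input 0 with a zero byte before the sighash-type byte):

def unDERify(tx):
    """Make the signature in vin 0 of a tx non-DER-compliant,
    by adding padding after the S-value."""
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for i in scriptSig:
        if len(newscript) == 0:
            newscript.append(i[0:-1] + b'\0' + i[-1:])
        else:
            newscript.append(i)
    tx.vin[0].scriptSig = CScript(newscript)
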
Example #44
    def run_test(self):
        self.log.info("Connect nodes both way")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 0)

        self.log.info("Test setban and listbanned RPCs")

        self.log.info("setban: successfully ban single IP address")
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")

        self.log.info("setban: fail to ban an already banned subnet")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")

        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_rpc_error(-23, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24

        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_rpc_error(-1, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        # Set the mocktime so we can control when bans expire
        old_time = int(time.time())
        self.nodes[1].setmocktime(old_time)
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        # Move time forward by 3 seconds so the third ban has expired
        self.nodes[1].setmocktime(old_time + 3)
        assert_equal(len(self.nodes[1].listbanned()), 3)

        self.stop_node(1)
        self.start_node(1)

        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        self.log.info("Connect nodes both way")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 0)

        self.log.info("Test disconnectnode RPCs")

        #self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
        #address1 = self.nodes[0].getpeerinfo()[0]['addr']
        #node1 = self.nodes[0].getpeerinfo()[0]['addr']
        #assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)

        self.log.info("disconnectnode: fail to disconnect when calling with junk address")
        assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, "221B Baker Street")

        self.log.info("disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address1)
        wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        self.log.info("disconnectnode: successfully reconnect node")
        connect_nodes(self.nodes[0], 1)  # reconnect the node
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
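
For reference, assert_raises_rpc_error() checks both the JSON-RPC error code and a substring of the error message; simplified, it behaves like:

from test_framework.authproxy import JSONRPCException

def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # Both the numeric code and the message fragment must match
        assert e.error["code"] == code, "unexpected error code"
        assert message in e.error["message"], "unexpected error message"
    else:
        raise AssertionError("No exception raised")
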
Example #45
    def wait_for_getheaders(self, timeout=60):
        test_function = lambda: self.last_message.get("getheaders")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #46
    def test_compactblocks_not_at_tip(self, node, test_node):
        # Test that requesting old compactblocks doesn't work.
        MAX_CMPCTBLOCK_DEPTH = 5
        new_blocks = []
        for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
            test_node.clear_block_announcement()
            new_blocks.append(node.generate(1)[0])
            wait_until(test_node.received_block_announcement,
                       timeout=30,
                       lock=mininode_lock)

        test_node.clear_block_announcement()
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "cmpctblock" in test_node.last_message,
                   timeout=30,
                   lock=mininode_lock)

        test_node.clear_block_announcement()
        node.generate(1)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)
        test_node.clear_block_announcement()
        with mininode_lock:
            test_node.last_message.pop("block", None)
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        wait_until(lambda: "block" in test_node.last_message,
                   timeout=30,
                   lock=mininode_lock)
        with mininode_lock:
            test_node.last_message["block"].block.calc_sha256()
            assert_equal(test_node.last_message["block"].block.sha256,
                         int(new_blocks[0], 16))

        # Generate an old compactblock, and verify that it's not accepted.
        cur_height = node.getblockcount()
        hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
        block = self.build_block_on_tip(node)
        block.hashPrevBlock = hashPrevBlock
        block.solve()

        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block)
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

        tips = node.getchaintips()
        found = False
        for x in tips:
            if x["hash"] == block.hash:
                assert_equal(x["status"], "headers-only")
                found = True
                break
        assert (found)

        # Requesting this block via getblocktxn should silently fail
        # (to avoid fingerprinting attacks).
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
        with mininode_lock:
            test_node.last_message.pop("blocktxn", None)
        test_node.send_and_ping(msg)
        with mininode_lock:
            assert "blocktxn" not in test_node.last_message
Example #47
    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #48
    def test_incorrect_blocktxn_response(self, node, test_node, version):
        if (len(self.utxos) == 0):
            self.make_utxos()
        utxo = self.utxos.pop(0)

        block = self.build_block_with_transactions(node, utxo, 10)
        self.utxos.append(
            [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        # Relay the first 5 transactions from the block in advance
        for tx in block.vtx[1:6]:
            test_node.send_message(msg_tx(tx))
        test_node.sync_with_ping()
        # Make sure all transactions were accepted.
        mempool = node.getrawmempool()
        for tx in block.vtx[1:6]:
            assert (tx.hash in mempool)

        # Send compact block
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block,
                                         prefill_list=[0],
                                         use_witness=(version == 2))
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        absolute_indexes = []
        with mininode_lock:
            assert ("getblocktxn" in test_node.last_message)
            absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [6, 7, 8, 9, 10])

        # Now give an incorrect response.
        # Note that it's possible for bitcoind to be smart enough to know we are
        # lying, since it could check to see if the shortid matches what we are
        # sending, and e.g. disconnect us for misbehavior. If that behavior
        # change were made, we could just modify this test by having a
        # different peer provide the block further down, so that we are still
        # verifying that the block isn't marked bad permanently. This is good
        # enough for now.
        msg = msg_blocktxn()
        if version == 2:
            msg = msg_witness_blocktxn()
        msg.block_transactions = BlockTransactions(
            block.sha256, [block.vtx[5]] + block.vtx[7:])
        test_node.send_and_ping(msg)

        # Tip should not have updated
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

        # We should receive a getdata request
        wait_until(lambda: "getdata" in test_node.last_message,
                   timeout=10,
                   lock=mininode_lock)
        assert_equal(len(test_node.last_message["getdata"].inv), 1)
        assert (test_node.last_message["getdata"].inv[0].type == 2
                or test_node.last_message["getdata"].inv[0].type
                == 2 | MSG_WITNESS_FLAG)
        assert_equal(test_node.last_message["getdata"].inv[0].hash,
                     block.sha256)

        # Deliver the block
        if version == 2:
            test_node.send_and_ping(msg_witness_block(block))
        else:
            test_node.send_and_ping(msg_block(block))
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
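
The block above comes from build_block_with_transactions(), which chains num_transactions spends of the given utxo into one block; a sketch consistent with the indexes checked above (the anyone-can-spend output script is an assumption):

    def build_block_with_transactions(self, node, utxo, num_transactions):
        block = self.build_block_on_tip(node)
        for _ in range(num_transactions):
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
            tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
            tx.rehash()
            utxo = [tx.sha256, 0, tx.vout[0].nValue]  # next tx spends this one
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        return block
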
Example #49
    def wait_for_addr(self, timeout=5):
        test_function = lambda: self.last_message.get("addr")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #50
    def test_compactblock_construction(self, node, test_node, version,
                                       use_witness_address):
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        if use_witness_address:
            # Want at least one segwit spend, so move all funds to
            # a witness address.
            address = node.addwitnessaddress(address)
            value_to_send = node.getbalance()
            node.sendtoaddress(address,
                               satoshi_round(value_to_send - Decimal(0.1)))
            node.generate(1)

        segwit_tx_generated = False
        for i in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = FromHex(CTransaction(), hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True

        if use_witness_address:
            assert (segwit_tx_generated)  # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node, version)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))  # zero-pad the hash to 64 hex chars
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

        wait_until(test_node.received_block_announcement,
                   timeout=30,
                   lock=mininode_lock)

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert ("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(
                test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(
            version, header_and_shortids, block_hash, block)
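
request_cb_announcements() asks the node to fast-announce new blocks as compact blocks; a sketch based on how it is used here (sync headers first, then send sendcmpct with announce=True):

    def request_cb_announcements(self, peer, node, version):
        tip = node.getbestblockhash()
        peer.get_headers(locator=[int(tip, 16)], hashstop=0)
        msg = msg_sendcmpct()
        msg.version = version
        msg.announce = True
        peer.send_and_ping(msg)
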
Example #51
    def run_test(self):
        self.log.info("test -blocknotify")
        block_count = 10
        blocks = self.nodes[1].generate(block_count)

        # wait at most 10 seconds for expected file size before reading the content
        wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(
            self.block_filename).st_size >= (block_count * 65),
                   timeout=10)

        # file content should equal the generated blocks hashes
        with open(self.block_filename, 'r') as f:
            assert_equal(sorted(blocks), sorted(f.read().splitlines()))

        self.log.info("test -walletnotify")
        # wait at most 10 seconds for expected file size before reading the content
        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(
            self.tx_filename).st_size >= (block_count * 65),
                   timeout=10)

        # file content should equal the generated transaction hashes
        txids_rpc = list(
            map(lambda t: t['txid'],
                self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r') as f:
            assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
        os.remove(self.tx_filename)

        self.log.info("test -walletnotify after rescan")
        # restart node to rescan to force wallet notifications
        self.restart_node(1)
        connect_nodes_bi(self.nodes, 0, 1)

        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(
            self.tx_filename).st_size >= (block_count * 65),
                   timeout=10)

        # file content should equal the generated transaction hashes
        txids_rpc = list(
            map(lambda t: t['txid'],
                self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r') as f:
            assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))

        # Mine another 51 up-version blocks. -alertnotify should trigger on the 51st.
        self.log.info("test -alertnotify")
        self.nodes[1].generate(51)
        self.sync_all()

        # Give kabberryd 10 seconds to write the alert notification
        wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.
                   getsize(self.alert_filename),
                   timeout=10)

        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text = f.read()

        # Mine more up-version blocks, should not get more alerts:
        self.nodes[1].generate(2)
        self.sync_all()

        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text2 = f.read()

        self.log.info(
            "-alertnotify should not continue notifying for more unknown version blocks"
        )
        assert_equal(alert_text, alert_text2)
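
For context, the *_filename attributes and the notify commands are wired up in the test's setup, which this page does not show. A plausible sketch (the file names, the -blockversion value, and the echo commands are assumptions):

    def setup_network(self):
        self.block_filename = os.path.join(self.options.tmpdir, "blocks")
        self.tx_filename = os.path.join(self.options.tmpdir, "transactions")
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        # Node 1 mines "up-version" blocks and appends each %s placeholder
        # (block hash / txid / alert text) to the matching file.
        self.extra_args = [[],
                           ["-blockversion=211",
                            "-alertnotify=echo %%s >> %s" % self.alert_filename,
                            "-blocknotify=echo %%s >> %s" % self.block_filename,
                            "-walletnotify=echo %%s >> %s" % self.tx_filename]]
        super().setup_network()
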
Example #52
    def test_sendcmpct(self,
                       node,
                       test_node,
                       preferred_version,
                       old_node=None):
        # Make sure we get a SENDCMPCT message from our peer
        def received_sendcmpct():
            return (len(test_node.last_sendcmpct) > 0)

        wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
        with mininode_lock:
            # Check that the first version received is the preferred one
            assert_equal(test_node.last_sendcmpct[0].version,
                         preferred_version)
            # And that we receive versions down to 1.
            assert_equal(test_node.last_sendcmpct[-1].version, 1)
            test_node.last_sendcmpct = []

        tip = int(node.getbestblockhash(), 16)

        def check_announcement_of_new_block(node, peer, predicate):
            peer.clear_block_announcement()
            block_hash = int(node.generate(1)[0], 16)
            peer.wait_for_block_announcement(block_hash, timeout=30)
            assert (peer.block_announced)

            with mininode_lock:
                assert predicate(peer), (
                    "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
                        block_hash, peer.last_message.get("cmpctblock", None),
                        peer.last_message.get("inv", None)))

        # We shouldn't get any block announcements via cmpctblock yet.
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Try one more time, this time after requesting headers.
        test_node.request_headers_and_sync(locator=[tip])
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" not in p.last_message and
            "inv" in p.last_message)

        # Test a few ways of using sendcmpct that should NOT
        # result in compact block announcements.
        # Before each test, sync the headers chain.
        test_node.request_headers_and_sync(locator=[tip])

        # Now try a SENDCMPCT message with too-high version
        sendcmpct = msg_sendcmpct()
        sendcmpct.version = preferred_version + 1
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])

        # Now try a SENDCMPCT message with valid version, but announce=False
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])

        # Finally, try a SENDCMPCT message with announce=True
        sendcmpct.version = preferred_version
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time (no headers sync should be needed!)
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time, after turning on sendheaders
        test_node.send_and_ping(msg_sendheaders())
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time, after sending a version-1, announce=false message.
        sendcmpct.version = preferred_version - 1
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Now turn off announcements
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(
            node, test_node, lambda p: "cmpctblock" not in p.last_message and
            "headers" in p.last_message)

        if old_node is not None:
            # Verify that a peer using an older protocol version can receive
            # announcements from this node.
            sendcmpct.version = preferred_version - 1
            sendcmpct.announce = True
            old_node.send_and_ping(sendcmpct)
            # Header sync
            old_node.request_headers_and_sync(locator=[tip])
            check_announcement_of_new_block(
                node, old_node, lambda p: "cmpctblock" in p.last_message)
Example #53
    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        self.log.debug("Send 5 transactions from node2 (to its own address)")
        for i in range(5):
            last_txid = self.nodes[2].sendtoaddress(
                self.nodes[2].getnewaddress(), Decimal("10"))
        node2_balance = self.nodes[2].getbalance()
        self.sync_all()

        self.log.debug(
            "Verify that node0 and node1 have 5 transactions in their mempools"
        )
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Prioritize a transaction on node0")
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'], fees['modified'])
        self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        self.log.debug(
            "Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions."
        )
        self.stop_nodes()
        # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
        # Also don't store the mempool, to keep the datadir clean
        self.start_node(1, extra_args=["-persistmempool=0"])
        self.start_node(0)
        self.start_node(2)
        # Give bitcoind a second to reload the mempool
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1)
        wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1)
        # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug('Verify prioritization is loaded correctly')
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        # Verify accounting of mempool transactions after restart is correct
        self.nodes[2].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
        assert_equal(node2_balance, self.nodes[2].getbalance())

        self.log.debug(
            "Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file."
        )
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0"])
        # Give bitcoind a second to reload the mempool
        time.sleep(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug(
            "Stop-start node0. Verify that it has the transactions in its mempool."
        )
        self.stop_nodes()
        self.start_node(0)
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)

        mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest',
                                   'mempool.dat')
        mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest',
                                   'mempool.dat')
        self.log.debug(
            "Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it"
        )
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)

        self.log.debug(
            "Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions"
        )
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)

        self.log.debug(
            "Prevent newcoind from writing mempool.dat to disk. Verify that `savemempool` fails"
        )
        # to test the exception we are creating a tmp folder called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        os.mkdir(mempooldotnew1)
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk",
                                self.nodes[1].savemempool)
        os.rmdir(mempooldotnew1)
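
A note on the prioritisetransaction arithmetic above: fee_delta is given in satoshis, while the fees dict returned by getmempoolentry is denominated in coins, hence the Decimal('0.00001000') offset:

from decimal import Decimal

COIN = 100000000          # satoshis per coin
fee_delta = 1000          # value passed to prioritisetransaction above
assert Decimal(fee_delta) / COIN == Decimal('0.00001000')
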
Example #54
    def test_nonnull_locators(self, test_node, inv_node):
        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info(
            "Part 1: headers don't start before sendheaders message...")
        for i in range(4):
            self.log.debug("Part 1.{}: starting...".format(i))
            old_tip = tip
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_inv_announcement(inv=[tip])
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # next try requesting header and block
                test_node.send_get_headers(locator=[old_tip], hashstop=tip)
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_block_announcements()  # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                inv_node.clear_block_announcements()
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(
                    self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height + 1),
                                         block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256])
                test_node.send_message(msg_block(new_block))
                test_node.sync_with_ping()  # make sure this block is processed
                wait_until(lambda: inv_node.block_announced,
                           timeout=60,
                           lock=mininode_lock)
                inv_node.clear_block_announcements()
                test_node.clear_block_announcements()

        self.log.info("Part 1: success!")
        self.log.info(
            "Part 2: announce blocks with headers after sendheaders message..."
        )
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(msg_sendheaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.send_get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()

        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=[tip])

        height = self.nodes[0].getblockcount() + 1
        block_time += 10  # Advance far enough ahead
        for i in range(10):
            self.log.debug("Part 2.{}: starting...".format(i))
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                self.log.debug("Part 2.{}.{}: starting...".format(i, j))
                blocks = []
                for b in range(i + 1):
                    blocks.append(
                        create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders()
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    [inv_node.send_block_inv(x.sha256) for x in blocks]
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [test_node.send_message(msg_block(x)) for x in blocks]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert "inv" not in inv_node.last_message
                assert "headers" not in inv_node.last_message
                tip = self.mine_blocks(1)
                inv_node.check_last_inv_announcement(inv=[tip])
                test_node.check_last_headers_announcement(headers=[tip])
                height += 1
                block_time += 1

        self.log.info("Part 2: success!")

        self.log.info(
            "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..."
        )

        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            self.log.debug("Part 3.{}: starting...".format(j))
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_headers_announcement(headers=new_block_hashes)

            block_time += 8

            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_inv_announcement(inv=[tip])

            block_time += 9

            fork_point = self.nodes[0].getblock(
                "%064x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)

            # Use getblocks/getdata
            test_node.send_getblocks(locator=[fork_point])
            test_node.check_last_inv_announcement(inv=new_block_hashes)
            test_node.send_get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])

            for i in range(3):
                self.log.debug("Part 3.{}.{}: starting...".format(j, i))

                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                inv_node.check_last_inv_announcement(inv=[tip])
                test_node.check_last_inv_announcement(inv=[tip])
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.send_get_headers(locator=[fork_point],
                                               hashstop=new_block_hashes[1])
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                    if j == 0:
                        test_node.send_get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_headers_announcement(headers=[tip])

        self.log.info("Part 3: success!")

        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1

        # Create 2 blocks.  Send the blocks, then send the headers.
        blocks = []
        for b in range(2):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(msg_block(blocks[-1]))

        inv_node.sync_with_ping()  # Make sure blocks are processed
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # This time, direct fetch should work
        blocks = []
        for b in range(3):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks],
                                   timeout=DIRECT_FETCH_RESPONSE_TIME)

        [test_node.send_message(msg_block(x)) for x in blocks]

        test_node.sync_with_ping()

        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 2
        blocks = []

        # Create extra blocks for later
        for b in range(20):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]],
                                   timeout=DIRECT_FETCH_RESPONSE_TIME)

        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]],
                                   timeout=DIRECT_FETCH_RESPONSE_TIME)
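        # (Only 14: in Bitcoin Core a peer may have at most
        # MAX_BLOCKS_IN_TRANSIT_PER_PEER (16) blocks in flight, and the two
        # fork blocks requested above have not been delivered yet.)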

        # Announcing 1 more header should not trigger any response
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        self.log.info("Part 4: success!")

        # Now deliver all those blocks we announced.
        [test_node.send_message(msg_block(x)) for x in blocks]

        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            self.log.debug("Part 5.{}: starting...".format(i))
            test_node.last_message.pop("getdata", None)
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(
                    create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders()
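            # The node can't connect the header to its chain, so it should
            # respond with a getheaders (from its tip) to try to fill the gap.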
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            [test_node.send_message(msg_block(x)) for x in blocks]
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16),
                         blocks[1].sha256)

        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10
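        # (This mirrors the node-side constant of the same name -- 10 in
        # Bitcoin Core's net_processing.)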
        for j in range(MAX_UNCONNECTING_HEADERS + 1):
            blocks.append(
                create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders()

        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])

        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]

        # Now try to see how many unconnecting headers we can send
        # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
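        # (In Bitcoin Core, every MAX_UNCONNECTING_HEADERS unconnecting headers
        # messages add 20 to the peer's misbehavior score; at the default
        # threshold of 100 the peer is disconnected after 5 such runs.)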
        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
            test_node.wait_for_getheaders()

        # Eventually this stops working.
        test_node.send_header_for_blocks([blocks[-1]])

        # Should get disconnected
        test_node.wait_for_disconnect()

        self.log.info("Part 5: success!")

        # Finally, check that the inv node never received a getdata request
        # throughout the test
        assert "getdata" not in inv_node.last_message
Example #55
    def run_test(self):
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        self.log.info("Mining blocks...")

        self.nodes[0].generate(1)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 50)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all([self.nodes[0:3]])
        self.nodes[1].generate(101)
        self.sync_all([self.nodes[0:3]])

        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Check that only first and second nodes have UTXOs
        utxos = self.nodes[0].listunspent()
        assert_equal(len(utxos), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        self.log.info("test gettxout")
        confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"]
        # First, outputs that are unspent both in the chain and in the
        # mempool should appear with or without include_mempool
        txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False)
        assert_equal(txout['value'], 50)
        txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True)
        assert_equal(txout['value'], 50)

        # Send 21 BTC from 0 to 2 using sendtoaddress call.
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)

        self.log.info("test gettxout (second part)")
        # utxo spent in mempool should be visible if you exclude mempool
        # but invisible if you include mempool
        txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False)
        assert_equal(txout['value'], 50)
        txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True)
        assert txout is None
        # new utxo from mempool should be invisible if you exclude mempool
        # but visible if you include mempool
        txout = self.nodes[0].gettxout(mempool_txid, 0, False)
        assert txout is None
        txout1 = self.nodes[0].gettxout(mempool_txid, 0, True)
        txout2 = self.nodes[0].gettxout(mempool_txid, 1, True)
        # note the mempool tx will have randomly assigned indices
        # but 10 will go to node2 and the rest will go to node0
        balance = self.nodes[0].getbalance()
        assert_equal(set([txout1['value'], txout2['value']]), set([10, balance]))
        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)

        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all([self.nodes[0:3]])

        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0])
        self.nodes[2].lockunspent(False, [unspent_0])
        assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0])
        assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)
        assert_raises_rpc_error(-8, "txid must be of length 64 (not 34, for '0000000000000000000000000000000000')",
                                self.nodes[2].lockunspent, False,
                                [{"txid": "0000000000000000000000000000000000", "vout": 0}])
        assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
                                self.nodes[2].lockunspent, False,
                                [{"txid": "ZZZ0000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
        assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction",
                                self.nodes[2].lockunspent, False,
                                [{"txid": "0000000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
        assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds",
                                self.nodes[2].lockunspent, False,
                                [{"txid": unspent_0["txid"], "vout": 999}])

        # An output should be unlocked when spent
        unspent_0 = self.nodes[1].listunspent()[0]
        self.nodes[1].lockunspent(False, [unspent_0])
        tx = self.nodes[1].createrawtransaction([unspent_0], { self.nodes[1].getnewaddress() : 1 })
        tx = self.nodes[1].fundrawtransaction(tx)['hex']
        tx = self.nodes[1].signrawtransactionwithwallet(tx)["hex"]
        self.nodes[1].sendrawtransaction(tx)
        assert_equal(len(self.nodes[1].listlockunspent()), 0)

        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all([self.nodes[0:3]])

        # node0 should end up with 100 BTC in block rewards plus fees, but
        # minus the 21 BTC plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 100 - 21)
        assert_equal(self.nodes[2].getbalance(), 21)

        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)

        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
            outputs[self.nodes[2].getnewaddress()] = utxo["amount"] - 3
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransactionwithwallet(raw_tx))

        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)

        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all([self.nodes[0:3]])

        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 94)

        # Verify that a spent output cannot be locked anymore
        spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]}
        assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0])

        # Send 10 BTC, with the fee paid on top of the amount
        address = self.nodes[0].getnewaddress("test")
        fee_per_byte = Decimal('0.001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
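        # (settxfee takes a BTC-per-kilobyte rate, hence fee_per_byte * 1000.)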
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all([self.nodes[0:3]])
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
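        # check_fee_amount asserts that the shortfall from the expected balance
        # is a plausible fee for the given tx size, then returns the actual
        # balance (see the sketch after this example).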
        assert_equal(self.nodes[0].getbalance(), Decimal('10'))

        # Send 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all([self.nodes[0:3]])
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))

        # Sendmany 10 BTC
        txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [])
        self.nodes[2].generate(1)
        self.sync_all([self.nodes[0:3]])
        node_0_bal += Decimal('10')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)

        # Sendmany 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [address])
        self.nodes[2].generate(1)
        self.sync_all([self.nodes[0:3]])
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))

        # Test ResendWalletTransactions:
        # Create a couple of transactions, then start up a fourth
        # node (nodes[3]) and ask nodes[0] to rebroadcast.
        # EXPECT: nodes[3] should have those transactions in its mempool.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        sync_mempools(self.nodes[0:2])

        self.start_node(3)
        connect_nodes_bi(self.nodes, 0, 3)
        sync_blocks(self.nodes)

        relayed = self.nodes[0].resendwallettransactions()
        assert_equal(set(relayed), {txid1, txid2})
        sync_mempools(self.nodes)

        assert txid1 in self.nodes[3].getrawmempool()

        # check if we can list a zero-value tx as available coins
        # 1. create a raw tx
        # 2. edit the hex so one output pays 0.0
        # 3. sign and send
        # 4. check if the recipient (node0) can list the zero-value tx
        usp = self.nodes[1].listunspent(query_options={'minimumAmount': '49.998'})[0]
        inputs = [{"txid": usp['txid'], "vout": usp['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}

        raw_tx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000")  # replace 11.11 with 0.0 (int32)
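        # (11.11 BTC = 1,111,000,000 satoshis = 0x423883C0, serialized
        # little-endian as c0833842; the high four bytes of the int64 value are
        # already zero, so this rewrite sets the output value to 0.)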
        signed_raw_tx = self.nodes[1].signrawtransactionwithwallet(raw_tx)
        decoded_raw_tx = self.nodes[1].decoderawtransaction(signed_raw_tx['hex'])
        zero_value_txid = decoded_raw_tx['txid']
        self.nodes[1].sendrawtransaction(signed_raw_tx['hex'])

        self.sync_all()
        self.nodes[1].generate(1)  # mine a block
        self.sync_all()

        unspent_txs = self.nodes[0].listunspent()  # the zero-value tx must be in listunspent output
        found = False
        for uTx in unspent_txs:
            if uTx['txid'] == zero_value_txid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert found

        # do some -walletbroadcast tests
        self.stop_nodes()
        self.start_node(0, ["-walletbroadcast=0"])
        self.start_node(1, ["-walletbroadcast=0"])
        self.start_node(2, ["-walletbroadcast=0"])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_all([self.nodes[0:3]])

        txid_not_broadcast = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
        self.nodes[1].generate(1)  # mine a block, tx should not be in there
        self.sync_all([self.nodes[0:3]])
        assert_equal(self.nodes[2].getbalance(), node_2_bal)  # should not be changed because tx was not broadcast

        # now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(tx_obj_not_broadcast['hex'])
        self.nodes[1].generate(1)
        self.sync_all([self.nodes[0:3]])
        node_2_bal += 2
        tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        # create another tx
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)

        # restart the nodes with -walletbroadcast=1
        self.stop_nodes()
        self.start_node(0)
        self.start_node(1)
        self.start_node(2)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        sync_blocks(self.nodes[0:3])

        self.nodes[0].generate(1)
        sync_blocks(self.nodes[0:3])
        node_2_bal += 2

        # the tx should now be reflected in the balance, because restarting with the default -walletbroadcast=1 rebroadcasts it
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        # send a tx with value in a string (PR#6380 +)
        txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        tx_obj = self.nodes[0].gettransaction(txid)
        assert_equal(tx_obj['amount'], Decimal('-2'))

        txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        tx_obj = self.nodes[0].gettransaction(txid)
        assert_equal(tx_obj['amount'], Decimal('-0.0001'))

        # check if JSON parser can handle scientific notation in strings
        txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        tx_obj = self.nodes[0].gettransaction(txid)
        assert_equal(tx_obj['amount'], Decimal('-0.0001'))

        # General checks for errors from incorrect inputs
        # This will raise an exception because the amount type is wrong
        assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")

        # This will raise an exception since generate does not accept a string
        assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")

        # This will raise an exception for the invalid private key format
        assert_raises_rpc_error(-5, "Invalid private key encoding", self.nodes[0].importprivkey, "invalid")

        # This will raise an exception for importing an address with the P2SH flag
        temp_address = self.nodes[1].getnewaddress()
        assert_raises_rpc_error(-5, "Cannot use the p2sh flag with an address - use a script instead", self.nodes[0].importaddress, temp_address, "label", False, True)

        # This will raise an exception for attempting to dump the private key of an address you do not own
        assert_raises_rpc_error(-3, "Address does not refer to a key", self.nodes[0].dumpprivkey, temp_address)

        # This will raise an exception for attempting to get the private key of an invalid address
        assert_raises_rpc_error(-5, "Invalid Goldcoin address", self.nodes[0].dumpprivkey, "invalid")

        # This will raise an exception for attempting to set a label for an invalid address
        assert_raises_rpc_error(-5, "Invalid Goldcoin address", self.nodes[0].setlabel, "invalid address", "label")

        # This will raise an exception for importing an invalid address
        assert_raises_rpc_error(-5, "Invalid Goldcoin address or script", self.nodes[0].importaddress, "invalid")

        # This will raise an exception for attempting to import a pubkey that isn't in hex
        assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex")

        # This will raise an exception for importing an invalid pubkey
        assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f")

        # Import address and private key to check correct behavior of spendable unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all([self.nodes[0:3]])

        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)

        # 3. Validate that the imported address is watch-only on node1
        assert self.nodes[1].getaddressinfo(address_to_import)["iswatchonly"]

        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})

        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)

        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})

        # Mine a block from node0 to an address from node1
        coinbase_addr = self.nodes[1].getnewaddress()
        block_hash = self.nodes[0].generatetoaddress(1, coinbase_addr)[0]
        coinbase_txid = self.nodes[0].getblock(block_hash)['tx'][0]
        self.sync_all([self.nodes[0:3]])

        # Check that the txid and balance is found by node1
        self.nodes[1].gettransaction(coinbase_txid)

        # check if wallet or blockchain maintenance changes the balance
        self.sync_all([self.nodes[0:3]])
        blocks = self.nodes[0].generate(2)
        self.sync_all([self.nodes[0:3]])
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()

        # Check modes:
        #   - True: unicode escaped as \u....
        #   - False: unicode directly as UTF-8
        for mode in [True, False]:
            self.nodes[0].rpc.ensure_ascii = mode
            # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
            for label in [u'рыба', u'𝅘𝅥𝅯']:
                addr = self.nodes[0].getnewaddress()
                self.nodes[0].setlabel(addr, label)
                assert_equal(self.nodes[0].getaddressinfo(addr)['label'], label)
                assert label in self.nodes[0].listlabels()
        self.nodes[0].rpc.ensure_ascii = True  # restore to default

        # maintenance tests
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
            # '-salvagewallet',
        ]
        chainlimit = 6
        for m in maintenance:
            self.log.info("check " + m)
            self.stop_nodes()
            # set lower ancestor limit for later
            self.start_node(0, [m, "-limitancestorcount=" + str(chainlimit)])
            self.start_node(1, [m, "-limitancestorcount=" + str(chainlimit)])
            self.start_node(2, [m, "-limitancestorcount=" + str(chainlimit)])
            if m == '-reindex':
                # reindex will leave the RPC warmed up "early"; wait for the reindex to finish before checking balances
                wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])

        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)

        # == Check that wallet prefers to use coins that don't exceed mempool limits ==

        # Get all non-zero utxos together
        chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
        singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
        self.nodes[0].generate(1)
        node0_balance = self.nodes[0].getbalance()
        # Split into two chains
        rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], {chain_addrs[0]: node0_balance / 2 - Decimal('0.01'), chain_addrs[1]: node0_balance / 2 - Decimal('0.01')})
        signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
        singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)

        # Make a long chain of unconfirmed payments without hitting mempool limit
        # Each tx spends the previous change output, leaving one change output on a chain one longer
        # Since the amount to send is always much less than the available output, a single input suffices
        # So we should be able to generate exactly chainlimit txs for each original output
        sending_addr = self.nodes[1].getnewaddress()
        txid_list = []
        for i in range(chainlimit * 2):
            txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
        assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2)
        assert_equal(len(txid_list), chainlimit * 2)

        # Without walletrejectlongchains, we will still generate a txid
        # The tx will be stored in the wallet but not accepted to the mempool
        extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
        assert extra_txid not in self.nodes[0].getrawmempool()
        assert extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]
        self.nodes[0].abandontransaction(extra_txid)
        total_txs = len(self.nodes[0].listtransactions("*", 99999))

        # Try with walletrejectlongchains
        # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
        self.stop_node(0)
        self.start_node(0, extra_args=["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)])

        # wait for loadmempool
        timeout = 10
        while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit * 2):
            time.sleep(0.5)
            timeout -= 0.5
        assert_equal(len(self.nodes[0].getrawmempool()), chainlimit * 2)

        node0_balance = self.nodes[0].getbalance()
        # With walletrejectlongchains we will not create the tx and store it in our wallet.
        assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))

        # Verify nothing new in wallet
        assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999)))

        # Test getaddressinfo on external address. Note that these addresses are taken from disablewallet.py
        assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy")
        address_info = self.nodes[0].getaddressinfo("mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
        assert_equal(address_info['address'], "mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
        assert_equal(address_info["scriptPubKey"], "76a9144e3854046c7bd1594ac904e4793b6a45b36dea0988ac")
        assert not address_info["ismine"]
        assert not address_info["iswatchonly"]
        assert not address_info["isscript"]
        assert not address_info["ischange"]

        # Test getaddressinfo 'ischange' field on change address.
        self.nodes[0].generate(1)
        destination = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(destination, 0.123)
        tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
        output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]]
        assert len(output_addresses) > 1
        for address in output_addresses:
            ischange = self.nodes[0].getaddressinfo(address)['ischange']
            assert_equal(ischange, address != destination)
            if ischange:
                change = address
        self.nodes[0].setlabel(change, 'foobar')
        assert_equal(self.nodes[0].getaddressinfo(change)['ischange'], False)
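The check_fee_amount helper used above is defined elsewhere in this test class. A minimal sketch, assuming an assert_fee_amount utility like the upstream framework's:

    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting that the implied fee is in range."""
        fee = balance_with_fee - curr_balance
        # settxfee is a per-kB rate, so the target fee rate is fee_per_byte * 1000
        assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
        return curr_balance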
Example #56
 def wait_for_block_announcement(self, block_hash, timeout=60):
     test_function = lambda: self.last_blockhash_announced == block_hash
     wait_until(test_function, timeout=timeout, lock=mininode_lock)
Example #57
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PInterface())

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info("Test that a transaction with non-DER signature can still appear in a block")

        spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
                self.nodeaddress, amount=1.0)
        unDERify(spendtx)
        spendtx.rehash()
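        # unDERify mangles the signature in vin 0 so the tx is valid except for
        # strict DER encoding (see the sketch after this example).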

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
        block.nVersion = 2
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()
        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
            assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
            del self.nodes[0].p2p.last_message["reject"]

        self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
        block.nVersion = 3

        spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
                self.nodeaddress, amount=1.0)
        unDERify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # rejected from the mempool for exactly that reason.
        assert_equal(
            [{'txid': spendtx.hash, 'allowed': False, 'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
            self.nodes[0].testmempoolaccept(rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True)
        )

        # Now we verify that a block with this transaction is also invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            # We can receive different reject messages depending on whether
            # sthcoind is running with multiple script check threads. If script
            # check threads are not in use, then transaction script validation
            # happens sequentially, and sthcoind produces more specific reject
            # reasons.
            assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
            assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
            if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
            else:
                assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason

        self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
        block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.nodes[0].p2p.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
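The unDERify helper used above is a module-level function in the upstream version of this test. A minimal sketch, assuming the framework's CScript type; it pads the signature push in vin 0's scriptSig with an extra byte so it remains valid except under strict DER rules:

def unDERify(tx):
    """Make the signature in vin 0 of tx non-DER-compliant by padding after the S-value."""
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for i in scriptSig:
        if len(newscript) == 0:
            # Insert a zero byte just before the trailing sighash-type byte
            newscript.append(i[0:-1] + b'\0' + i[-1:])
        else:
            newscript.append(i)
    tx.vin[0].scriptSig = CScript(newscript)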
Example #58
 def sync_with_ping(self, timeout=60):
     self.send_message(msg_ping(nonce=self.ping_counter))
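     # Because P2P messages are processed in order, receiving the matching
     # pong implies the node has handled every message sent before the ping.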
     test_function = lambda: self.last_message.get(
         "pong") and self.last_message["pong"].nonce == self.ping_counter
     wait_until(test_function, timeout=timeout, lock=mininode_lock)
     self.ping_counter += 1
Example #59
 def close(self, timeout=10):
     """Close the connections and network event loop."""
     self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
     wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
     self.network_event_loop.close()
     self.join(timeout)
Example #60
    def run_test(self):
        """Main test logic"""

        # Create P2P connections: add_p2p_connection waits for a verack to make sure the connection is fully up
        self.nodes[0].add_p2p_connection(BaseNode())

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all(self.nodes[0:2])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(
            self.nodes[0].getbestblockhash())['time'] + 1

        height = self.nodes[0].getblockcount()

        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height + 1),
                                 self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our P2PInterface
            self.nodes[0].p2p.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info(
            "Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Wait for node2 to receive all the blocks from node1")
        self.sync_all()

        self.log.info("Add P2P connection to node2")
        self.nodes[0].disconnect_p2ps()

        self.nodes[2].add_p2p_connection(BaseNode())

        self.log.info("Test that node2 propagates all the blocks to us")

        getdata_request = msg_getdata()
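        # CInv type 2 is MSG_BLOCK: ask for the full block for each hash.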
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        self.nodes[2].p2p.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # P2PInterface objects.
        wait_until(lambda: sorted(blocks) == sorted(
            list(self.nodes[2].p2p.block_receive_map.keys())),
                   timeout=5,
                   lock=mininode_lock)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in self.nodes[2].p2p.block_receive_map.values():
                assert_equal(block, 1)