Example #1
    def run_test(self):
        gen_node = self.nodes[0]  # The block and tx generating node
        miniwallet = MiniWallet(gen_node)
        # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
        miniwallet.generate(1)
        gen_node.generate(100)

        inbound_peer = self.nodes[0].add_p2p_connection(P2PNode())  # An "attacking" inbound peer

        MAX_REPEATS = 100
        self.log.info("Running test up to {} times.".format(MAX_REPEATS))
        for i in range(MAX_REPEATS):
            self.log.info('Run repeat {}'.format(i + 1))
            txid = miniwallet.send_self_transfer(from_node=gen_node)['wtxid']

            want_tx = msg_getdata()
            want_tx.inv.append(CInv(t=MSG_TX, h=int(txid, 16)))
            with p2p_lock:
                inbound_peer.last_message.pop('notfound', None)
            inbound_peer.send_and_ping(want_tx)

            if inbound_peer.last_message.get('notfound'):
                self.log.debug('tx {} was not yet announced to us.'.format(txid))
                self.log.debug("node has responded with a notfound message. End test.")
                assert_equal(inbound_peer.last_message['notfound'].vec[0].hash, int(txid, 16))
                with p2p_lock:
                    inbound_peer.last_message.pop('notfound')
                break
            else:
                self.log.debug('tx {} was already announced to us. Try test again.'.format(txid))
                assert int(txid, 16) in [inv.hash for inv in inbound_peer.last_message['inv'].inv]
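All of these examples share one setup idiom: mine a single block whose coinbase pays the MiniWallet, then mine 100 more on top so that coinbase matures and every later test transaction spends a confirmed coin. A minimal sketch of the pattern, assuming the framework vintage these examples target (one where MiniWallet still exposes its own generate() method; the class name is just for illustration):

from test_framework.test_framework import BitcoinTestFramework
from test_framework.wallet import MiniWallet

class MiniWalletSetupSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        node = self.nodes[0]
        wallet = MiniWallet(node)
        wallet.generate(1)   # one block whose coinbase pays the MiniWallet
        node.generate(100)   # bury it under 100 blocks so it matures
        # The wallet can now spend a confirmed, mature output:
        wallet.send_self_transfer(from_node=node)

if __name__ == '__main__':
    MiniWalletSetupSketch().main()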
Example #2
    def run_test(self):
        wallet = MiniWallet(self.nodes[0])

        wallet.generate(200)
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)

        # Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
        b = [self.nodes[0].getblockhash(n) for n in range(101, 103)]
        coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
        utxo_101 = wallet.get_utxo(txid=coinbase_txids[0])
        utxo_102 = wallet.get_utxo(txid=coinbase_txids[1])

        spend_101_id = wallet.send_self_transfer(
            from_node=self.nodes[0], utxo_to_spend=utxo_101)["txid"]

        # coinbase at height 102 should be too immature to spend
        assert_raises_rpc_error(
            -26, "bad-txns-premature-spend-of-coinbase",
            lambda: wallet.send_self_transfer(from_node=self.nodes[0],
                                              utxo_to_spend=utxo_102))

        # mempool should have just spend_101:
        assert_equal(self.nodes[0].getrawmempool(), [spend_101_id])

        # mine a block, spend_101 should get confirmed
        self.nodes[0].generate(1)
        assert_equal(set(self.nodes[0].getrawmempool()), set())

        # ... and now height 102 can be spent:
        spend_102_id = wallet.send_self_transfer(
            from_node=self.nodes[0], utxo_to_spend=utxo_102)["txid"]
        assert_equal(self.nodes[0].getrawmempool(), [spend_102_id])
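assert_raises_rpc_error (from test_framework.util) also accepts the callable and its arguments directly, so the lambda above is optional; either way the call must be deferred so the helper can catch the JSONRPCException itself. A hedged equivalent of the immature-spend check, reusing wallet and utxo_102 from the example:

assert_raises_rpc_error(
    -26, "bad-txns-premature-spend-of-coinbase",
    wallet.send_self_transfer,              # callable, not called yet
    from_node=self.nodes[0], utxo_to_spend=utxo_102)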
Example #3
    def run_test(self):
        node = self.nodes[0]
        wallet = MiniWallet(node)

        # Add enough mature utxos to the wallet so that all txs spend confirmed coins
        wallet.generate(3)
        node.generate(100)

        # Spend block 1/2/3's coinbase transactions
        # Mine a block
        # Create three more transactions, spending the spends
        # Mine another block
        # ... make sure all the transactions are confirmed
        # Invalidate both blocks
        # ... make sure all the transactions are put back in the mempool
        # Mine a new block
        # ... make sure all the transactions are confirmed again
        blocks = []
        spends1_ids = [
            wallet.send_self_transfer(from_node=node)['txid'] for _ in range(3)
        ]
        blocks.extend(node.generate(1))
        spends2_ids = [
            wallet.send_self_transfer(from_node=node)['txid'] for _ in range(3)
        ]
        blocks.extend(node.generate(1))

        spends_ids = set(spends1_ids + spends2_ids)

        # mempool should be empty, all txns confirmed
        assert_equal(set(node.getrawmempool()), set())
        confirmed_txns = set(
            node.getblock(blocks[0])['tx'] + node.getblock(blocks[1])['tx'])
        # Checks that all spend txns are contained in the mined blocks
        assert (spends_ids < confirmed_txns)

        # Use invalidateblock to re-org back
        node.invalidateblock(blocks[0])

        # All txns should be back in mempool with 0 confirmations
        assert_equal(set(node.getrawmempool()), spends_ids)

        # Generate another block, they should all get mined
        node.generate(1)
        # mempool should be empty, all txns confirmed
        assert_equal(set(node.getrawmempool()), set())
        confirmed_txns = set(
            node.getblock(blocks[0])['tx'] + node.getblock(blocks[1])['tx'])
        assert (spends_ids < confirmed_txns)
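The containment assertions use Python's set operators: < between sets tests for a proper subset, so the check passes only when every spend txid appears among the mined transactions (which also include the blocks' coinbases). A self-contained illustration:

spends = {"txid_a", "txid_b"}
confirmed = {"coinbase_1", "txid_a", "txid_b"}
assert spends < confirmed     # proper subset: all spends were mined
assert not spends < spends    # a set is never a proper subset of itself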
Example #4
    def run_test(self):
        self.log.info("Test that mempool.dat is compatible between versions")

        old_node, new_node = self.nodes
        new_wallet = MiniWallet(new_node)
        new_wallet.generate(1)
        new_node.generate(100)
        # Sync the nodes to ensure old_node has the block that contains the coinbase that new_wallet will spend.
        # Otherwise, because coinbases are only valid in a block and not as loose txns, if the nodes aren't synced
        # unbroadcasted_tx won't pass old_node's `MemPoolAccept::PreChecks`.
        self.connect_nodes(0, 1)
        self.sync_blocks()
        recipient = old_node.getnewaddress()
        self.stop_node(1)

        self.log.info("Add a transaction to mempool on old node and shutdown")
        old_tx_hash = old_node.sendtoaddress(recipient, 0.0001)
        assert old_tx_hash in old_node.getrawmempool()
        self.stop_node(0)

        self.log.info("Move mempool.dat from old to new node")
        old_node_mempool = os.path.join(old_node.datadir, self.chain,
                                        'mempool.dat')
        new_node_mempool = os.path.join(new_node.datadir, self.chain,
                                        'mempool.dat')
        os.rename(old_node_mempool, new_node_mempool)

        self.log.info("Start new node and verify mempool contains the tx")
        self.start_node(1)
        assert old_tx_hash in new_node.getrawmempool()

        self.log.info(
            "Add unbroadcasted tx to mempool on new node and shutdown")
        unbroadcasted_tx_hash = new_wallet.send_self_transfer(
            from_node=new_node)['txid']
        assert unbroadcasted_tx_hash in new_node.getrawmempool()
        mempool = new_node.getrawmempool(True)
        assert mempool[unbroadcasted_tx_hash]['unbroadcast']
        self.stop_node(1)

        self.log.info("Move mempool.dat from new to old node")
        os.rename(new_node_mempool, old_node_mempool)

        self.log.info(
            "Start old node again and verify mempool contains both txs")
        self.start_node(0, ['-nowallet'])
        assert old_tx_hash in old_node.getrawmempool()
        assert unbroadcasted_tx_hash in old_node.getrawmempool()
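mempool.dat lives directly under each node's chain-specific datadir, which is what the os.path.join calls above build. A hedged sketch of the same path logic on its own, assuming a regtest datadir (mempool_dat_path is illustrative):

import os

def mempool_dat_path(datadir, chain="regtest"):
    # e.g. <datadir>/regtest/mempool.dat
    return os.path.join(datadir, chain, "mempool.dat")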
Example #5
    def _test_getblock(self):
        node = self.nodes[0]

        miniwallet = MiniWallet(node)
        miniwallet.generate(5)
        node.generate(100)

        fee_per_byte = Decimal('0.00000010')
        fee_per_kb = 1000 * fee_per_byte

        miniwallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)
        blockhash = node.generate(1)[0]

        self.log.info(
            "Test that getblock with verbosity 1 doesn't include fee")
        block = node.getblock(blockhash, 1)
        assert 'fee' not in block['tx'][1]

        self.log.info(
            'Test that getblock with verbosity 2 includes expected fee')
        block = node.getblock(blockhash, 2)
        tx = block['tx'][1]
        assert 'fee' in tx
        assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)

        self.log.info(
            "Test that getblock with verbosity 2 still works with pruned Undo data"
        )
        datadir = get_datadir_path(self.options.tmpdir, 0)

        def move_block_file(old, new):
            old_path = os.path.join(datadir, self.chain, 'blocks', old)
            new_path = os.path.join(datadir, self.chain, 'blocks', new)
            os.rename(old_path, new_path)

        # Move instead of deleting so we can restore chain state afterwards
        move_block_file('rev00000.dat', 'rev_wrong')

        block = node.getblock(blockhash, 2)
        assert 'fee' not in block['tx'][1]

        # Restore chain state
        move_block_file('rev_wrong', 'rev00000.dat')

        assert 'previousblockhash' not in node.getblock(node.getblockhash(0))
        assert 'nextblockhash' not in node.getblock(node.getbestblockhash())
Example #6
    def test_muhash_implementation(self):
        self.log.info("Test MuHash implementation consistency")

        node = self.nodes[0]
        wallet = MiniWallet(node)
        mocktime = node.getblockheader(node.getblockhash(0))['time'] + 1
        node.setmocktime(mocktime)

        # Generate 100 blocks and remove the first since we plan to spend its
        # coinbase
        block_hashes = wallet.generate(1) + node.generate(99)
        blocks = list(
            map(lambda block: FromHex(CBlock(), node.getblock(block, False)),
                block_hashes))
        blocks.pop(0)

        # Create a spending transaction and mine a block which includes it
        txid = wallet.send_self_transfer(from_node=node)['txid']
        tx_block = node.generateblock(output=wallet.get_address(),
                                      transactions=[txid])
        blocks.append(FromHex(CBlock(), node.getblock(tx_block['hash'],
                                                      False)))

        # Serialize the outputs that should be in the UTXO set and add them to
        # a MuHash object
        muhash = MuHash3072()

        for height, block in enumerate(blocks):
            # The Genesis block coinbase is not part of the UTXO set and we
            # spent the first mined block
            height += 2

            for tx in block.vtx:
                for n, tx_out in enumerate(tx.vout):
                    coinbase = 1 if not tx.vin[0].prevout.hash else 0

                    # Skip witness commitment
                    if (coinbase and n > 0):
                        continue

                    data = COutPoint(int(tx.rehash(), 16), n).serialize()
                    data += struct.pack("<i", height * 2 + coinbase)
                    data += tx_out.serialize()

                    muhash.insert(data)

        finalized = muhash.digest()
        node_muhash = node.gettxoutsetinfo("muhash")['muhash']

        assert_equal(finalized[::-1].hex(), node_muhash)

        self.log.info("Test deterministic UTXO set hash results")
        assert_equal(
            node.gettxoutsetinfo()['hash_serialized_2'],
            "5b1b44097406226c0eb8e1362cd17a1f346522cf9390a8175a57a5262cb1963f")
        assert_equal(
            node.gettxoutsetinfo("muhash")['muhash'],
            "4b8803075d7151d06fad3e88b68ba726886794873fbfa841d12aefb2cc2b881b")
Example #7
    def test_muhash_implementation(self):
        self.log.info("Test MuHash implementation consistency")

        node = self.nodes[0]
        wallet = MiniWallet(node)
        mocktime = node.getblockheader(node.getblockhash(0))['time'] + 1
        node.setmocktime(mocktime)

        # Generate 100 blocks and remove the first since we plan to spend its
        # coinbase
        block_hashes = wallet.generate(1) + node.generate(99)
        blocks = list(
            map(lambda block: from_hex(CBlock(), node.getblock(block, False)),
                block_hashes))
        blocks.pop(0)

        # Create a spending transaction and mine a block which includes it
        txid = wallet.send_self_transfer(from_node=node)['txid']
        tx_block = node.generateblock(output=wallet.get_address(),
                                      transactions=[txid])
        blocks.append(
            from_hex(CBlock(), node.getblock(tx_block['hash'], False)))

        # Serialize the outputs that should be in the UTXO set and add them to
        # a MuHash object
        muhash = MuHash3072()

        for height, block in enumerate(blocks):
            # The Genesis block coinbase is not part of the UTXO set and we
            # spent the first mined block
            height += 2

            for tx in block.vtx:
                for n, tx_out in enumerate(tx.vout):
                    coinbase = 1 if not tx.vin[0].prevout.hash else 0

                    # Skip witness commitment
                    if (coinbase and n > 0):
                        continue

                    data = COutPoint(int(tx.rehash(), 16), n).serialize()
                    data += struct.pack("<i", height * 2 + coinbase)
                    data += tx_out.serialize()

                    muhash.insert(data)

        finalized = muhash.digest()
        node_muhash = node.gettxoutsetinfo("muhash")['muhash']

        assert_equal(finalized[::-1].hex(), node_muhash)

        self.log.info("Test deterministic UTXO set hash results")
        assert_equal(
            node.gettxoutsetinfo()['hash_serialized_2'],
            "03f3bedef7a3e64686e13b57ec08b1ada40528d8e01f64e077750e225ddb8c07")
        assert_equal(
            node.gettxoutsetinfo("muhash")['muhash'],
            "69ebd7142d443a89c227637ef9a21c05287a98f0acdd40ba7e3ef79d1f4e412d")
Example #8
    def test_feefilter(self):
        node1 = self.nodes[1]
        node0 = self.nodes[0]
        miniwallet = MiniWallet(node1)
        # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
        miniwallet.generate(5)
        node1.generate(100)

        conn = self.nodes[0].add_p2p_connection(TestP2PConn())

        self.log.info("Test txs paying 0.2 sat/byte are received by test connection")
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000200'), from_node=node1)['wtxid'] for _ in range(3)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()

        # Set a fee filter of 0.15 sat/byte on test connection
        conn.send_and_ping(msg_feefilter(150))

        self.log.info("Test txs paying 0.15 sat/byte are received by test connection")
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000150'), from_node=node1)['wtxid'] for _ in range(3)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()

        self.log.info("Test txs paying 0.1 sat/byte are no longer received by test connection")
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00000100'), from_node=node1)['wtxid'] for _ in range(3)]
        self.sync_mempools()  # must be sure node 0 has received all txs

        # Send one transaction from node0 that should be received, so that we
        # can sync the test on receipt (if node1's txs were relayed, they'd
        # be received by the time this node0 tx is received). This is
        # unfortunately reliant on the current relay behavior where we batch up
        # to 35 entries in an inv, which means that when this next transaction
        # is eligible for relay, the prior transactions from node1 are eligible
        # as well.
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node0)['wtxid'] for _ in range(1)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()
        self.sync_mempools()  # must be sure node 1 has received all txs

        self.log.info("Remove fee filter and check txs are received again")
        conn.send_and_ping(msg_feefilter(0))
        txids = [miniwallet.send_self_transfer(fee_rate=Decimal('0.00020000'), from_node=node1)['wtxid'] for _ in range(3)]
        conn.wait_for_invs_to_match(txids)
        conn.clear_invs()
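msg_feefilter takes its threshold in satoshis per 1000 bytes (per BIP 133), which is why a 0.15 sat/byte filter is sent as 150; the Decimal fee rates above are per kilo-vbyte in coin units, so Decimal('0.00000200') is 200 sat/kvB, i.e. 0.2 sat/byte. A standalone sanity check of those conversions:

from decimal import Decimal

COIN = 100_000_000  # satoshis per coin

def feefilter_threshold(sat_per_byte):
    # msg_feefilter expects satoshis per 1000 bytes
    return int(sat_per_byte * 1000)

assert feefilter_threshold(Decimal("0.15")) == 150
assert Decimal("0.00000200") * COIN == 200  # sat/kvB, i.e. 0.2 sat/byte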
Example #9
class BIP68_112_113Test(FujicoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [[
            '[email protected]',
            '-par=1',  # Use only one script thread to get the exact reject reason for testing
        ]]
        self.supports_cli = False

    def create_self_transfer_from_utxo(self, input_tx):
        utxo = self.miniwallet.get_utxo(txid=input_tx.rehash(), mark_as_spent=False)
        tx = self.miniwallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo)['tx']
        return tx

    def create_bip112special(self, input, txversion):
        tx = self.create_self_transfer_from_utxo(input)
        tx.nVersion = txversion
        self.miniwallet.sign_tx(tx)
        tx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig)))
        return tx

    def create_bip112emptystack(self, input, txversion):
        tx = self.create_self_transfer_from_utxo(input)
        tx.nVersion = txversion
        self.miniwallet.sign_tx(tx)
        tx.vin[0].scriptSig = CScript([OP_CHECKSEQUENCEVERIFY] + list(CScript(tx.vin[0].scriptSig)))
        return tx

    def send_generic_input_tx(self, coinbases):
        input_txid = self.nodes[0].getblock(coinbases.pop(), 2)['tx'][0]['txid']
        utxo_to_spend = self.miniwallet.get_utxo(txid=input_txid)
        return self.miniwallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_to_spend)['tx']

    def create_bip68txs(self, bip68inputs, txversion, locktime_delta=0):
        """Returns a list of bip68 transactions with different bits set."""
        txs = []
        assert len(bip68inputs) >= 16
        for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
            locktime = relative_locktime(sdf, srhb, stf, srlb)
            tx = self.create_self_transfer_from_utxo(bip68inputs[i])
            tx.nVersion = txversion
            tx.vin[0].nSequence = locktime + locktime_delta
            self.miniwallet.sign_tx(tx)
            tx.rehash()
            txs.append({'tx': tx, 'sdf': sdf, 'stf': stf})

        return txs

    def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta=0):
        """Returns a list of bip68 transactions with different bits set."""
        txs = []
        assert len(bip112inputs) >= 16
        for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
            locktime = relative_locktime(sdf, srhb, stf, srlb)
            tx = self.create_self_transfer_from_utxo(bip112inputs[i])
            if (varyOP_CSV):  # if varying OP_CSV, nSequence is fixed
                tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta
            else:  # vary nSequence instead, OP_CSV is fixed
                tx.vin[0].nSequence = locktime + locktime_delta
            tx.nVersion = txversion
            self.miniwallet.sign_tx(tx)
            if (varyOP_CSV):
                tx.vin[0].scriptSig = CScript([locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig)))
            else:
                tx.vin[0].scriptSig = CScript([BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig)))
            tx.rehash()
            txs.append({'tx': tx, 'sdf': sdf, 'stf': stf})
        return txs

    def generate_blocks(self, number):
        test_blocks = []
        for _ in range(number):
            block = self.create_test_block([])
            test_blocks.append(block)
            self.last_block_time += 600
            self.tip = block.sha256
            self.tipheight += 1
        return test_blocks

    def create_test_block(self, txs):
        block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
        block.nVersion = 4
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        return block

    def send_blocks(self, blocks, success=True, reject_reason=None):
        """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

        Call with success = False if the tip shouldn't advance to the most recent block."""
        self.helper_peer.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason)

    def run_test(self):
        self.helper_peer = self.nodes[0].add_p2p_connection(P2PDataStore())
        self.miniwallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK)

        self.log.info("Generate blocks in the past for coinbase outputs.")
        long_past_time = int(time.time()) - 600 * 1000  # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
        self.nodes[0].setmocktime(long_past_time - 100)  # enough so that the generated blocks will still all be before long_past_time
        self.coinbase_blocks = self.miniwallet.generate(COINBASE_BLOCK_COUNT)  # blocks generated for inputs
        self.nodes[0].setmocktime(0)  # set time back to present so yielded blocks aren't in the future as we advance last_block_time
        self.tipheight = COINBASE_BLOCK_COUNT  # height of the next block to build
        self.last_block_time = long_past_time
        self.tip = int(self.nodes[0].getbestblockhash(), 16)

        # Activation height is hardcoded
        # We advance to block height five below BIP112 activation for the following tests
        test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT-5 - COINBASE_BLOCK_COUNT)
        self.send_blocks(test_blocks)
        assert not softfork_active(self.nodes[0], 'csv')

        # Inputs at height = 431
        #
        # Put inputs for all tests in the chain at height 431 (tip now = 430) (time increases by 600s per block)
        # Note we reuse inputs for v1 and v2 txs so must test these separately
        # 16 normal inputs
        bip68inputs = []
        for _ in range(16):
            bip68inputs.append(self.send_generic_input_tx(self.coinbase_blocks))

        # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112basicinputs = []
        for _ in range(2):
            inputs = []
            for _ in range(16):
                inputs.append(self.send_generic_input_tx(self.coinbase_blocks))
            bip112basicinputs.append(inputs)

        # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112diverseinputs = []
        for _ in range(2):
            inputs = []
            for _ in range(16):
                inputs.append(self.send_generic_input_tx(self.coinbase_blocks))
            bip112diverseinputs.append(inputs)

        # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112specialinput = self.send_generic_input_tx(self.coinbase_blocks)
        # 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
        bip112emptystackinput = self.send_generic_input_tx(self.coinbase_blocks)

        # 1 normal input
        bip113input = self.send_generic_input_tx(self.coinbase_blocks)

        self.nodes[0].setmocktime(self.last_block_time + 600)
        inputblockhash = self.nodes[0].generate(1)[0]  # 1 block generated for inputs to be in chain at height 431
        self.nodes[0].setmocktime(0)
        self.tip = int(inputblockhash, 16)
        self.tipheight += 1
        self.last_block_time += 600
        assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), TESTING_TX_COUNT + 1)

        # 2 more version 4 blocks
        test_blocks = self.generate_blocks(2)
        self.send_blocks(test_blocks)

        assert_equal(self.tipheight, CSV_ACTIVATION_HEIGHT - 2)
        self.log.info("Height = {}, CSV not yet active (will activate for block {}, not {})".format(self.tipheight, CSV_ACTIVATION_HEIGHT, CSV_ACTIVATION_HEIGHT - 1))
        assert not softfork_active(self.nodes[0], 'csv')

        # Test both version 1 and version 2 transactions for all tests
        # BIP113 test transaction will be modified before each use to put in appropriate block time
        bip113tx_v1 = self.create_self_transfer_from_utxo(bip113input)
        bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v1.nVersion = 1
        bip113tx_v2 = self.create_self_transfer_from_utxo(bip113input)
        bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2.nVersion = 2

        # For BIP68 test all 16 relative sequence locktimes
        bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
        bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)

        # For BIP112 test:
        # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
        bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
        # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
        bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
        # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
        bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
        # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
        bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
        # -1 OP_CSV OP_DROP input
        bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
        bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
        # (empty stack) OP_CSV input
        bip112tx_emptystack_v1 = self.create_bip112emptystack(bip112emptystackinput, 1)
        bip112tx_emptystack_v2 = self.create_bip112emptystack(bip112emptystackinput, 2)

        self.log.info("TESTING")

        self.log.info("Pre-Soft Fork Tests. All txs should pass.")
        self.log.info("Test version 1 txs")

        success_txs = []
        # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v1)
        success_txs.append(bip113tx_v1)
        success_txs.append(bip112tx_special_v1)
        success_txs.append(bip112tx_emptystack_v1)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("Test version 2 txs")

        success_txs = []
        # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v2)
        success_txs.append(bip113tx_v2)
        success_txs.append(bip112tx_special_v2)
        success_txs.append(bip112tx_emptystack_v2)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v2))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # 1 more version 4 block to get us to height 432 so the fork should now be active for the next block
        assert not softfork_active(self.nodes[0], 'csv')
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)
        assert softfork_active(self.nodes[0], 'csv')

        self.log.info("Post-Soft Fork Tests.")

        self.log.info("BIP 113 tests")
        # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v1)
        bip113tx_v1.rehash()
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v2)
        bip113tx_v2.rehash()
        for bip113tx in [bip113tx_v1, bip113tx_v2]:
            self.send_blocks([self.create_test_block([bip113tx])], success=False, reject_reason='bad-txns-nonfinal')

        # BIP 113 tests should now pass if the locktime is < MTP
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
        self.miniwallet.sign_tx(bip113tx_v1)
        bip113tx_v1.rehash()
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
        self.miniwallet.sign_tx(bip113tx_v2)
        bip113tx_v2.rehash()
        for bip113tx in [bip113tx_v1, bip113tx_v2]:
            self.send_blocks([self.create_test_block([bip113tx])])
            self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Next block height = 437 after 4 blocks of random version
        test_blocks = self.generate_blocks(4)
        self.send_blocks(test_blocks)

        self.log.info("BIP 68 tests")
        self.log.info("Test version 1 txs - all should still pass")

        success_txs = []
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("Test version 2 txs")

        # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
        bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
        bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
        for tx in bip68timetxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')

        bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
        for tx in bip68heighttxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')

        # Advance one block to 438
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)

        # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
        bip68success_txs.extend(bip68timetxs)
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        for tx in bip68heighttxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')

        # Advance one block to 439
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)

        # All BIP 68 txs should pass
        bip68success_txs.extend(bip68heighttxs)
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        self.log.info("BIP 112 tests")
        self.log.info("Test version 1 txs")

        # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
        self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
        self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass

        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
        fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        self.log.info("Test version 2 txs")

        # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
        self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
        self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]

        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs

        # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        # If sequencelock types mismatch, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')

        # Remaining txs should pass, just test masking works properly
        success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Additional test, of checking that comparison of two time types works properly
        time_txs = []
        for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
            tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
            self.miniwallet.sign_tx(tx)
            tx.rehash()
            time_txs.append(tx)

        self.send_blocks([self.create_test_block(time_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
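The sdf/stf booleans threaded through create_bip68txs and create_bip112txs follow the BIP 68 nSequence layout: bit 31 (SEQUENCE_LOCKTIME_DISABLE_FLAG) switches the relative lock off, bit 22 selects time-based (512-second units) over height-based locks, and the low 16 bits carry the lock value. A self-contained sketch of that encoding, assuming these constants match the ones the test imports:

SEQ_DISABLE_FLAG = 1 << 31   # relative lock not enforced when set
SEQ_TYPE_FLAG = 1 << 22      # time-based (512s units) when set, else height-based
SEQ_VALUE_MASK = 0x0000FFFF  # low 16 bits hold the lock value

def encode_relative_locktime(value, *, time_based=False, disabled=False):
    seq = value & SEQ_VALUE_MASK
    if time_based:
        seq |= SEQ_TYPE_FLAG
    if disabled:
        seq |= SEQ_DISABLE_FLAG
    return seq

assert encode_relative_locktime(10) == 10                      # wait 10 blocks
assert encode_relative_locktime(10, time_based=True) & SEQ_TYPE_FLAG
assert encode_relative_locktime(10, disabled=True) >> 31 == 1  # lock disabled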
Example #10
    def run_test(self):
        miniwallet = MiniWallet(self.nodes[0])
        # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
        miniwallet.generate(5)
        self.nodes[0].generate(100)
        self.sync_all()

        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)

        txid1 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
        txid2 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
        # This will raise an exception because the transaction is not yet in a block
        assert_raises_rpc_error(-5, "Transaction not yet in block",
                                self.nodes[0].gettxoutproof, [txid1])

        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()

        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])

        assert_equal(
            self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1])),
            [txid1])
        assert_equal(
            self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof(
                [txid1, txid2])), txlist)
        assert_equal(
            self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof(
                [txid1, txid2], blockhash)), txlist)

        txin_spent = miniwallet.get_utxo()  # Get the change from txid2
        tx3 = miniwallet.send_self_transfer(from_node=self.nodes[0],
                                            utxo_to_spend=txin_spent)
        txid3 = tx3['txid']
        self.nodes[0].generate(1)
        self.sync_all()

        txid_spent = txin_spent["txid"]
        txid_unspent = txid1  # Input was change from txid2, so txid1 should be unspent

        # Invalid txids
        assert_raises_rpc_error(
            -8,
            "txid must be of length 64 (not 32, for '00000000000000000000000000000000')",
            self.nodes[0].gettxoutproof, ["00000000000000000000000000000000"],
            blockhash)
        assert_raises_rpc_error(
            -8,
            "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
            self.nodes[0].gettxoutproof, [
                "ZZZ0000000000000000000000000000000000000000000000000000000000000"
            ], blockhash)
        # Invalid blockhashes
        assert_raises_rpc_error(
            -8,
            "blockhash must be of length 64 (not 32, for '00000000000000000000000000000000')",
            self.nodes[0].gettxoutproof, [txid_spent],
            "00000000000000000000000000000000")
        assert_raises_rpc_error(
            -8,
            "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
            self.nodes[0].gettxoutproof, [txid_spent],
            "ZZZ0000000000000000000000000000000000000000000000000000000000000")
        # We can't find the block from a fully-spent tx
        assert_raises_rpc_error(-5, "Transaction not yet in block",
                                self.nodes[0].gettxoutproof, [txid_spent])
        # We can get the proof if we specify the block
        assert_equal(
            self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof(
                [txid_spent], blockhash)), [txid_spent])
        # We can't get the proof if we specify a non-existent block
        assert_raises_rpc_error(
            -5, "Block not found", self.nodes[0].gettxoutproof, [txid_spent],
            "0000000000000000000000000000000000000000000000000000000000000000")
        # We can get the proof if the transaction is unspent
        assert_equal(
            self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof(
                [txid_unspent])), [txid_unspent])
        # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
        assert_equal(
            sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof(
                [txid1, txid2]))), sorted(txlist))
        assert_equal(
            sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof(
                [txid2, txid1]))), sorted(txlist))
        # We can always get a proof if we have a -txindex
        assert_equal(
            self.nodes[0].verifytxoutproof(self.nodes[1].gettxoutproof(
                [txid_spent])), [txid_spent])
        # We can't get a proof if we specify transactions from different blocks
        assert_raises_rpc_error(
            -5, "Not all transactions found in specified or retrieved block",
            self.nodes[0].gettxoutproof, [txid1, txid3])
        # Test empty list
        assert_raises_rpc_error(-8, "Parameter 'txids' cannot be empty",
                                self.nodes[0].gettxoutproof, [])
        # Test duplicate txid
        assert_raises_rpc_error(-8, 'Invalid parameter, duplicated txid',
                                self.nodes[0].gettxoutproof, [txid1, txid1])

        # Now we'll try tweaking a proof.
        proof = self.nodes[1].gettxoutproof([txid1, txid2])
        assert txid1 in self.nodes[0].verifytxoutproof(proof)
        assert txid2 in self.nodes[1].verifytxoutproof(proof)

        tweaked_proof = FromHex(CMerkleBlock(), proof)

        # Make sure that our serialization/deserialization is working
        assert txid1 in self.nodes[0].verifytxoutproof(ToHex(tweaked_proof))

        # Check to see if we can go up the merkle tree and pass this off as a
        # single-transaction block
        tweaked_proof.txn.nTransactions = 1
        tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot]
        tweaked_proof.txn.vBits = [True] + [False] * 7

        for n in self.nodes:
            assert not n.verifytxoutproof(ToHex(tweaked_proof))
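The closing tweak works because the flag bits of a partial merkle tree are serialized packed into whole bytes: even a claimed single-transaction proof carries eight flag bits, only the first of which matters, hence the seven False entries. A tiny sketch of that padding rule:

def pad_flag_bits(bits):
    # Partial-merkle-tree flag bits are packed into bytes on the wire,
    # so pad the list out to a byte boundary.
    return bits + [False] * (-len(bits) % 8)

assert pad_flag_bits([True]) == [True] + [False] * 7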
Example #11
    def run_test(self):
        peer = self.nodes[0].add_p2p_connection(P2PInterface())
        wallet = MiniWallet(self.nodes[0], raw_script=True)

        self.test_cltv_info(is_active=False)

        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        wallet.generate(10)
        self.nodes[0].generate(CLTV_HEIGHT - 2 - 10)

        self.log.info("Test that invalid-according-to-CLTV transactions can still appear in a block")

        # create one invalid tx per CLTV failure reason (5 in total) and collect them
        invalid_cltv_txs = []
        for i in range(5):
            spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
            spendtx = cltv_invalidate(self.nodes[0], spendtx, i)
            invalid_cltv_txs.append(spendtx)

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
        block.nVersion = 3
        block.vtx.extend(invalid_cltv_txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        self.test_cltv_info(is_active=False)  # Not active as of current tip and next block does not need to obey rules
        peer.send_and_ping(msg_block(block))
        self.test_cltv_info(is_active=True)  # Not active as of current tip, but next block must obey rules
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
            peer.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            peer.sync_with_ping()

        self.log.info("Test that invalid-according-to-CLTV transactions cannot appear in a block")
        block.nVersion = 4
        block.vtx.append(CTransaction()) # dummy tx after coinbase that will be replaced later

        # create and test one invalid tx per CLTV failure reason (5 in total)
        for i in range(5):
            spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
            spendtx = cltv_invalidate(self.nodes[0], spendtx, i)

            expected_cltv_reject_reason = [
                "non-mandatory-script-verify-flag (Operation not valid with the current stack size)",
                "non-mandatory-script-verify-flag (Negative locktime)",
                "non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
                "non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
                "non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
            ][i]
            # First we show that this tx is valid except for CLTV by getting it
            # rejected from the mempool for exactly that reason.
            assert_equal(
                [{
                    'txid': spendtx.hash,
                    'wtxid': spendtx.getwtxid(),
                    'allowed': False,
                    'reject-reason': expected_cltv_reject_reason,
                }],
                self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
            )

            # Now we verify that a block with this transaction is also invalid.
            block.vtx[1] = spendtx
            block.hashMerkleRoot = block.calc_merkle_root()
            block.solve()

            with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with {}'.format(
                                                block.vtx[-1].hash, expected_cltv_reject_reason)]):
                peer.send_and_ping(msg_block(block))
                assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
                peer.sync_with_ping()

        self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)

        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        self.test_cltv_info(is_active=True)  # Not active as of current tip, but next block must obey rules
        peer.send_and_ping(msg_block(block))
        self.test_cltv_info(is_active=True)  # Active as of current tip
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
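cltv_invalidate and cltv_validate rewrite the scriptSig in the same prepend style as the CSV helpers in Example #9: push a value, run the opcode, then OP_DROP it so the original signature check is undisturbed. A hedged sketch of that shape (prepend_cltv is illustrative, not the test's actual helper), assuming the test_framework.script constants:

from test_framework.script import CScript, OP_CHECKLOCKTIMEVERIFY, OP_DROP

def prepend_cltv(script_sig, locktime):
    # Push the locktime, check it against the spending tx, then drop it
    # so the rest of the script still sees the stack it expects.
    return CScript([locktime, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + list(CScript(script_sig)))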
Example #12
class P2PBlocksOnly(MicroBitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [["-blocksonly"]]

    def run_test(self):
        self.miniwallet = MiniWallet(self.nodes[0])
        # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
        self.miniwallet.generate(2)
        self.nodes[0].generate(COINBASE_MATURITY)

        self.blocksonly_mode_tests()
        self.blocks_relay_conn_tests()

    def blocksonly_mode_tests(self):
        self.log.info("Tests with node running in -blocksonly mode")
        assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)

        self.nodes[0].add_p2p_connection(P2PInterface())
        tx, txid, wtxid, tx_hex = self.check_p2p_tx_violation()

        self.log.info(
            'Check that txs from rpc are not rejected and relayed to other peers'
        )
        tx_relay_peer = self.nodes[0].add_p2p_connection(P2PInterface())
        assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)

        assert_equal(self.nodes[0].testmempoolaccept([tx_hex])[0]['allowed'],
                     True)
        with self.nodes[0].assert_debug_log(
            ['received getdata for: wtx {} peer=1'.format(wtxid)]):
            self.nodes[0].sendrawtransaction(tx_hex)
            tx_relay_peer.wait_for_tx(txid)
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)

        self.log.info("Restarting node 0 with relay permission and blocksonly")
        self.restart_node(
            0,
            ["-persistmempool=0", "[email protected]", "-blocksonly"])
        assert_equal(self.nodes[0].getrawmempool(), [])
        first_peer = self.nodes[0].add_p2p_connection(P2PInterface())
        second_peer = self.nodes[0].add_p2p_connection(P2PInterface())
        peer_1_info = self.nodes[0].getpeerinfo()[0]
        assert_equal(peer_1_info['permissions'], ['relay'])
        peer_2_info = self.nodes[0].getpeerinfo()[1]
        assert_equal(peer_2_info['permissions'], ['relay'])
        assert_equal(self.nodes[0].testmempoolaccept([tx_hex])[0]['allowed'],
                     True)

        self.log.info(
            'Check that the tx from first_peer with relay-permission is relayed to others (i.e. second_peer)'
        )
        with self.nodes[0].assert_debug_log(["received getdata"]):
            # Note that normally, first_peer would never send us transactions since we're a blocksonly node.
            # By activating blocksonly, we explicitly tell our peers that they should not send us transactions,
            # and MicroBitcoin Core respects that choice and will not send transactions.
            # But if, for some reason, first_peer decides to relay transactions to us anyway, we should relay them to
            # second_peer since we gave relay permission to first_peer.
            # See https://github.com/MicroBitcoinOrg/MicroBitcoin/issues/19943 for details.
            first_peer.send_message(msg_tx(tx))
            self.log.info(
                'Check that the peer with relay-permission is still connected after sending the transaction'
            )
            assert_equal(first_peer.is_connected, True)
            second_peer.wait_for_tx(txid)
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
        self.log.info(
            "Relay-permission peer's transaction is accepted and relayed")

        self.nodes[0].disconnect_p2ps()
        self.nodes[0].generate(1)

    def blocks_relay_conn_tests(self):
        self.log.info(
            'Tests with node in normal mode with block-relay-only connections')
        self.restart_node(0, ["-noblocksonly"])  # disables blocks only mode
        assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], True)

        # Ensure we disconnect if a block-relay-only connection sends us a transaction
        self.nodes[0].add_outbound_p2p_connection(
            P2PInterface(), p2p_idx=0, connection_type="block-relay-only")
        assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False)
        _, txid, _, tx_hex = self.check_p2p_tx_violation(index=2)

        self.log.info(
            "Check that txs from RPC are not sent to blockrelay connection")
        conn = self.nodes[0].add_outbound_p2p_connection(
            P2PTxInvStore(), p2p_idx=1, connection_type="block-relay-only")

        self.nodes[0].sendrawtransaction(tx_hex)

        # Bump time forward to ensure nNextInvSend timer pops
        self.nodes[0].setmocktime(int(time.time()) + 60)

        conn.sync_send_with_ping()
        assert (int(txid, 16) not in conn.get_invs())

    def check_p2p_tx_violation(self, index=1):
        self.log.info(
            'Check that txs from P2P are rejected and result in disconnect')
        input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(index),
                                            2)['tx'][0]['txid']
        utxo_to_spend = self.miniwallet.get_utxo(txid=input_txid)
        spendtx = self.miniwallet.create_self_transfer(
            from_node=self.nodes[0], utxo_to_spend=utxo_to_spend)

        with self.nodes[0].assert_debug_log(
            ['transaction sent in violation of protocol peer=0']):
            self.nodes[0].p2ps[0].send_message(msg_tx(spendtx['tx']))
            self.nodes[0].p2ps[0].wait_for_disconnect()
            assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)

        # Remove the disconnected peer
        del self.nodes[0].p2ps[0]

        return spendtx['tx'], spendtx['txid'], spendtx['wtxid'], spendtx['hex']
Example #13
class BIP66Test(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.extra_args = [[
            '[email protected]',
            '-par=1',  # Use only one script thread to get the exact log msg for testing
        ]]
        self.setup_clean_chain = True
        self.rpc_timeout = 240

    def create_tx(self, input_txid):
        utxo_to_spend = self.miniwallet.get_utxo(txid=input_txid,
                                                 mark_as_spent=False)
        return self.miniwallet.create_self_transfer(
            from_node=self.nodes[0], utxo_to_spend=utxo_to_spend)['tx']

    def test_dersig_info(self, *, is_active):
        assert_equal(
            self.nodes[0].getblockchaininfo()['softforks']['bip66'],
            {
                "active": is_active,
                "height": DERSIG_HEIGHT,
                "type": "buried",
            },
        )

    def run_test(self):
        peer = self.nodes[0].add_p2p_connection(P2PInterface())
        self.miniwallet = MiniWallet(self.nodes[0],
                                     mode=MiniWalletMode.RAW_P2PK)

        self.test_dersig_info(is_active=False)

        self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
        self.coinbase_txids = [
            self.nodes[0].getblock(b)['tx'][0]
            for b in self.miniwallet.generate(DERSIG_HEIGHT - 2)
        ]

        self.log.info(
            "Test that a transaction with non-DER signature can still appear in a block"
        )

        spendtx = self.create_tx(self.coinbase_txids[0])
        unDERify(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1),
                             block_time)
        block.nVersion = 2
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.test_dersig_info(
            is_active=False
        )  # Not active as of current tip and next block does not need to obey rules
        peer.send_and_ping(msg_block(block))
        self.test_dersig_info(
            is_active=True
        )  # Not active as of current tip, but next block must obey rules
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 3")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
        block.nVersion = 2
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                '{}, bad-version(0x00000002)'.format(block.hash)
        ]):
            peer.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            peer.sync_with_ping()

        self.log.info(
            "Test that transactions with non-DER signatures cannot appear in a block"
        )
        block.nVersion = 3

        spendtx = self.create_tx(self.coinbase_txids[1])
        unDERify(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for DERSIG by getting it
        # rejected from the mempool for exactly that reason.
        assert_equal(
            [{
                'txid': spendtx.hash,
                'wtxid': spendtx.getwtxid(),
                'allowed': False,
                'reject-reason': 'non-mandatory-script-verify-flag (Non-canonical DER signature)',
            }],
            self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()],
                                            maxfeerate=0),
        )

        # Now we verify that a block with this transaction is also invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        with self.nodes[0].assert_debug_log(expected_msgs=[
                'CheckInputScripts on {} failed with non-mandatory-script-verify-flag (Non-canonical DER signature)'
                .format(block.vtx[-1].hash)
        ]):
            peer.send_and_ping(msg_block(block))
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
            peer.sync_with_ping()

        self.log.info(
            "Test that a version 3 block with a DERSIG-compliant transaction is accepted"
        )
        block.vtx[1] = self.create_tx(self.coinbase_txids[1])
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.test_dersig_info(
            is_active=True
        )  # Not active as of current tip, but next block must obey rules
        peer.send_and_ping(msg_block(block))
        self.test_dersig_info(is_active=True)  # Active as of current tip
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
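
The unDERify helper used throughout this example is defined at module level in the upstream test (feature_dersig.py) and is not included in the snippet. A sketch of it, assuming the framework's CScript type: it splices a zero byte into the signature push so the script still parses but the signature is no longer canonical DER:

from test_framework.script import CScript

def unDERify(tx):
    """Make the signature in vin 0 of a tx non-DER-compliant,
    by adding padding after the S-value."""
    scriptSig = CScript(tx.vin[0].scriptSig)
    newscript = []
    for i in scriptSig:
        if len(newscript) == 0:
            # The first push is the signature; insert a zero byte just
            # before the trailing sighash-type byte.
            newscript.append(i[0:-1] + b'\0' + i[-1:])
        else:
            newscript.append(i)
    tx.vin[0].scriptSig = CScript(newscript)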
Ejemplo n.º 14
0
class NetTest(MicroBitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-minrelaytxfee=0.00001000"],
                           ["-minrelaytxfee=0.00000500"]]
        self.supports_cli = False

    def run_test(self):
        # We need miniwallet to make a transaction
        self.wallet = MiniWallet(self.nodes[0])
        self.wallet.generate(1)
        # Get out of IBD for the minfeefilter and getpeerinfo tests.
        self.nodes[0].generate(COINBASE_MATURITY + 1)

        # By default, the test framework sets up an addnode connection from
        # node 1 --> node0. By connecting node0 --> node 1, we're left with
        # the two nodes being connected both ways.
        # Topology will look like: node0 <--> node1
        self.connect_nodes(0, 1)
        self.sync_all()

        self.test_connection_count()
        self.test_getpeerinfo()
        self.test_getnettotals()
        self.test_getnetworkinfo()
        self.test_getaddednodeinfo()
        self.test_service_flags()
        self.test_getnodeaddresses()
        self.test_addpeeraddress()

    def test_connection_count(self):
        self.log.info("Test getconnectioncount")
        # After using `connect_nodes` to connect nodes 0 and 1 to each other.
        assert_equal(self.nodes[0].getconnectioncount(), 2)

    def test_getpeerinfo(self):
        self.log.info("Test getpeerinfo")
        # Create a few getpeerinfo last_block/last_transaction values.
        self.wallet.send_self_transfer(
            from_node=self.nodes[0]
        )  # Make a transaction so we can see it in the getpeerinfo results
        self.nodes[1].generate(1)
        self.sync_all()
        time_now = int(time.time())
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # Verify last_block and last_transaction keys/values.
        for node, peer, field in product(range(self.num_nodes), range(2),
                                         ['last_block', 'last_transaction']):
            assert field in peer_info[node][peer].keys()
            if peer_info[node][peer][field] != 0:
                assert_approx(peer_info[node][peer][field], time_now, vspan=60)
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
        assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
        assert_equal(peer_info[1][0]['minfeefilter'], Decimal("0.00001000"))
        # check the `servicesnames` field
        for info in peer_info:
            assert_net_servicesnames(int(info[0]["services"], 0x10),
                                     info[0]["servicesnames"])

        assert_equal(peer_info[0][0]['connection_type'], 'inbound')
        assert_equal(peer_info[0][1]['connection_type'], 'manual')

        assert_equal(peer_info[1][0]['connection_type'], 'manual')
        assert_equal(peer_info[1][1]['connection_type'], 'inbound')

        # Check dynamically generated networks list in getpeerinfo help output.
        assert "(ipv4, ipv6, onion, i2p, not_publicly_routable)" in self.nodes[
            0].help("getpeerinfo")

    def test_getnettotals(self):
        self.log.info("Test getnettotals")
        # Test getnettotals and getpeerinfo by doing a ping. The bytes
        # sent/received should increase by at least the size of one ping (32
        # bytes) and one pong (32 bytes).
        net_totals_before = self.nodes[0].getnettotals()
        peer_info_before = self.nodes[0].getpeerinfo()

        self.nodes[0].ping()
        self.wait_until(lambda: (self.nodes[0].getnettotals()[
            'totalbytessent'] >= net_totals_before['totalbytessent'] + 32 * 2),
                        timeout=1)
        self.wait_until(lambda: (self.nodes[0].getnettotals()[
            'totalbytesrecv'] >= net_totals_before['totalbytesrecv'] + 32 * 2),
                        timeout=1)

        for peer_before in peer_info_before:
            peer_after = lambda: next(p for p in self.nodes[0].getpeerinfo()
                                      if p['id'] == peer_before['id'])
            self.wait_until(
                lambda: peer_after()['bytesrecv_per_msg'].get('pong', 0) >=
                peer_before['bytesrecv_per_msg'].get('pong', 0) + 32,
                timeout=1)
            self.wait_until(
                lambda: peer_after()['bytessent_per_msg'].get('ping', 0) >=
                peer_before['bytessent_per_msg'].get('ping', 0) + 32,
                timeout=1)

    def test_getnetworkinfo(self):
        self.log.info("Test getnetworkinfo")
        info = self.nodes[0].getnetworkinfo()
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 2)
        assert_equal(info['connections_in'], 1)
        assert_equal(info['connections_out'], 1)

        with self.nodes[0].assert_debug_log(
                expected_msgs=['SetNetworkActive: false\n']):
            self.nodes[0].setnetworkactive(state=False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Wait a bit for all sockets to close
        self.wait_until(
            lambda: self.nodes[0].getnetworkinfo()['connections'] == 0,
            timeout=3)

        with self.nodes[0].assert_debug_log(
                expected_msgs=['SetNetworkActive: true\n']):
            self.nodes[0].setnetworkactive(state=True)
        # Connect nodes both ways.
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)

        info = self.nodes[0].getnetworkinfo()
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 2)
        assert_equal(info['connections_in'], 1)
        assert_equal(info['connections_out'], 1)

        # check the `servicesnames` field
        network_info = [node.getnetworkinfo() for node in self.nodes]
        for info in network_info:
            assert_net_servicesnames(int(info["localservices"], 0x10),
                                     info["localservicesnames"])

        # Check dynamically generated networks list in getnetworkinfo help output.
        assert "(ipv4, ipv6, onion, i2p)" in self.nodes[0].help(
            "getnetworkinfo")

    def test_getaddednodeinfo(self):
        self.log.info("Test getaddednodeinfo")
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(node=ip_port, command='add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that node cannot be added again
        assert_raises_rpc_error(-23,
                                "Node already added",
                                self.nodes[0].addnode,
                                node=ip_port,
                                command='add')
        # check that node can be removed
        self.nodes[0].addnode(node=ip_port, command='remove')
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # check that trying to remove the node again returns an error
        assert_raises_rpc_error(-24,
                                "Node could not be removed",
                                self.nodes[0].addnode,
                                node=ip_port,
                                command='remove')
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added",
                                self.nodes[0].getaddednodeinfo, '1.1.1.1')

    def test_service_flags(self):
        self.log.info("Test service flags")
        self.nodes[0].add_p2p_connection(P2PInterface(),
                                         services=(1 << 4) | (1 << 63))
        assert_equal(['UNKNOWN[2^4]', 'UNKNOWN[2^63]'],
                     self.nodes[0].getpeerinfo()[-1]['servicesnames'])
        self.nodes[0].disconnect_p2ps()

    def test_getnodeaddresses(self):
        self.log.info("Test getnodeaddresses")
        self.nodes[0].add_p2p_connection(P2PInterface())
        services = NODE_NETWORK | NODE_WITNESS

        # Add an IPv6 address to the address manager.
        ipv6_addr = "1233:3432:2434:2343:3234:2345:6546:4534"
        self.nodes[0].addpeeraddress(address=ipv6_addr, port=8333)

        # Add 10,000 IPv4 addresses to the address manager. Due to the way bucket
        # and bucket positions are calculated, some of these addresses will collide.
        imported_addrs = []
        for i in range(10000):
            first_octet = i >> 8
            second_octet = i % 256
            a = f"{first_octet}.{second_octet}.1.1"
            imported_addrs.append(a)
            self.nodes[0].addpeeraddress(a, 8333)

        # Fetch the addresses via the RPC and test the results.
        assert_equal(len(self.nodes[0].getnodeaddresses()),
                     1)  # default count is 1
        assert_equal(len(self.nodes[0].getnodeaddresses(count=2)), 2)
        assert_equal(
            len(self.nodes[0].getnodeaddresses(network="ipv4", count=8)), 8)

        # Maximum possible addresses in AddrMan is 10000. The actual number will
        # usually be less due to bucket and bucket position collisions.
        node_addresses = self.nodes[0].getnodeaddresses(0, "ipv4")
        assert_greater_than(len(node_addresses), 5000)
        assert_greater_than(10000, len(node_addresses))
        for a in node_addresses:
            assert_greater_than(a["time"], 1527811200)  # 1st June 2018
            assert_equal(a["services"], services)
            assert a["address"] in imported_addrs
            assert_equal(a["port"], 8333)
            assert_equal(a["network"], "ipv4")

        # Test the IPv6 address.
        res = self.nodes[0].getnodeaddresses(0, "ipv6")
        assert_equal(len(res), 1)
        assert_equal(res[0]["address"], ipv6_addr)
        assert_equal(res[0]["network"], "ipv6")
        assert_equal(res[0]["port"], 8333)
        assert_equal(res[0]["services"], services)

        # Test for the absence of onion and I2P addresses.
        for network in ["onion", "i2p"]:
            assert_equal(self.nodes[0].getnodeaddresses(0, network), [])

        # Test invalid arguments.
        assert_raises_rpc_error(-8, "Address count out of range",
                                self.nodes[0].getnodeaddresses, -1)
        assert_raises_rpc_error(-8, "Network not recognized: Foo",
                                self.nodes[0].getnodeaddresses, 1, "Foo")

    def test_addpeeraddress(self):
        self.log.info("Test addpeeraddress")
        node = self.nodes[1]

        self.log.debug("Test that addpeerinfo is a hidden RPC")
        # It is hidden from general help, but its detailed help may be called directly.
        assert "addpeerinfo" not in node.help()
        assert "addpeerinfo" in node.help("addpeerinfo")

        self.log.debug("Test that adding an empty address fails")
        assert_equal(node.addpeeraddress(address="", port=8333),
                     {"success": False})
        assert_equal(node.getnodeaddresses(count=0), [])

        self.log.debug("Test that adding a valid address succeeds")
        assert_equal(node.addpeeraddress(address="1.2.3.4", port=8333),
                     {"success": True})
        addrs = node.getnodeaddresses(count=0)
        assert_equal(len(addrs), 1)
        assert_equal(addrs[0]["address"], "1.2.3.4")
        assert_equal(addrs[0]["port"], 8333)

        self.log.debug(
            "Test that adding the same address again when already present fails"
        )
        assert_equal(node.addpeeraddress(address="1.2.3.4", port=8333),
                     {"success": False})
        assert_equal(len(node.getnodeaddresses(count=0)), 1)
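
For context on the servicesnames assertions in this example: a peer's services field is a bitmask, and each assigned bit has a name. The values below follow Bitcoin Core's protocol definitions (shown here as a reference, not part of the original snippet); bits 4 and 63 are unassigned, which is why test_service_flags expects the UNKNOWN renderings:

# Assigned service-flag bits (per Bitcoin Core's protocol.h):
NODE_NETWORK = 1 << 0           # can serve the full block chain
NODE_BLOOM = 1 << 2             # supports BIP37 bloom-filtered connections
NODE_WITNESS = 1 << 3           # supports segregated witness (BIP144)
NODE_COMPACT_FILTERS = 1 << 6   # serves BIP157 compact block filters
NODE_NETWORK_LIMITED = 1 << 10  # pruned node serving the last 288 blocks

# test_service_flags advertises (1 << 4) | (1 << 63); neither bit is
# assigned a name, so getpeerinfo reports 'UNKNOWN[2^4]' and 'UNKNOWN[2^63]'.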
Ejemplo n.º 15
0
class NetTest(SyscoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-minrelaytxfee=0.00001000"],
                           ["-minrelaytxfee=0.00000500"]]
        self.supports_cli = False

    def run_test(self):
        # We need miniwallet to make a transaction
        self.wallet = MiniWallet(self.nodes[0])
        self.wallet.generate(1)
        # Get out of IBD for the minfeefilter and getpeerinfo tests.
        self.nodes[0].generate(101)

        # By default, the test framework sets up an addnode connection from
        # node 1 --> node0. By connecting node0 --> node 1, we're left with
        # the two nodes being connected both ways.
        # Topology will look like: node0 <--> node1
        self.connect_nodes(0, 1)
        self.sync_all()

        self.test_connection_count()
        self.test_getpeerinfo()
        self.test_getnettotals()
        self.test_getnetworkinfo()
        self.test_getaddednodeinfo()
        self.test_service_flags()
        self.test_getnodeaddresses()

    def test_connection_count(self):
        self.log.info("Test getconnectioncount")
        # After using `connect_nodes` to connect nodes 0 and 1 to each other.
        assert_equal(self.nodes[0].getconnectioncount(), 2)

    def test_getpeerinfo(self):
        self.log.info("Test getpeerinfo")
        # Create a few getpeerinfo last_block/last_transaction values.
        self.wallet.send_self_transfer(
            from_node=self.nodes[0]
        )  # Make a transaction so we can see it in the getpeerinfo results
        self.nodes[1].generate(1)
        self.sync_all()
        time_now = int(time.time())
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # Verify last_block and last_transaction keys/values.
        for node, peer, field in product(range(self.num_nodes), range(2),
                                         ['last_block', 'last_transaction']):
            assert field in peer_info[node][peer].keys()
            if peer_info[node][peer][field] != 0:
                assert_approx(peer_info[node][peer][field], time_now, vspan=60)
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
        assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
        assert_equal(peer_info[1][0]['minfeefilter'], Decimal("0.00001000"))
        # check the `servicesnames` field
        for info in peer_info:
            assert_net_servicesnames(int(info[0]["services"], 0x10),
                                     info[0]["servicesnames"])

        assert_equal(peer_info[0][0]['connection_type'], 'inbound')
        assert_equal(peer_info[0][1]['connection_type'], 'manual')

        assert_equal(peer_info[1][0]['connection_type'], 'manual')
        assert_equal(peer_info[1][1]['connection_type'], 'inbound')

        # Check dynamically generated networks list in getpeerinfo help output.
        assert "(ipv4, ipv6, onion, i2p, not_publicly_routable)" in self.nodes[
            0].help("getpeerinfo")

    def test_getnettotals(self):
        self.log.info("Test getnettotals")
        # Test getnettotals and getpeerinfo by doing a ping. The bytes
        # sent/received should increase by at least the size of one ping (32
        # bytes) and one pong (32 bytes).
        net_totals_before = self.nodes[0].getnettotals()
        peer_info_before = self.nodes[0].getpeerinfo()

        self.nodes[0].ping()
        self.wait_until(lambda: (self.nodes[0].getnettotals()[
            'totalbytessent'] >= net_totals_before['totalbytessent'] + 32 * 2),
                        timeout=1)
        self.wait_until(lambda: (self.nodes[0].getnettotals()[
            'totalbytesrecv'] >= net_totals_before['totalbytesrecv'] + 32 * 2),
                        timeout=1)

        for peer_before in peer_info_before:
            peer_after = lambda: next(p for p in self.nodes[0].getpeerinfo()
                                      if p['id'] == peer_before['id'])
            self.wait_until(
                lambda: peer_after()['bytesrecv_per_msg'].get('pong', 0) >=
                peer_before['bytesrecv_per_msg'].get('pong', 0) + 32,
                timeout=1)
            self.wait_until(
                lambda: peer_after()['bytessent_per_msg'].get('ping', 0) >=
                peer_before['bytessent_per_msg'].get('ping', 0) + 32,
                timeout=1)

    def test_getnetworkinfo(self):
        self.log.info("Test getnetworkinfo")
        info = self.nodes[0].getnetworkinfo()
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 2)
        assert_equal(info['connections_in'], 1)
        assert_equal(info['connections_out'], 1)

        with self.nodes[0].assert_debug_log(
                expected_msgs=['SetNetworkActive: false\n']):
            self.nodes[0].setnetworkactive(state=False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Wait a bit for all sockets to close
        self.wait_until(
            lambda: self.nodes[0].getnetworkinfo()['connections'] == 0,
            timeout=3)

        with self.nodes[0].assert_debug_log(
                expected_msgs=['SetNetworkActive: true\n']):
            self.nodes[0].setnetworkactive(state=True)
        # Connect nodes both ways.
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)

        info = self.nodes[0].getnetworkinfo()
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 2)
        assert_equal(info['connections_in'], 1)
        assert_equal(info['connections_out'], 1)

        # check the `servicesnames` field
        network_info = [node.getnetworkinfo() for node in self.nodes]
        for info in network_info:
            assert_net_servicesnames(int(info["localservices"], 0x10),
                                     info["localservicesnames"])

        # Check dynamically generated networks list in getnetworkinfo help output.
        assert "(ipv4, ipv6, onion, i2p)" in self.nodes[0].help(
            "getnetworkinfo")

    def test_getaddednodeinfo(self):
        self.log.info("Test getaddednodeinfo")
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(node=ip_port, command='add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that node cannot be added again
        assert_raises_rpc_error(-23,
                                "Node already added",
                                self.nodes[0].addnode,
                                node=ip_port,
                                command='add')
        # check that node can be removed
        self.nodes[0].addnode(node=ip_port, command='remove')
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # check that trying to remove the node again returns an error
        assert_raises_rpc_error(-24,
                                "Node could not be removed",
                                self.nodes[0].addnode,
                                node=ip_port,
                                command='remove')
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added",
                                self.nodes[0].getaddednodeinfo, '1.1.1.1')

    def test_service_flags(self):
        self.log.info("Test service flags")
        self.nodes[0].add_p2p_connection(P2PInterface(),
                                         services=(1 << 4) | (1 << 63))
        assert_equal(['UNKNOWN[2^4]', 'UNKNOWN[2^63]'],
                     self.nodes[0].getpeerinfo()[-1]['servicesnames'])
        self.nodes[0].disconnect_p2ps()

    def test_getnodeaddresses(self):
        self.log.info("Test getnodeaddresses")
        self.nodes[0].add_p2p_connection(P2PInterface())

        # Add some addresses to the Address Manager over RPC. Due to the way
        # bucket and bucket position are calculated, some of these addresses
        # will collide.
        imported_addrs = []
        for i in range(10000):
            first_octet = i >> 8
            second_octet = i % 256
            a = "{}.{}.1.1".format(first_octet, second_octet)  # IPV4
            imported_addrs.append(a)
            # SYSCOIN
            self.nodes[0].addpeeraddress(a, 8369)

        # Obtain addresses via rpc call and check they were ones sent in before.
        #
        # Maximum possible addresses in addrman is 10000, although actual
        # number will usually be less due to bucket and bucket position
        # collisions.
        node_addresses = self.nodes[0].getnodeaddresses(0)
        assert_greater_than(len(node_addresses), 5000)
        assert_greater_than(10000, len(node_addresses))
        for a in node_addresses:
            assert_greater_than(a["time"], 1527811200)  # 1st June 2018
            assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS)
            assert a["address"] in imported_addrs
            assert_equal(a["port"], 8369)
            assert_equal(a["network"], "ipv4")

        node_addresses = self.nodes[0].getnodeaddresses(1)
        assert_equal(len(node_addresses), 1)

        assert_raises_rpc_error(-8, "Address count out of range",
                                self.nodes[0].getnodeaddresses, -1)

        # addrman's size cannot be known reliably after insertion, as hash
        # collisions may occur. So only test that requesting a large number
        # of addresses returns fewer than that number.
        LARGE_REQUEST_COUNT = 10000
        node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
        assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))
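
The count argument used in both NetTest variants above has the following semantics per the RPC's documented behavior; a quick usage sketch (node stands for any test node):

node.getnodeaddresses()    # default count is 1: one random known address
node.getnodeaddresses(0)   # count=0 returns all known addresses
node.getnodeaddresses(50)  # up to 50 randomly selected known addresses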
Ejemplo n.º 16
0
class MempoolExpiryTest(shitecoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def test_transaction_expiry(self, timeout):
        """Tests that a transaction expires after the expiry timeout and its
        children are removed as well."""
        node = self.nodes[0]
        self.wallet = MiniWallet(node)

        # Add enough mature utxos to the wallet so that all txs spend confirmed coins.
        self.wallet.generate(4)
        node.generate(100)

        # Send a parent transaction that will expire.
        parent_txid = self.wallet.send_self_transfer(from_node=node)['txid']
        parent_utxo = self.wallet.get_utxo(txid=parent_txid)
        independent_utxo = self.wallet.get_utxo()

        # Ensure the transactions we send to trigger the mempool check spend utxos that are independent of
        # the transactions being tested for expiration.
        trigger_utxo1 = self.wallet.get_utxo()
        trigger_utxo2 = self.wallet.get_utxo()

        # Set the mocktime to the arrival time of the parent transaction.
        entry_time = node.getmempoolentry(parent_txid)['time']
        node.setmocktime(entry_time)

        # Let half of the timeout elapse and broadcast the child transaction spending the parent transaction.
        half_expiry_time = entry_time + int(60 * 60 * timeout / 2)
        node.setmocktime(half_expiry_time)
        child_txid = self.wallet.send_self_transfer(
            from_node=node, utxo_to_spend=parent_utxo)['txid']
        assert_equal(parent_txid,
                     node.getmempoolentry(child_txid)['depends'][0])
        self.log.info('Broadcast child transaction after {} hours.'.format(
            timedelta(seconds=(half_expiry_time - entry_time))))

        # Broadcast another (independent) transaction.
        independent_txid = self.wallet.send_self_transfer(
            from_node=node, utxo_to_spend=independent_utxo)['txid']

        # Let most of the timeout elapse and check that the parent tx is still
        # in the mempool.
        nearly_expiry_time = entry_time + 60 * 60 * timeout - 5
        node.setmocktime(nearly_expiry_time)
        # Broadcast a transaction as the expiry of transactions in the mempool is only checked
        # when a new transaction is added to the mempool.
        self.wallet.send_self_transfer(from_node=node,
                                       utxo_to_spend=trigger_utxo1)
        self.log.info('Test parent tx not expired after {} hours.'.format(
            timedelta(seconds=(nearly_expiry_time - entry_time))))
        assert_equal(entry_time, node.getmempoolentry(parent_txid)['time'])

        # Transaction should be evicted from the mempool after the expiry time
        # has passed.
        expiry_time = entry_time + 60 * 60 * timeout + 5
        node.setmocktime(expiry_time)
        # Again, broadcast a transaction so the expiry of transactions in the mempool is checked.
        self.wallet.send_self_transfer(from_node=node,
                                       utxo_to_spend=trigger_utxo2)
        self.log.info('Test parent tx expiry after {} hours.'.format(
            timedelta(seconds=(expiry_time - entry_time))))
        assert_raises_rpc_error(-5, 'Transaction not in mempool',
                                node.getmempoolentry, parent_txid)

        # The child transaction should be removed from the mempool as well.
        self.log.info('Test child tx is evicted as well.')
        assert_raises_rpc_error(-5, 'Transaction not in mempool',
                                node.getmempoolentry, child_txid)

        # Check that the independent tx is still in the mempool.
        self.log.info(
            'Test the independent tx not expired after {} hours.'.format(
                timedelta(seconds=(expiry_time - half_expiry_time))))
        assert_equal(half_expiry_time,
                     node.getmempoolentry(independent_txid)['time'])

    def run_test(self):
        self.log.info('Test default mempool expiry timeout of %d hours.' %
                      DEFAULT_MEMPOOL_EXPIRY)
        self.test_transaction_expiry(DEFAULT_MEMPOOL_EXPIRY)

        self.log.info('Test custom mempool expiry timeout of %d hours.' %
                      CUSTOM_MEMPOOL_EXPIRY)
        self.restart_node(0, ['-mempoolexpiry=%d' % CUSTOM_MEMPOOL_EXPIRY])
        self.test_transaction_expiry(CUSTOM_MEMPOOL_EXPIRY)
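
The two constants this test relies on are defined at module level in the upstream mempool_expiry.py and are omitted from the snippet; their values there are:

DEFAULT_MEMPOOL_EXPIRY = 336  # hours (14 days), bitcoind's built-in default
CUSTOM_MEMPOOL_EXPIRY = 10    # hours, exercised via restart with -mempoolexpiry=10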
Ejemplo n.º 17
0
    def test_muhash_implementation(self):
        self.log.info("Test MuHash implementation consistency")

        node = self.nodes[0]
        wallet = MiniWallet(node)
        mocktime = node.getblockheader(node.getblockhash(0))['time'] + 1
        node.setmocktime(mocktime)

        # Generate 100 blocks and remove the first since we plan to spend its
        # coinbase
        block_hashes = wallet.generate(1) + node.generate(99)
        blocks = list(
            map(lambda block: FromHex(CBlock(), node.getblock(block, False)),
                block_hashes))
        blocks.pop(0)

        # Create a spending transaction and mine a block which includes it
        txid = wallet.send_self_transfer(from_node=node)['txid']
        tx_block = node.generateblock(output=wallet.get_address(),
                                      transactions=[txid])
        blocks.append(FromHex(CBlock(), node.getblock(tx_block['hash'],
                                                      False)))

        # Unlike upstream, Xaya allows spending the genesis block's coinbase,
        # so we have to include that into the UTXO set.
        genesis = FromHex(CBlock(), node.getblock(node.getblockhash(0), False))
        blocks = [genesis] + blocks

        # Serialize the outputs that should be in the UTXO set and add them to
        # a MuHash object
        muhash = MuHash3072()

        for height, block in enumerate(blocks):
            # Block 1 was popped from the list (its coinbase is spent), so
            # every block after the genesis block sits one height higher
            # than its list index.
            if height > 0:
                height += 1

            for tx in block.vtx:
                for n, tx_out in enumerate(tx.vout):
                    coinbase = 1 if not tx.vin[0].prevout.hash else 0

                    # Skip witness commitment
                    if (coinbase and n > 0):
                        continue

                    data = COutPoint(int(tx.rehash(), 16), n).serialize()
                    data += struct.pack("<i", height * 2 + coinbase)
                    data += tx_out.serialize()

                    muhash.insert(data)

        finalized = muhash.digest()
        node_muhash = node.gettxoutsetinfo("muhash")['muhash']

        assert_equal(finalized[::-1].hex(), node_muhash)

        # The values differ from upstream since in Xaya the genesis block's coinbase
        # is part of the UTXO set.
        self.log.info("Test deterministic UTXO set hash results")
        assert_equal(
            node.gettxoutsetinfo()['hash_serialized_2'],
            "450cb0874edb935d7243d3e83ea2dfe463729a7f08bbe701ab830f3927ce88da")
        assert_equal(
            node.gettxoutsetinfo("muhash")['muhash'],
            "5de773dfb84089156402f41bbfddf27652a3cf136e2bed2986a7ce6bc6db4a80")