Example #1
    def reorg_test(self):
        # Node 1 will mine a 300-block chain starting 287 blocks back from the tip of Node 0 and Node 2
        # This forces Node 2 into a reorg onto the reorg_test chain that requires 288 blocks of undo data

        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)

        self.forkheight = height - 287
        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
        self.nodes[1].invalidateblock(self.forkhash)

        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(self.forkheight - 1)

        assert self.nodes[1].getblockcount() == self.forkheight - 1
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())

        # Disconnect node1 and generate the new chain
        disconnect_nodes(self.nodes[0], 1)
        disconnect_nodes(self.nodes[1], 2)

        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        self.log.info("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        self.sync_blocks(self.nodes[0:3], timeout=120)

        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))

        self.log.info("Mine 220 more large blocks so we have requisite history")

        mine_large_blocks(self.nodes[0], 220)

        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        assert_greater_than(550, usage)
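The calc_usage helper used above is defined elsewhere in the test; as a rough sketch, and assuming it simply reports the combined size of the files in the pruned blocks directory in MiB, it could look like this:

import os

def calc_usage(blockdir):
    # Sum the sizes of all regular files in the blocks directory
    # (blk*.dat and rev*.dat) and report the total in MiB.
    return sum(os.path.getsize(os.path.join(blockdir, f))
               for f in os.listdir(blockdir)
               if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)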
Example #2
    def create_chain_with_staleblocks(self):
        # Create stale blocks in manageable-sized chunks
        self.log.info("Mine 24 (stale) blocks on Node 1, followed by a 25-block (main chain) reorg from Node 0, for 12 rounds")

        for j in range(12):
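            # Each round: 24 stale blocks on node 1 plus 25 main-chain blocks on node 0,
            # i.e. 288 stale and 300 main-chain blocks over the 12 rounds.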
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node 0 reconnects
            disconnect_nodes(self.nodes[0], 1)
            disconnect_nodes(self.nodes[0], 2)
            # Mine 24 blocks in node 1
            mine_large_blocks(self.nodes[1], 24)

            # Reorg back with 25 block chain from node 0
            mine_large_blocks(self.nodes[0], 25)

            # Reconnect in this order so that both nodes hear about the reorg at the same time
            connect_nodes(self.nodes[0], 1)
            connect_nodes(self.nodes[0], 2)
            self.sync_blocks(self.nodes[0:3])

        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
Example #3
    def run_test(self):
        # Create two chains by disconnecting nodes 0 & 1, mining, then reconnecting
        disconnect_nodes(self.nodes[0], 1)

        self.nodes[0].generate(3)
        self.nodes[1].generate(4)

        assert_equal(self.nodes[0].getblockcount(), 3)
        chain0_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)]

        # Reorg node 0 to a new chain
        connect_nodes(self.nodes[0], 1)
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getblockcount(), 4)
        chain1_hashes = [self.nodes[0].getblockhash(block_height) for block_height in range(4)]

        # Test getblockfilter returns a filter for all blocks and filter types on active chain
        for block_hash in chain1_hashes:
            for filter_type in FILTER_TYPES:
                result = self.nodes[0].getblockfilter(block_hash, filter_type)
                assert_is_hex_string(result['filter'])

        # Test getblockfilter returns a filter for all blocks and filter types on stale chain
        for block_hash in chain0_hashes:
            for filter_type in FILTER_TYPES:
                result = self.nodes[0].getblockfilter(block_hash, filter_type)
                assert_is_hex_string(result['filter'])

        # Test getblockfilter with unknown block
        bad_block_hash = "0123456789abcdef" * 4
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockfilter, bad_block_hash, "basic")

        # Test getblockfilter with undefined filter type
        genesis_hash = self.nodes[0].getblockhash(0)
        assert_raises_rpc_error(-5, "Unknown filtertype", self.nodes[0].getblockfilter, genesis_hash, "unknown")
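FILTER_TYPES and assert_is_hex_string are imported from the test framework rather than defined here; a plausible minimal version (the single "basic" filter type is an assumption) would be:

FILTER_TYPES = ["basic"]  # assumption: only the BIP 158 "basic" filter type is exercised

def assert_is_hex_string(string):
    # Fail the test if the value cannot be parsed as hexadecimal.
    try:
        int(string, 16)
    except Exception as e:
        raise AssertionError("Couldn't interpret {!r} as hexadecimal; raised: {}".format(string, e))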
Example #4
    def test_utxo_conversion(self):
        mining_node = self.nodes[2]
        offline_node = self.nodes[0]
        online_node = self.nodes[1]

        # Disconnect offline node from others
        disconnect_nodes(offline_node, 1)
        disconnect_nodes(online_node, 0)
        disconnect_nodes(offline_node, 2)
        disconnect_nodes(mining_node, 0)

        # Mine a transaction that credits the offline address
        offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
        online_addr = online_node.getnewaddress(address_type="p2sh-segwit")
        online_node.importaddress(offline_addr, "", False)
        mining_node.sendtoaddress(address=offline_addr, amount=1.0)
        mining_node.generate(nblocks=1)
        sync_blocks([mining_node, online_node])

        # Construct an unsigned PSBT on the online node (which doesn't know the output is segwit, so it will include a non-witness UTXO)
        utxos = online_node.listunspent(addresses=[offline_addr])
        raw = online_node.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
        psbt = online_node.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
        assert("non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0])

        # Have the offline node sign the PSBT (which will update the UTXO to segwit)
        signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
        assert("witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0])

        # Make sure we can mine the resulting transaction
        txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
        mining_node.generate(1)
        sync_blocks([mining_node, online_node])
        assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)

        # Reconnect
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
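connect_nodes_bi is the bidirectional counterpart of the connect_nodes helper used elsewhere in these examples; a minimal sketch, assuming it simply connects both directions and lets connect_nodes do the waiting:

def connect_nodes_bi(nodes, a, b):
    # Connect node a to node b and node b to node a so that both
    # sides register the peer.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)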
Example #5
    def test_getfinalizationstate(self):
        def create_deposit(finalizer, node):
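            # Connect the finalizer just long enough to broadcast a 1500-coin deposit,
            # wait for it to reach the node's mempool, then isolate the finalizer again.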
            connect_nodes(finalizer, node.index)
            payto = finalizer.getnewaddress('', 'legacy')
            txid = finalizer.deposit(payto, 1500)
            wait_until(lambda: txid in node.getrawmempool())
            disconnect_nodes(finalizer, node.index)

        node = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]

        self.setup_stake_coins(node, finalizer1, finalizer2)

        # initial setup
        # F
        # e0
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 0)
        assert_equal(state['lastJustifiedEpoch'], 0)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)

        # start epoch=1
        # F
        # e0 - e1[1]
        connect_nodes(node, finalizer1.index)
        connect_nodes(node, finalizer2.index)
        generate_block(node)
        sync_blocks([node, finalizer1, finalizer2])
        disconnect_nodes(node, finalizer1.index)
        disconnect_nodes(node, finalizer2.index)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 1)
        assert_equal(state['lastJustifiedEpoch'], 0)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)

        self.log.info('initial finalization state is correct')

        # add finalizer1
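        # The deposit is broadcast now but does not count towards 'validators' immediately;
        # the state checks below show when it becomes active.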
        create_deposit(finalizer1, node)

        # test instant justification 1
        # F
        # e0 - e1
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 5)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 1)
        assert_equal(state['lastJustifiedEpoch'], 0)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)
        self.log.info('finalization state includes new validators')

        # test instant justification 2
        # F    J
        # e0 - e1 - e2
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 10)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 2)
        assert_equal(state['lastJustifiedEpoch'], 1)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)
        self.log.info('instant finalization 1 is correct')

        # test instant justification 3
        # F    F    J
        # e0 - e1 - e2 - e3
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 15)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 3)
        assert_equal(state['lastJustifiedEpoch'], 2)
        assert_equal(state['lastFinalizedEpoch'], 1)
        assert_equal(state['validators'], 0)
        self.log.info('instant finalization 2 is correct')

        # test instant justification 4
        # F    F    F    J
        # e0 - e1 - e2 - e3 - e4
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 20)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 1)
        assert_equal(state['currentDynastyStartsAtEpoch'], 4)
        assert_equal(state['currentEpoch'], 4)
        assert_equal(state['lastJustifiedEpoch'], 3)
        assert_equal(state['lastFinalizedEpoch'], 2)
        assert_equal(state['validators'], 0)
        self.log.info('instant finalization 3 is correct')

        # test instant justification 5 (must be last one)
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 25)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 2)
        assert_equal(state['currentDynastyStartsAtEpoch'], 5)
        assert_equal(state['currentEpoch'], 5)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 0)

        # no justification
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6
        generate_block(node)
        assert_equal(node.getblockcount(), 26)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 6)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)

        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 30)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 6)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)

        # no justification
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7[31]
        generate_block(node)
        assert_equal(node.getblockcount(), 31)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 7)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)
        self.log.info('finalization state without justification is correct')

        # create first justification
        # F    F    F    F    J         J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7[31, 32]
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node)

        assert_equal(node.getblockcount(), 32)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 7)
        assert_equal(state['lastJustifiedEpoch'], 6)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)
        self.log.info('finalization state after justification is correct')

        # skip 1 justification
        # F    F    F    F    J         J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9[41]
        generate_block(node, count=9)
        assert_equal(node.getblockcount(), 41)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 9)
        assert_equal(state['lastJustifiedEpoch'], 6)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)
        self.log.info('finalization state without justification is correct')

        # create finalization
        # F    F    F    J              J         J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9[41, 42]
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node)
        assert_equal(node.getblockcount(), 42)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 9)
        assert_equal(state['lastJustifiedEpoch'], 8)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J         J         F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10[46, 47]
        generate_block(node, count=4)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node)
        assert_equal(node.getblockcount(), 47)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 10)
        assert_equal(state['lastJustifiedEpoch'], 9)
        assert_equal(state['lastFinalizedEpoch'], 8)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J         J         F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10
        generate_block(node, count=3)
        assert_equal(node.getblockcount(), 50)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 10)
        assert_equal(state['lastJustifiedEpoch'], 9)
        assert_equal(state['lastFinalizedEpoch'], 8)
        assert_equal(state['validators'], 1)

        self.log.info('finalization state after finalization is correct')

        # F    F    F    F    J              J    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11[51]
        generate_block(node)
        assert_equal(node.getblockcount(), 51)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 4)
        assert_equal(state['currentDynastyStartsAtEpoch'], 11)
        assert_equal(state['currentEpoch'], 11)
        assert_equal(state['lastJustifiedEpoch'], 9)
        assert_equal(state['lastFinalizedEpoch'], 8)
        assert_equal(state['validators'], 1)
        self.log.info('dynasty after finalization is updated correctly')

        # add finalizer2 deposit at dynasty=5. will vote at dynasty=8
        create_deposit(finalizer2, node)

        # F    F    F    F    J              J    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 55)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 4)
        assert_equal(state['currentDynastyStartsAtEpoch'], 11)
        assert_equal(state['currentEpoch'], 11)
        assert_equal(state['lastJustifiedEpoch'], 10)
        assert_equal(state['lastFinalizedEpoch'], 9)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J              J    F    F    F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12
        generate_block(node)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 60)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 5)
        assert_equal(state['currentDynastyStartsAtEpoch'], 12)
        assert_equal(state['currentEpoch'], 12)
        assert_equal(state['lastJustifiedEpoch'], 11)
        assert_equal(state['lastFinalizedEpoch'], 10)
        assert_equal(state['validators'], 1)

        # F    F    F    J                   J    F    F    F     F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12 - e13
        generate_block(node)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 65)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 6)
        assert_equal(state['currentDynastyStartsAtEpoch'], 13)
        assert_equal(state['currentEpoch'], 13)
        assert_equal(state['lastJustifiedEpoch'], 12)
        assert_equal(state['lastFinalizedEpoch'], 11)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J              J    F    F    F     F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12 - e13 - e14[66]
        generate_block(node)
        assert_equal(node.getblockcount(), 66)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 7)
        assert_equal(state['currentDynastyStartsAtEpoch'], 14)
        assert_equal(state['currentEpoch'], 14)
        assert_equal(state['lastJustifiedEpoch'], 12)
        assert_equal(state['lastFinalizedEpoch'], 11)
        assert_equal(state['validators'], 2)
        self.log.info('new deposit was activated correctly')

        # F    F    F    F    J              J    F    F    F     F     F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12 - e13 - e14
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 70)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 7)
        assert_equal(state['currentDynastyStartsAtEpoch'], 14)
        assert_equal(state['currentEpoch'], 14)
        assert_equal(state['lastJustifiedEpoch'], 13)
        assert_equal(state['lastFinalizedEpoch'], 12)
        assert_equal(state['validators'], 2)
        self.log.info('new finalizer votes')
Example #6
 def setup_network(self):
     # Start with split network:
     super().setup_network()
     disconnect_nodes(self.nodes[1], 2)
     disconnect_nodes(self.nodes[2], 1)
Example #7
 def setup_network(self):
     # Start with split network:
     super(TxnMallTest, self).setup_network()
     disconnect_nodes(self.nodes[1], 2)
     disconnect_nodes(self.nodes[2], 1)
Example #8
    def run_test(self):
        def total_fees(*txids):
            total = 0
            for txid in txids:
                total += self.nodes[0].calculate_fee_from_txid(txid)

            return satoshi_round(total)

        self.nodes[1].generate(100)
        sync_blocks(self.nodes)
        balance = self.nodes[0].getbalance()
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                          Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                          Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                          Decimal("10"))

        sync_mempools(self.nodes)
        self.nodes[1].generate(1)

        # Can not abandon non-wallet transaction
        assert_raises_rpc_error(
            -5, 'Invalid or non-wallet transaction id',
            lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
        # Can not abandon confirmed transaction
        assert_raises_rpc_error(
            -5, 'Transaction not eligible for abandonment',
            lambda: self.nodes[0].abandontransaction(txid=txA))

        sync_blocks(self.nodes)
        newbalance = self.nodes[0].getbalance()

        # no more than fees lost
        assert (balance - newbalance <= total_fees(txA, txB, txC))
        balance = newbalance

        # Disconnect nodes so node0's transactions don't get into node1's mempool
        disconnect_nodes(self.nodes[0], self.nodes[1])

        # Identify the 10btc outputs
        nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txA, 1)["vout"]) if vout["value"] == Decimal("10"))
        nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txB, 1)["vout"]) if vout["value"] == Decimal("10"))
        nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txC, 1)["vout"]) if vout["value"] == Decimal("10"))

        inputs = []
        # spend 10btc outputs from txA and txB
        inputs.append({"txid": txA, "vout": nA})
        inputs.append({"txid": txB, "vout": nB})
        outputs = {}

        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])

        # Identify the 14.99998btc output
        nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))

        # Create a child tx spending AB1 and C
        inputs = []
        # Amount 14.99998 BCH
        inputs.append({"txid": txAB1, "vout": nAB})
        # Amount 10 BCH
        inputs.append({"txid": txC, "vout": nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])

        # Create a child tx spending ABC2
        signed3_change = Decimal("24.999")
        inputs = [{"txid": txABC2, "vout": 0}]
        outputs = {self.nodes[0].getnewaddress(): signed3_change}
        signed3 = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])

        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + signed3_change)
        balance = newbalance

        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])

        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        # Transactions which are not in the mempool should only reduce wallet balance.
        # Transaction inputs should still be spent, but the change not yet received.
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - signed3_change)
        # Unconfirmed received funds that are not in mempool also shouldn't show
        # up in unconfirmed balance.  Note that the transactions stored in the wallet
        # are not necessarily in the node's mempool.
        unconfbalance = self.nodes[0].getunconfirmedbalance(
        ) + self.nodes[0].getbalance()
        assert_equal(unconfbalance, newbalance)
        # Unconfirmed transactions which are not in the mempool should also
        # not be in listunspent
        assert (not txABC2
                in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
        balance = newbalance

        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance

        # Verify that even with a low min relay fee, the tx is not re-accepted
        # from wallet on startup once abandoned.
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)

        # If the transaction is re-sent, the wallet also unabandons it. The
        # change should be available, and its child transaction should remain
        # abandoned.
        # NOTE: Abandoned transactions are internal to the wallet, and tracked
        # separately from other indices.
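        # Re-sending txAB1 re-spends txA's and txB's 10 BCH outputs (-20) while its
        # 14.99998 BCH change output becomes available again.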
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance

        # Send the child tx again so it is no longer abandoned.
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(
            newbalance,
            balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance

        # Restart with a higher min relay fee again so the transactions are evicted from the mempool
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance

        # Create a double spend of AB1. Spend it again from only A's 10 output.
        # Mine double spend from node 1.
        inputs = []
        inputs.append({"txid": txA, "vout": nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)

        connect_nodes(self.nodes[0], self.nodes[1])
        sync_blocks(self.nodes)

        # Verify that B and C's 10 BCH outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance

        # There is currently a minor bug around this and so this test doesn't work.  See Issue #7315
        # Invalidate the block with the double spend and B's 10 BCH output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info(
            "If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer"
        )
        self.log.info(
            "conflicted has not resumed causing its inputs to be seen as spent.  See Issue #7315"
        )
        self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
Example #9
    def run_test(self):
        self.nodes[0].generate(2)
        self.sync_all()
        assert_equal(self.nodes[1].getblockcount(), 202)
        taddr1 = self.nodes[1].getnewaddress()
        saplingAddr0 = self.nodes[0].getnewshieldaddress()
        saplingAddr1 = self.nodes[1].getnewshieldaddress()

        # Verify addresses
        assert (saplingAddr0 in self.nodes[0].listshieldaddresses())
        assert (saplingAddr1 in self.nodes[1].listshieldaddresses())

        # Verify balance
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('0'))
        assert_equal(self.nodes[1].getshieldbalance(saplingAddr1),
                     Decimal('0'))
        assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))

        recipients = [{"address": saplingAddr0, "amount": Decimal('10')}]

        # Try fee too low
        fee_too_low = 0.001
        self.log.info("Trying to send a transaction with fee too low...")
        assert_raises_rpc_error(
            -4, "Fee set (%.3f) too low. Must be at least" % fee_too_low,
            self.nodes[0].rawshieldsendmany, "from_transparent", recipients, 1,
            fee_too_low)

        # Try fee too high.
        fee_too_high = 20
        self.log.info(
            "Good. It was not possible. Now try a tx with fee too high...")
        assert_raises_rpc_error(
            -4, "The transaction fee is too high: %.2f >" % fee_too_high,
            self.nodes[0].rawshieldsendmany, "from_transparent", recipients, 1,
            fee_too_high)

        # Trying to send a rawtx with low fee directly
        self.log.info("Good. It was not possible. Now try with a raw tx...")
        self.restart_node(0,
                          extra_args=self.extra_args[0] +
                          ['-minrelaytxfee=0.0000001'])
        rawtx_hex = self.nodes[0].rawshieldsendmany("from_transparent",
                                                    recipients, 1)
        self.restart_node(0, extra_args=self.extra_args[0])
        connect_nodes(self.nodes[0], 1)
        assert_raises_rpc_error(-26, "insufficient fee",
                                self.nodes[0].sendrawtransaction, rawtx_hex)
        self.log.info("Good. Not accepted in the mempool.")

        # Fixed fee
        fee = 0.05

        # Node 0 shields some funds
        # taddr -> Sapling
        self.log.info("TX 1: shield funds from specified transparent address.")
        mytxid1 = self.nodes[0].shieldsendmany(
            get_coinstake_address(self.nodes[0]), recipients, 1, fee)

        # shield more funds automatically selecting the transparent inputs
        self.log.info("TX 2: shield funds from any transparent address.")
        mytxid2 = self.nodes[0].shieldsendmany("from_transparent", recipients,
                                               1, fee)

        # Verify priority of tx is INF_PRIORITY, defined as 1E+25 (10000000000000000000000000)
        self.check_tx_priority([mytxid1, mytxid2])
        self.log.info("Priority for tx1 and tx2 checks out")

        self.nodes[2].generate(1)
        self.sync_all()

        # shield more funds creating and then sending a raw transaction
        self.log.info(
            "TX 3: shield funds creating and sending raw transaction.")
        tx_hex = self.nodes[0].rawshieldsendmany("from_transparent",
                                                 recipients, 1, fee)

        # Check SPORK_20 for sapling maintenance mode
        SPORK_20 = "SPORK_20_SAPLING_MAINTENANCE"
        self.activate_spork(0, SPORK_20)
        self.wait_for_spork(True, SPORK_20)
        assert_raises_rpc_error(-26, "bad-tx-sapling-maintenance",
                                self.nodes[0].sendrawtransaction, tx_hex)
        self.log.info("Good. Not accepted when SPORK_20 is active.")

        # Try with RPC...
        assert_raises_rpc_error(-8,
                                "Invalid parameter, Sapling not active yet",
                                self.nodes[0].shieldsendmany,
                                "from_transparent", recipients, 1, fee)

        # Disable SPORK_20 and retry
        sleep(5)
        self.deactivate_spork(0, SPORK_20)
        self.wait_for_spork(False, SPORK_20)
        mytxid3 = self.nodes[0].sendrawtransaction(tx_hex)
        self.log.info("Good. Accepted when SPORK_20 is not active.")

        # Verify priority of tx is INF_PRIORITY, defined as 1E+25 (10000000000000000000000000)
        self.check_tx_priority([mytxid3])
        self.log.info("Priority for tx3 checks out")

        self.nodes[2].generate(1)
        self.sync_all()

        # Verify balance
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('30'))
        assert_equal(self.nodes[1].getshieldbalance(saplingAddr1),
                     Decimal('0'))
        assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))
        self.log.info("Balances check out")

        # Now disconnect the block, activate SPORK_20, and try to reconnect it
        disconnect_nodes(self.nodes[0], 1)
        tip_hash = self.nodes[0].getbestblockhash()
        self.nodes[0].invalidateblock(tip_hash)
        assert tip_hash != self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('20'))
        self.log.info(
            "Now trying to connect block with shield tx, when SPORK_20 is active"
        )
        self.activate_spork(0, SPORK_20)
        self.nodes[0].reconsiderblock(tip_hash)
        assert tip_hash != self.nodes[0].getbestblockhash(
        )  # Block NOT connected
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('20'))
        self.log.info("Good. Not possible.")

        # Deactivate SPORK_20 and reconnect
        sleep(1)
        self.deactivate_spork(0, SPORK_20)
        self.nodes[0].reconsiderblock(tip_hash)
        assert_equal(tip_hash,
                     self.nodes[0].getbestblockhash())  # Block connected
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('30'))
        self.log.info(
            "Reconnected after deactivation of SPORK_20. Balance restored.")
        connect_nodes(self.nodes[0], 1)

        # Node 0 sends some shield funds to node 1
        # Sapling -> Sapling
        #         -> Sapling (change)
        self.log.info(
            "TX 4: shield transaction from specified sapling address.")
        recipients4 = [{"address": saplingAddr1, "amount": Decimal('10')}]
        mytxid4 = self.nodes[0].shieldsendmany(saplingAddr0, recipients4, 1,
                                               fee)
        self.check_tx_priority([mytxid4])

        self.nodes[2].generate(1)
        self.sync_all()

        # Send more shield funds (this time with automatic selection of the source)
        self.log.info("TX 5: shield transaction from any sapling address.")
        recipients5 = [{"address": saplingAddr1, "amount": Decimal('5')}]
        mytxid5 = self.nodes[0].shieldsendmany("from_shield", recipients5, 1,
                                               fee)
        self.check_tx_priority([mytxid5])

        self.nodes[2].generate(1)
        self.sync_all()

        # Send more shield funds (with create + send raw transaction)
        self.log.info("TX 6: shield raw transaction.")
        tx_hex = self.nodes[0].rawshieldsendmany("from_shield", recipients5, 1,
                                                 fee)
        mytxid6 = self.nodes[0].sendrawtransaction(tx_hex)
        self.check_tx_priority([mytxid6])

        self.nodes[2].generate(1)
        self.sync_all()

        # Shield more funds to a different address to verify multi-source notes spending
        saplingAddr2 = self.nodes[0].getnewshieldaddress()
        self.log.info(
            "TX 7: shield funds to later verify multi source notes spending.")
        recipients = [{"address": saplingAddr2, "amount": Decimal('10')}]
        mytxid7 = self.nodes[0].shieldsendmany(
            get_coinstake_address(self.nodes[0]), recipients, 1, fee)
        self.check_tx_priority([mytxid7])

        self.nodes[2].generate(5)
        self.sync_all()

        # Verify multi-source notes spending
        tAddr0 = self.nodes[0].getnewaddress()
        self.log.info("TX 8: verifying multi source notes spending.")
        recipients = [{"address": tAddr0, "amount": Decimal('11')}]
        mytxid8 = self.nodes[0].shieldsendmany("from_shield", recipients, 1,
                                               fee)
        self.check_tx_priority([mytxid8])

        self.nodes[2].generate(1)
        self.sync_all()

        # Verify balance
        assert_equal(
            self.nodes[0].getshieldbalance(saplingAddr0),
            Decimal('4.9'))  # 30 received - (20 sent + 0.15 fee) - 4.95 sent
        assert_equal(self.nodes[1].getshieldbalance(saplingAddr1),
                     Decimal('20'))  # 20 received
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr2),
                     Decimal('3.9'))  # 10 received - 10 sent + 3.9 change
        assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('0'))
        assert_equal(self.nodes[0].getshieldbalance(), Decimal('8.8'))
        self.log.info("Balances check out")

        # Node 1 sends some shield funds to node 0, as well as unshielding
        # Sapling -> Sapling
        #         -> taddr
        #         -> Sapling (change)
        self.log.info("TX 10: deshield funds from specified sapling address.")
        recipients7 = [{"address": saplingAddr0, "amount": Decimal('8')}]
        recipients7.append({"address": taddr1, "amount": Decimal('10')})
        mytxid7 = self.nodes[1].shieldsendmany(saplingAddr1, recipients7, 1,
                                               fee)
        self.check_tx_priority([mytxid7])

        self.nodes[2].generate(1)
        self.sync_all()

        # Verify balance
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('12.9'))  # 4.9 prev balance + 8 received
        assert_equal(self.nodes[1].getshieldbalance(saplingAddr1),
                     Decimal('1.95'))  # 20 prev balance - (18 sent + 0.05 fee)
        assert_equal(self.nodes[1].getreceivedbyaddress(taddr1), Decimal('10'))
        self.log.info("Balances check out")

        # Verify existence of Sapling related JSON fields
        resp = self.nodes[0].getrawtransaction(mytxid7, 1)
        assert_equal(
            Decimal(resp['valueBalance']),
            Decimal('10.05'))  # 20 shield input - 8 shield spend - 1.95 change
        assert_equal(len(resp['vShieldSpend']), 3)
        assert_equal(len(resp['vShieldOutput']), 2)
        assert ('bindingSig' in resp)
        shieldedSpend = resp['vShieldSpend'][0]
        assert ('cv' in shieldedSpend)
        assert ('anchor' in shieldedSpend)
        assert ('nullifier' in shieldedSpend)
        assert ('rk' in shieldedSpend)
        assert ('proof' in shieldedSpend)
        assert ('spendAuthSig' in shieldedSpend)
        shieldedOutput = resp['vShieldOutput'][0]
        assert ('cv' in shieldedOutput)
        assert ('cmu' in shieldedOutput)
        assert ('ephemeralKey' in shieldedOutput)
        assert ('encCiphertext' in shieldedOutput)
        assert ('outCiphertext' in shieldedOutput)
        assert ('proof' in shieldedOutput)
        self.log.info("Raw transaction decoding checks out")

        # Verify importing a spending key will update the nullifiers and witnesses correctly
        self.log.info("Checking exporting/importing a spending key...")
        sk0 = self.nodes[0].exportsaplingkey(saplingAddr0)
        saplingAddrInfo0 = self.nodes[2].importsaplingkey(sk0, "yes")
        assert_equal(saplingAddrInfo0["address"], saplingAddr0)
        assert_equal(
            self.nodes[2].getshieldbalance(saplingAddrInfo0["address"]),
            Decimal('12.9'))
        sk1 = self.nodes[1].exportsaplingkey(saplingAddr1)
        saplingAddrInfo1 = self.nodes[2].importsaplingkey(sk1, "yes")
        assert_equal(saplingAddrInfo1["address"], saplingAddr1)
        assert_equal(
            self.nodes[2].getshieldbalance(saplingAddrInfo1["address"]),
            Decimal('1.95'))

        # Verify importing a viewing key will update the nullifiers and witnesses correctly
        self.log.info("Checking exporting/importing a viewing key...")
        extfvk0 = self.nodes[0].exportsaplingviewingkey(saplingAddr0)
        saplingAddrInfo0 = self.nodes[3].importsaplingviewingkey(
            extfvk0, "yes")
        assert_equal(saplingAddrInfo0["address"], saplingAddr0)
        assert_equal(
            Decimal(self.nodes[3].getshieldbalance(saplingAddrInfo0["address"],
                                                   1, True)), Decimal('12.9'))
        extfvk1 = self.nodes[1].exportsaplingviewingkey(saplingAddr1)
        saplingAddrInfo1 = self.nodes[3].importsaplingviewingkey(
            extfvk1, "yes")
        assert_equal(saplingAddrInfo1["address"], saplingAddr1)
        assert_equal(
            self.nodes[3].getshieldbalance(saplingAddrInfo1["address"], 1,
                                           True), Decimal('1.95'))
        # no balance in the wallet
        assert_equal(self.nodes[3].getshieldbalance(), Decimal('0'))
        # watch only balance
        assert_equal(self.nodes[3].getshieldbalance("*", 1, True),
                     Decimal('14.85'))

        # Now shield some funds using sendmany
        self.log.info(
            "TX11: Shielding coins to multiple destinations with sendmany RPC..."
        )
        prev_balance = self.nodes[0].getbalance()
        recipients8 = {
            saplingAddr0: Decimal('8'),
            saplingAddr1: Decimal('1'),
            saplingAddr2: Decimal('0.5')
        }
        mytxid11 = self.nodes[0].sendmany("", recipients8)
        self.check_tx_priority([mytxid11])
        self.log.info("Done. Checking details and balances...")

        # Decrypted transaction details should be correct
        pt = self.nodes[0].viewshieldtransaction(mytxid11)
        fee = pt["fee"]
        assert_equal(pt['txid'], mytxid11)
        assert_equal(len(pt['spends']), 0)
        assert_equal(len(pt['outputs']), 3)
        found = [False] * 3
        for out in pt['outputs']:
            assert_equal(pt['outputs'].index(out), out['output'])
            if out['address'] == saplingAddr0:
                assert_equal(out['outgoing'], False)
                assert_equal(out['value'], Decimal('8'))
                found[0] = True
            elif out['address'] == saplingAddr1:
                assert_equal(out['outgoing'], True)
                assert_equal(out['value'], Decimal('1'))
                found[1] = True
            else:
                assert_equal(out['address'], saplingAddr2)
                assert_equal(out['outgoing'], False)
                assert_equal(out['value'], Decimal('0.5'))
                found[2] = True
        assert_equal(found, [True] * 3)

        # Verify balance
        self.nodes[2].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('20.9'))  # 12.9 prev balance + 8 received
        assert_equal(self.nodes[1].getshieldbalance(saplingAddr1),
                     Decimal('2.95'))  # 1.95 prev balance + 1 received
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr2),
                     Decimal('4.4'))  # 3.9 prev balance + 0.5 received
        # Balance of node 0 is: prev_balance - 1 FRAG (+fee) sent externally +  250 FRAG matured coinbase
        assert_equal(
            self.nodes[0].getbalance(),
            satoshi_round(prev_balance + Decimal('249') - Decimal(fee)))

        # Now shield some funds using sendtoaddress
        self.log.info("TX12: Shielding coins with sendtoaddress RPC...")
        prev_balance = self.nodes[0].getbalance()
        mytxid12 = self.nodes[0].sendtoaddress(saplingAddr0, Decimal('10'))
        self.check_tx_priority([mytxid12])
        self.log.info("Done. Checking details and balances...")

        # Decrypted transaction details should be correct
        pt = self.nodes[0].viewshieldtransaction(mytxid12)
        fee = pt["fee"]
        assert_equal(pt['txid'], mytxid12)
        assert_equal(len(pt['spends']), 0)
        assert_equal(len(pt['outputs']), 1)
        out = pt['outputs'][0]
        assert_equal(out['address'], saplingAddr0)
        assert_equal(out['outgoing'], False)
        assert_equal(out['value'], Decimal('10'))

        # Verify balance
        self.nodes[2].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getshieldbalance(saplingAddr0),
                     Decimal('30.9'))  # 20.9 prev balance + 10 received

        self.log.info("All good.")
Example #10
 def disconnect_all(self):
     disconnect_nodes(self.nodes[0], 1)
     disconnect_nodes(self.nodes[1], 0)
     disconnect_nodes(self.nodes[2], 1)
     disconnect_nodes(self.nodes[2], 0)
     disconnect_nodes(self.nodes[0], 2)
     disconnect_nodes(self.nodes[1], 2)
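The disconnect_nodes/connect_nodes helpers used throughout these examples come from the shared test framework; a simplified sketch of the usual disconnect logic, assuming each test node embeds its index in its subversion string and that a wait_until helper (as used in Example #5) is available:

def disconnect_nodes(from_connection, node_num):
    # Find the peer(s) whose subversion string carries the target node's
    # index, ask for a disconnect, then wait until they are gone.
    def matching_peers():
        return [peer['id'] for peer in from_connection.getpeerinfo()
                if "testnode{}".format(node_num) in peer['subver']]
    for peer_id in matching_peers():
        from_connection.disconnectnode(nodeid=peer_id)
    wait_until(lambda: matching_peers() == [], timeout=5)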
Example #11
    def run_test(self):
        # Send a tx from which to conflict outputs later
        txid_conflict_from = self.nodes[0].sendtoaddress(
            self.nodes[0].getnewaddress(), Decimal("10"))
        self.nodes[0].generate(1)
        self.sync_blocks()

        # Disconnect node1 from others to reorg its chain later
        disconnect_nodes(self.nodes[0], 1)
        disconnect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[0], 2)

        # Send a tx to be unconfirmed later
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                           Decimal("10"))
        tx = self.nodes[0].gettransaction(txid)
        self.nodes[0].generate(4)
        tx_before_reorg = self.nodes[0].gettransaction(txid)
        assert_equal(tx_before_reorg["confirmations"], 4)

        # Disconnect node0 from node2 to broadcast a conflict on their respective chains
        disconnect_nodes(self.nodes[0], 2)
        nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(
            txid_conflict_from)["details"]
                  if tx_out["amount"] == Decimal("10"))
        inputs = []
        inputs.append({"txid": txid_conflict_from, "vout": nA})
        outputs_1 = {}
        outputs_2 = {}

        # Create a conflicted tx to broadcast on node0's chain and a conflicting tx to broadcast on node2's chain. Both spend from txid_conflict_from
        outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998")
        outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998")
        conflicted = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs_1))
        conflicting = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs_2))

        conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"])
        self.nodes[0].generate(1)
        conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"])
        self.nodes[2].generate(9)

        # Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted
        connect_nodes(self.nodes[0], 2)
        self.sync_blocks([self.nodes[0], self.nodes[2]])
        conflicted = self.nodes[0].gettransaction(conflicted_txid)
        conflicting = self.nodes[0].gettransaction(conflicting_txid)
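        # A negative confirmation count reports the depth of the conflicting tx:
        # node2 mined 9 blocks on top of it.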
        assert_equal(conflicted["confirmations"], -9)
        assert_equal(conflicted["walletconflicts"][0], conflicting["txid"])

        # Node0 wallet is shut down
        self.stop_node(0)
        self.start_node(0)

        # The block chain re-orgs and the tx is included in a different block
        self.nodes[1].generate(9)
        self.nodes[1].sendrawtransaction(tx["hex"])
        self.nodes[1].generate(1)
        self.nodes[1].sendrawtransaction(conflicted["hex"])
        self.nodes[1].generate(1)

        # Node0's wallet file is loaded on node1, which has synced the longer chain
        self.stop_node(1)
        self.nodes[0].backupwallet(
            os.path.join(self.nodes[0].datadir, 'wallet.bak'))
        shutil.copyfile(
            os.path.join(self.nodes[0].datadir, 'wallet.bak'),
            os.path.join(self.nodes[1].datadir, 'regtest', 'wallet.dat'))
        self.start_node(1)
        tx_after_reorg = self.nodes[1].gettransaction(txid)
        # Check that the normally confirmed tx is confirmed again, but with a different blockhash
        assert_equal(tx_after_reorg["confirmations"], 2)
        assert (tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"])
        conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid)
        # Check that the conflicted tx is confirmed again, with a blockhash different from the previously conflicting tx
        assert_equal(conflicted_after_reorg["confirmations"], 1)
        assert (conflicting["blockhash"] !=
                conflicted_after_reorg["blockhash"])
Example #12
    def run_test(self):
        self.nodes[0].generate(5)
        sync_blocks(self.nodes)
        self.nodes[1].generate(110)
        sync_blocks(self.nodes)
        balance = self.nodes[0].getbalance()
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 10)
        sync_mempools(self.nodes)
        self.nodes[1].generate(1)

        sync_blocks(self.nodes)
        newbalance = self.nodes[0].getbalance()
        assert (balance - newbalance < Decimal("0.001"))  # no more than fees lost
        balance = newbalance

        # Disconnect nodes so node0's transactions don't get into node1's mempool
        disconnect_nodes(self.nodes[0], 1)

        # Identify the 10btc outputs
        nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txA, 1)["vout"]) if vout["value"] == 10)
        nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txB, 1)["vout"]) if vout["value"] == 10)
        nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txC, 1)["vout"]) if vout["value"] == 10)

        inputs = []
        # spend 10btc outputs from txA and txB
        inputs.append({"txid": txA, "vout": nA})
        inputs.append({"txid": txB, "vout": nB})
        outputs = {}

        outputs[self.nodes[0].getnewaddress()] = 14.99998
        outputs[self.nodes[1].getnewaddress()] = 5
        signed = self.nodes[0].signrawtransaction(
            self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])

        # Identify the 14.99998btc output
        nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(
            txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))

        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid": txAB1, "vout": nAB})
        inputs.append({"txid": txC, "vout": nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = 24.9996
        signed2 = self.nodes[0].signrawtransaction(
            self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])

        # Create a child tx spending ABC2
        inputs = []
        inputs.append({"txid": txABC2, "vout": 0})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = 24.999
        signed3 = self.nodes[0].signrawtransaction(
            self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])

        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(
            newbalance,
            Decimal(round(balance - Decimal("30") + Decimal(24.999), 8)))
        balance = newbalance

        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        # Note had to make sure tx did not have AllowFree priority
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])

        # Verify txs no longer in mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.999"))
        # Unconfirmed received funds that are not in the mempool also shouldn't show
        # up in the unconfirmed balance
        unconfbalance = self.nodes[0].getunconfirmedbalance(
        ) + self.nodes[0].getbalance()
        assert_equal(unconfbalance, newbalance)
        # Also shouldn't show up in listunspent
        assert (not txABC2
                in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
        balance = newbalance

        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance

        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)

        # But if it is received again, it is unabandoned.
        # And since it is now in the mempool, the change is available.
        # But its child tx remains abandoned.
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance

        # Send the child tx again so it is unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(
            newbalance,
            balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance

        # Remove using high relay fee again
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance

        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs = []
        inputs.append({"txid": txA, "vout": nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = 9.9999
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransaction(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)

        connect_nodes(self.nodes[0], 1)
        sync_blocks(self.nodes)

        # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance

        # A minor bug currently prevents this check from working.  See Issue #7315
        # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
        # C's 10 BTC output probably shouldn't be available either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        print("conflicted has not resumed causing its inputs to be seen as spent.  See Issue #7315")
        print(str(balance) + " -> " + str(newbalance) + " ?")
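The balance arithmetic above follows one rule: a wallet transaction's inputs are treated as spent as soon as the wallet knows about the transaction, but its change is only credited while the transaction is in the mempool (or confirmed) and not abandoned. A minimal sketch of that rule, using hypothetical amounts rather than the test's own values:

from decimal import Decimal

def expected_balance(start, spent_inputs, change, in_mempool, abandoned):
    # Hypothetical illustration, not part of the test above: inputs are
    # deducted once the wallet sees the tx, change is credited only while
    # the tx is in the mempool and not abandoned, and abandoning the tx
    # releases its inputs again.
    if abandoned:
        return start
    balance = start - spent_inputs
    if in_mempool:
        balance += change
    return balance

# e.g. spending 30 BTC with 24.999 BTC change while the tx sits in the mempool
assert expected_balance(Decimal("100"), Decimal("30"), Decimal("24.999"),
                        in_mempool=True, abandoned=False) == Decimal("94.999")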
Example #13
0
    def run_test(self):
        proposer = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]

        self.setup_stake_coins(*self.nodes)

        # Leave IBD
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        sync_blocks([proposer, finalizer1, finalizer2], timeout=10)

        finalizer1_address = finalizer1.getnewaddress('', 'legacy')

        # create deposits
        # F
        # e0 - e1
        #      d1
        #      d2
        d1 = finalizer1.deposit(finalizer1_address, 1500)
        d2 = finalizer2.deposit(finalizer2.getnewaddress('', 'legacy'), 1500)
        self.wait_for_transaction(d1, timeout=10)
        self.wait_for_transaction(d2, timeout=10)
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        sync_blocks([proposer, finalizer1, finalizer2], timeout=10)
        disconnect_nodes(finalizer1, proposer.index)
        disconnect_nodes(finalizer2, proposer.index)
        assert_equal(proposer.getblockcount(), 2)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 0,
                'currentEpoch': 1,
                'lastJustifiedEpoch': 0,
                'lastFinalizedEpoch': 0,
                'validators': 0
            })
        self.log.info('deposits are created')

        # Generate enough blocks to activate deposits
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        #      d1
        #      d2
        proposer.generatetoaddress(3 + 5 + 5 + 5 + 5,
                                   proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 25)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 2,
                'currentEpoch': 5,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 3,
                'validators': 0
            })

        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 26)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 3,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 3,
                'validators': 2
            })
        self.log.info('finalizers are created')

        # Logout finalizer1
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        #      d1                       l1
        #      d2
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)

        # TODO UNIT-E: logout tx can't be created if its vote is not in the block
        # we should check that input of logout tx is in the mempool too
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))

        connect_nodes(finalizer1, proposer.index)
        sync_blocks([finalizer1, proposer], timeout=10)
        l1 = finalizer1.logout()
        wait_until(lambda: l1 in proposer.getrawmempool(), timeout=10)
        disconnect_nodes(finalizer1, proposer.index)

        proposer.generatetoaddress(3, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 30)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 3,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 4,
                'validators': 2
            })
        self.log.info('finalizer1 logged out in dynasty=3')

        # During LOGOUT_DYNASTY_DELAY both finalizers can vote.
        # Since finalization happens at every epoch,
        # the number of dynasties is equal to the number of epochs.
        for _ in range(LOGOUT_DYNASTY_DELAY):
            proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
            self.wait_for_vote_and_disconnect(finalizer=finalizer1,
                                              node=proposer)
            self.wait_for_vote_and_disconnect(finalizer=finalizer2,
                                              node=proposer)
            proposer.generatetoaddress(4, proposer.getnewaddress('', 'bech32'))

        assert_equal(proposer.getblockcount(), 45)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 6,
                'currentEpoch': 9,
                'lastJustifiedEpoch': 8,
                'lastFinalizedEpoch': 7,
                'validators': 2
            })

        self.log.info('finalizer1 voted during logout delay successfully')

        # During WITHDRAW_DELAY finalizer1 can't vote and can't withdraw
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 7,
                'currentEpoch': 10,
                'lastJustifiedEpoch': 8,
                'lastFinalizedEpoch': 7,
                'validators': 1
            })
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 7,
                'currentEpoch': 10,
                'lastJustifiedEpoch': 9,
                'lastFinalizedEpoch': 8,
                'validators': 1
            })

        # finalizer1 can't vote so we keep it connected
        connect_nodes(finalizer1, proposer.index)
        time.sleep(2)  # ensure no votes from finalizer1
        assert_equal(len(proposer.getrawmempool()), 0)

        proposer.generatetoaddress(3, proposer.getnewaddress('', 'bech32'))
        assert_equal(proposer.getblockcount(), 50)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 7,
                'currentEpoch': 10,
                'lastJustifiedEpoch': 9,
                'lastFinalizedEpoch': 8,
                'validators': 1
            })

        # WITHDRAW_EPOCH_DELAY - 2 is because:
        # -1 as we already went through the first epoch manually above
        # -1 as at this epoch we should already be able to withdraw
        for _ in range(WITHDRAW_EPOCH_DELAY - 2):
            proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
            self.wait_for_vote_and_disconnect(finalizer=finalizer2,
                                              node=proposer)
            proposer.generatetoaddress(4, proposer.getnewaddress('', 'bech32'))

        assert_equal(proposer.getblockcount(), 100)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 17,
                'currentEpoch': 20,
                'lastJustifiedEpoch': 19,
                'lastFinalizedEpoch': 18,
                'validators': 1
            })

        # last block at which finalizer1 can't withdraw
        # TODO UNIT-E: allow creating a withdraw tx on a checkpoint
        # as it will only be added to a block in the next epoch.
        # We have a known issue https://github.com/dtr-org/unit-e/issues/643
        # that a finalizer can't vote after the checkpoint is processed; it looks like
        # the finalizer can't create any finalizer commits at this point (and only at this point).
        assert_raises_rpc_error(-8, 'Cannot send withdraw transaction.',
                                finalizer1.withdraw, finalizer1_address)

        self.log.info(
            'finalizer1 could not withdraw during WITHDRAW_DELAY period')

        # test that deposit can be withdrawn
        # e0 - e1 - ... - e6 - ... - e21[101, 102]
        #      d1         l1                  w1
        #      d2
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 101)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 18,
                'currentEpoch': 21,
                'lastJustifiedEpoch': 19,
                'lastFinalizedEpoch': 18,
                'validators': 1
            })
        sync_blocks([proposer, finalizer1], timeout=10)
        w1 = finalizer1.withdraw(finalizer1_address)
        wait_until(lambda: w1 in proposer.getrawmempool(), timeout=10)
        proposer.generatetoaddress(1, proposer.getnewaddress('', 'bech32'))
        sync_blocks([proposer, finalizer1])

        self.log.info('finalizer1 was able to withdraw deposit at dynasty=18')

        # test that withdraw commit can be spent
        # e0 - e1 - ... - e6 - ... - e21[101, 102, 103]
        #      d1         l1                  w1   spent_w1
        #      d2
        spent_w1_raw = finalizer1.createrawtransaction(
            [{
                'txid': w1,
                'vout': 0
            }], {finalizer1_address: Decimal('1499.999')})
        spent_w1_signed = finalizer1.signrawtransaction(spent_w1_raw)
        spent_w1 = finalizer1.sendrawtransaction(spent_w1_signed['hex'])
        self.wait_for_transaction(spent_w1, nodes=[proposer])

        # mine block
        block_hash = proposer.generatetoaddress(
            1, proposer.getnewaddress('', 'bech32'))[0]
        assert spent_w1 in proposer.getblock(block_hash)['tx']

        self.log.info('finalizer1 was able to spend withdraw commit')

        # Test that after withdraw the node can deposit again
        sync_blocks([proposer, finalizer1], timeout=10)
        assert_equal(proposer.getblockcount(), 103)
        wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] ==
                   'NOT_VALIDATING',
                   timeout=5)
        deposit = finalizer1.deposit(finalizer1.getnewaddress('', 'legacy'),
                                     1500)
        wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] ==
                   'WAITING_DEPOSIT_CONFIRMATION',
                   timeout=5)

        self.wait_for_transaction(deposit,
                                  timeout=10,
                                  nodes=[proposer, finalizer1])
        proposer.generate(1)
        sync_blocks([proposer, finalizer1], timeout=10)
        assert_equal(proposer.getblockcount(), 104)

        wait_until(lambda: finalizer1.getvalidatorinfo()['validator_status'] ==
                   'WAITING_DEPOSIT_FINALIZATION',
                   timeout=20)

        self.log.info('finalizer1 deposits again')

        disconnect_nodes(finalizer1, proposer.index)

        proposer.generate(2)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 106)

        proposer.generate(5)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 111)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 20,
                'currentEpoch': 23,
                'lastJustifiedEpoch': 21,
                'lastFinalizedEpoch': 20,
                'validators': 1
            })

        proposer.generate(5)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 116)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 21,
                'currentEpoch': 24,
                'lastJustifiedEpoch': 22,
                'lastFinalizedEpoch': 21,
                'validators': 2
            })

        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
        self.log.info('finalizer1 votes again')
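Example #13 (and several of the examples below) relies on an assert_finalizationstate helper that is not shown in these snippets. A minimal sketch of what such a helper might do, assuming the node exposes a getfinalizationstate RPC whose result contains the keys used above:

def assert_finalizationstate(node, expected):
    # Sketch of the assumed helper: fetch the node's finalization state and
    # compare only the keys the caller passed in.
    state = node.getfinalizationstate()
    for key, want in expected.items():
        assert state[key] == want, "%s: expected %s, got %s" % (key, want, state[key])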
Example #14
0
 def disconnect_all(self):
     disconnect_nodes(self.nodes[0], self.nodes[1])
     disconnect_nodes(self.nodes[1], self.nodes[0])
     disconnect_nodes(self.nodes[2], self.nodes[1])
     disconnect_nodes(self.nodes[2], self.nodes[0])
     disconnect_nodes(self.nodes[0], self.nodes[2])
     disconnect_nodes(self.nodes[1], self.nodes[2])
Example #15
0
    def run_test(self):
        def assert_vote(vote_raw_tx, input_raw_tx, source_epoch, target_epoch,
                        target_hash):
            vote_tx = FromHex(CTransaction(), vote_raw_tx)
            assert vote_tx.is_finalizer_commit()

            input_tx = FromHex(CTransaction(), input_raw_tx)
            input_tx.rehash()
            prevout = "%064x" % vote_tx.vin[0].prevout.hash
            assert_equal(prevout, input_tx.hash)

            vote = self.nodes[0].extractvotefromsignature(
                bytes_to_hex_str(vote_tx.vin[0].scriptSig))
            assert_equal(vote['source_epoch'], source_epoch)
            assert_equal(vote['target_epoch'], target_epoch)
            assert_equal(vote['target_hash'], target_hash)

        fork0 = self.nodes[0]
        fork1 = self.nodes[1]
        finalizer = self.nodes[2]  # main finalizer being checked
        finalizer2 = self.nodes[3]  # secondary finalizer to control finalization

        self.setup_stake_coins(fork0, fork1, finalizer, finalizer2)

        connect_nodes(fork0, fork1.index)
        connect_nodes(fork0, finalizer.index)
        connect_nodes(fork0, finalizer2.index)

        # leave IBD
        fork0.generatetoaddress(1, fork0.getnewaddress('', 'bech32'))
        sync_blocks(self.nodes)

        # deposit
        d1_hash = finalizer.deposit(finalizer.getnewaddress('', 'legacy'),
                                    1500)
        d2_hash = finalizer2.deposit(finalizer2.getnewaddress('', 'legacy'),
                                     4000)
        d1 = finalizer.getrawtransaction(d1_hash)
        self.wait_for_transaction(d1_hash, timeout=10)
        self.wait_for_transaction(d2_hash, timeout=10)
        fork0.generatetoaddress(1, fork0.getnewaddress('', 'bech32'))
        disconnect_nodes(fork0, finalizer.index)
        disconnect_nodes(fork0, finalizer2.index)

        # leave instant justification
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        fork0.generatetoaddress(3 + 5 + 5 + 5 + 5 + 1,
                                fork0.getnewaddress('', 'bech32'))
        assert_equal(fork0.getblockcount(), 26)
        assert_finalizationstate(
            fork0, {
                'currentDynasty': 3,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 3,
                'validators': 2
            })

        # move tip to one block before checkpoint to be able to
        # revert checkpoint on the fork
        #       J           v0
        # ... - e5 - e6[26, 27, 28, 29] fork0
        #                            \
        #                             - fork1
        v0 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork0)
        assert_vote(vote_raw_tx=v0,
                    input_raw_tx=d1,
                    source_epoch=4,
                    target_epoch=5,
                    target_hash=fork0.getblockhash(25))
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork0)
        fork0.generatetoaddress(3, fork0.getnewaddress('', 'bech32'))
        sync_blocks([fork0, fork1], timeout=10)
        disconnect_nodes(fork0, fork1.index)
        assert_equal(fork0.getblockcount(), 29)
        assert_finalizationstate(
            fork0, {
                'currentDynasty': 3,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 4,
                'validators': 2
            })

        # vote v1 on target_epoch=6 target_hash=30
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \
        #                             - fork1
        fork0.generatetoaddress(2, fork0.getnewaddress('', 'bech32'))
        assert_equal(fork0.getblockcount(), 31)
        v1 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork0)
        assert_vote(vote_raw_tx=v1,
                    input_raw_tx=v0,
                    source_epoch=5,
                    target_epoch=6,
                    target_hash=fork0.getblockhash(30))
        fork0.generatetoaddress(1, fork0.getnewaddress('', 'bech32'))
        connect_nodes(finalizer, fork0.index)
        sync_blocks([finalizer, fork0], timeout=10)
        disconnect_nodes(finalizer, fork0.index)
        assert_equal(fork0.getblockcount(), 32)
        assert_equal(finalizer.getblockcount(), 32)
        self.log.info('finalizer successfully voted on the checkpoint')

        # re-org last checkpoint and check that finalizer doesn't vote
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \
        #                             - 30] - e7[31, 32, 33] fork1
        fork1.generatetoaddress(4, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork1.getblockcount(), 33)
        connect_nodes(finalizer, fork1.index)
        sync_blocks([finalizer, fork1], timeout=10)
        assert_equal(finalizer.getblockcount(), 33)
        assert_equal(len(fork1.getrawmempool()), 0)
        disconnect_nodes(finalizer, fork1.index)
        self.log.info(
            'finalizer successfully detected potential double vote and did not vote'
        )

        # continue to new epoch and check that finalizer votes on fork1
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \                         v2
        #                             - 30] - e7[...] - e8[36, 37] fork1
        fork1.generatetoaddress(3, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork1.getblockcount(), 36)
        v2 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork1)
        assert_vote(vote_raw_tx=v2,
                    input_raw_tx=v0,
                    source_epoch=5,
                    target_epoch=7,
                    target_hash=fork1.getblockhash(35))
        fork1.generatetoaddress(1, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork1.getblockcount(), 37)

        # create new epoch on fork1 and check that finalizer votes
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \                         v2                v3
        #                             - 30] - e7[...] - e8[36, 37, ...] - e9[41, 42] fork1
        fork1.generatetoaddress(4, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork1.getblockcount(), 41)
        v3 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork1)
        assert_vote(vote_raw_tx=v3,
                    input_raw_tx=v2,
                    source_epoch=5,
                    target_epoch=8,
                    target_hash=fork1.getblockhash(40))
        fork1.generatetoaddress(1, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork1.getblockcount(), 42)

        # create longer fork0 and check that after reorg finalizer doesn't vote
        #       J           v0                v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7 - e8 - e9[41,42, 43] fork0
        #                            \             v2          v3
        #                             - 30] - e7 - e8 - e9[41, 42] fork1
        fork0.generatetoaddress(11, fork0.getnewaddress('', 'bech32'))
        assert_equal(fork0.getblockcount(), 43)
        connect_nodes(finalizer, fork0.index)
        sync_blocks([finalizer, fork0])
        assert_equal(finalizer.getblockcount(), 43)
        assert_equal(len(fork0.getrawmempool()), 0)
        disconnect_nodes(finalizer, fork0.index)
        self.log.info(
            'finalizer successfully detected potential two consecutive double votes and did not vote'
        )

        # check that finalizer can vote from next epoch on fork0
        #       J           v0                v1                          v4
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7 - e8 - e9[...] - e10[46, 47] fork0
        #                            \             v2          v3
        #                             - 30] - e7 - e8 - e9[41, 42] fork1
        fork0.generatetoaddress(3, fork0.getnewaddress('', 'bech32'))
        assert_equal(fork0.getblockcount(), 46)
        v4 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork0)
        assert_vote(vote_raw_tx=v4,
                    input_raw_tx=v1,
                    source_epoch=5,
                    target_epoch=9,
                    target_hash=fork0.getblockhash(45))
        fork0.generatetoaddress(1, fork0.getnewaddress('', 'bech32'))
        assert_equal(fork0.getblockcount(), 47)

        # finalize epoch8 on fork1 and re-broadcast all vote txs
        # which must not create slash tx
        #       J           v0                v1                                      v4
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7 - e8[   ...    ] - e9[...] - e10[46, 47] fork0
        #                            \             F      v2        J      v3
        #                             - 30] - e7 - e8[36, 37,...] - e9[41, 42, 43] - e10[46, 47] fork1
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork1)
        fork1.generatetoaddress(1, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork1.getblockcount(), 43)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 4,
                'currentEpoch': 9,
                'lastJustifiedEpoch': 8,
                'lastFinalizedEpoch': 4,
                'validators': 2
            })

        fork1.generatetoaddress(3, fork1.getnewaddress())
        assert_equal(fork1.getblockcount(), 46)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork1)
        fork1.generatetoaddress(1, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork1.getblockcount(), 47)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 4,
                'currentEpoch': 10,
                'lastJustifiedEpoch': 9,
                'lastFinalizedEpoch': 8,
                'validators': 2
            })

        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork1.sendrawtransaction, v1)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork1.sendrawtransaction, v4)
        assert_equal(len(fork1.getrawmempool()), 0)

        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork0.sendrawtransaction, v2)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork0.sendrawtransaction, v3)
        assert_equal(len(fork0.getrawmempool()), 0)
        self.log.info('re-broadcasting existing votes did not create slash tx')
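Several of these examples also call wait_for_vote_and_disconnect, which is not shown either. A plausible sketch, assuming the framework's connect_nodes, disconnect_nodes and wait_until helpers: connect the finalizer just long enough for its vote to reach the node's mempool, then return the raw vote transaction so later votes (and the re-broadcast checks above) can use it:

def wait_for_vote_and_disconnect(self, finalizer, node, timeout=10):
    # Sketch of the assumed helper: let the finalizer see the node's tip,
    # wait for its vote to land in the node's mempool, then disconnect and
    # hand back the raw vote tx.
    connect_nodes(finalizer, node.index)
    wait_until(lambda: len(node.getrawmempool()) > 0, timeout=timeout)
    vote_raw_tx = node.getrawtransaction(node.getrawmempool()[0])
    disconnect_nodes(finalizer, node.index)
    return vote_raw_tx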
Example #16
0
 def _disconnect_all_nodes(self):
     # Split the network
     for n1 in self.nodes:
         for n2 in self.nodes:
             if n1 is not n2:
                 disconnect_nodes(n1, n2)
Example #17
0
    def run_test(self):
        p0, p1, p2, v0 = self.nodes

        self.setup_stake_coins(p0, p1, p2, v0)

        # Leave IBD
        self.generate_sync(p0)

        self.log.info("Setup deposit")
        setup_deposit(self, p0, [v0])
        sync_blocks([p0, p1, p2, v0])

        self.log.info("Setup test prerequisites")
        # get up to block 49, just one before the new checkpoint
        generate_block(p0, count=18)

        assert_equal(p0.getblockcount(), 49)
        sync_blocks([p0, p1, p2, v0])

        assert_finalizationstate(p0, {
            'currentEpoch': 5,
            'lastJustifiedEpoch': 4,
            'lastFinalizedEpoch': 3
        })

        # disconnect p0
        # v0: p1, p2
        # p0:
        # p1: v0
        # p2: v0
        disconnect_nodes(p0, v0.index)
        disconnect_nodes(p0, p1.index)

        # disconnect p2
        # v0: p1
        # p0:
        # p1: v0
        # p2:
        disconnect_nodes(p2, v0.index)

        # disconnect p1
        # v0:
        # p0:
        # p1:
        # p2:
        disconnect_nodes(p1, v0.index)

        # generate long chain in p0 but don't justify it
        #  F     J
        # 30 .. 40 .. 89    -- p0
        generate_block(p0, count=40)

        assert_equal(p0.getblockcount(), 89)
        assert_finalizationstate(p0, {
            'currentEpoch': 9,
            'lastJustifiedEpoch': 4,
            'lastFinalizedEpoch': 3
        })

        # generate short chain in p1 and justify it
        # on the 6th and 7th epochs sync with validator
        #  F     J
        # 30 .. 40 .. 49 .. .. .. .. .. .. 89    -- p0
        #               \
        #                50 .. 60 .. 69          -- p1
        #                 F     J
        # get to the 6th epoch
        generate_block(p1, count=2)
        self.wait_for_vote_and_disconnect(finalizer=v0, node=p1)
        # get to the 7th epoch
        generate_block(p1, count=10)
        self.wait_for_vote_and_disconnect(finalizer=v0, node=p1)
        # generate the rest of the blocks
        generate_block(p1, count=8)
        connect_nodes(p1, v0.index)
        sync_blocks([p1, v0])

        assert_equal(p1.getblockcount(), 69)
        assert_finalizationstate(p1, {
            'currentEpoch': 7,
            'lastJustifiedEpoch': 6,
            'lastFinalizedEpoch': 5
        })

        # connect p2 with p0 and p1; p2 must switch to the longest justified p1
        # v0: p1
        # p0: p2
        # p1: v0, p2
        # p2: p0, p1
        self.log.info("Test fresh node sync")
        connect_nodes(p2, p0.index)
        connect_nodes(p2, p1.index)

        sync_blocks([p1, p2])
        assert_equal(p1.getblockcount(), 69)
        assert_equal(p2.getblockcount(), 69)

        assert_finalizationstate(p1, {
            'currentEpoch': 7,
            'lastJustifiedEpoch': 6,
            'lastFinalizedEpoch': 5
        })
        assert_finalizationstate(p2, {
            'currentEpoch': 7,
            'lastJustifiedEpoch': 6,
            'lastFinalizedEpoch': 5
        })

        # connect p0 with p1; p0 must reorg away from its longer but unjustified fork and choose p1
        # v0: p1
        # p0: p1, p2
        # p1: v0, p0, p2
        # p2: p0, p1
        self.log.info("Test longest node reverts to justified")
        connect_nodes(p0, p1.index)
        sync_blocks([p0, p1])

        # check that p0 accepted the chain that is shorter in blocks but carries the longest justification
        assert_equal(p0.getblockcount(), 69)
        assert_equal(p1.getblockcount(), 69)
        assert_equal(v0.getblockcount(), 69)

        # generate more blocks to make sure they're processed
        self.log.info("Test all nodes continue to work as usual")
        generate_block(p0, count=30)
        sync_blocks([p0, p1, p2, v0])
        assert_equal(p0.getblockcount(), 99)

        generate_block(p1, count=30)
        sync_blocks([p0, p1, p2, v0])
        assert_equal(p1.getblockcount(), 129)

        generate_block(p2, count=30)
        sync_blocks([p0, p1, p2, v0])
        assert_equal(p2.getblockcount(), 159)

        # disconnect all nodes
        # v0:
        # p0:
        # p1:
        # p2:
        self.log.info("Test nodes sync after reconnection")
        disconnect_nodes(v0, p1.index)
        disconnect_nodes(p0, p1.index)
        disconnect_nodes(p0, p2.index)
        disconnect_nodes(p1, p2.index)

        generate_block(p0, count=10)
        generate_block(p1, count=20)
        generate_block(p2, count=30)

        assert_equal(p0.getblockcount(), 169)
        assert_equal(p1.getblockcount(), 179)
        assert_equal(p2.getblockcount(), 189)

        # connect validator back to p1
        # v0: p1
        # p0: p1
        # p1: v0, p0, p2
        # p2: p1
        connect_nodes(p1, v0.index)
        sync_blocks([p1, v0])
        connect_nodes(p1, p0.index)
        connect_nodes(p1, p2.index)
        sync_blocks([p0, p1, p2, v0])
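Example #17 above and the next example both assume a generate_block helper. Given how it is called (e.g. generate_block(p0, count=18)), it is presumably a thin wrapper around generatetoaddress; a sketch under that assumption:

def generate_block(node, count=1):
    # Sketch of the assumed helper: mine `count` blocks to a fresh bech32
    # address owned by the node and return their hashes.
    return node.generatetoaddress(count, node.getnewaddress('', 'bech32'))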
Example #18
0
    def run_test(self):
        def sync_node_to_fork(node, fork, force=False):
            if force:
                self.restart_node(node.index, cleanup=True)
                node.importmasterkey(
                    regtest_mnemonics[node.index]['mnemonics'])
            connect_nodes(node, fork.index)
            block_hash = fork.getblockhash(fork.getblockcount())
            node.waitforblock(block_hash, 5000)
            assert_equal(node.getblockhash(node.getblockcount()), block_hash)
            disconnect_nodes(node, fork.index)

        def generate_epoch_and_vote(node, finalizer, finalizer_address,
                                    prevtx):
            assert node.getblockcount() % 5 == 0
            fs = node.getfinalizationstate()
            checkpoint = node.getbestblockhash()
            generate_block(node)
            vtx = make_vote_tx(finalizer,
                               finalizer_address,
                               checkpoint,
                               source_epoch=fs['lastJustifiedEpoch'],
                               target_epoch=fs['currentEpoch'],
                               input_tx_id=prevtx)
            node.sendrawtransaction(vtx)
            generate_block(node, count=4)
            vtx = FromHex(CTransaction(), vtx)
            vtx.rehash()
            return vtx.hash

        node = self.nodes[0]
        fork1 = self.nodes[1]
        fork2 = self.nodes[2]
        finalizer = self.nodes[3]

        node.importmasterkey(regtest_mnemonics[0]['mnemonics'])
        fork1.importmasterkey(regtest_mnemonics[1]['mnemonics'])
        fork2.importmasterkey(regtest_mnemonics[2]['mnemonics'])
        finalizer.importmasterkey(regtest_mnemonics[3]['mnemonics'])

        connect_nodes(node, fork1.index)
        connect_nodes(node, fork2.index)
        connect_nodes(node, finalizer.index)

        # leave IBD
        self.generate_sync(node, 1)

        finalizer_address = finalizer.getnewaddress('', 'legacy')
        deptx = finalizer.deposit(finalizer_address, 1500)
        self.wait_for_transaction(deptx)

        # leave instant justification
        #                   -  fork1
        # F    F    F       |
        # e0 - e1 - e2 - e3 -  node
        #                   |
        #                   -  fork2
        generate_block(node, count=14)
        assert_equal(node.getblockcount(), 15)
        sync_blocks([node, finalizer])
        assert_finalizationstate(
            node, {
                'currentDynasty': 1,
                'currentEpoch': 3,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 0
            })
        sync_blocks(self.nodes)
        disconnect_nodes(node, fork1.index)
        disconnect_nodes(node, fork2.index)
        disconnect_nodes(node, finalizer.index)

        # create first justified epoch on fork1
        #                     J
        #                   - e4 - e5 - e6 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |
        #                   -  fork2

        generate_block(fork1, count=5)
        vtx1 = generate_epoch_and_vote(fork1, finalizer, finalizer_address,
                                       deptx)
        generate_block(fork1, count=5)
        assert_equal(fork1.getblockcount(), 30)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        sync_node_to_fork(node, fork1)

        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info('node successfully switched to the justified fork')

        # create longer justified epoch on fork2
        # node must switch ("zig") to this fork
        #                     J
        #                   - e4 - e5 - e6 fork1
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J
        #                   - e4 - e5 - e6 fork2 node

        generate_block(fork2, count=10)
        vtx2 = generate_epoch_and_vote(fork2, finalizer, finalizer_address,
                                       deptx)
        assert_equal(fork2.getblockcount(), 30)
        assert_finalizationstate(
            fork2, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        sync_node_to_fork(node, fork2)

        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info(
            'node successfully switched to the longest justified fork')

        # create longer justified epoch on the previous fork1
        # node must switch ("zag") to this fork
        #                     J              J
        #                   - e4 - e5 - e6 - e7 - e8 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J
        #                   - e4 - e5 - e6 fork2
        generate_block(fork1, count=5)
        sync_node_to_fork(finalizer, fork1)
        vtx1 = generate_epoch_and_vote(fork1, finalizer, finalizer_address,
                                       vtx1)
        assert_equal(fork1.getblockcount(), 40)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 2,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        assert_not_equal(fork1.getbestblockhash(), fork2.getbestblockhash())
        sync_node_to_fork(node, fork1)
        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info(
            'node successfully switched back to the longest justified fork')

        # UNIT-E TODO: node must follow longest finalized chain
        # node follows longest finalization
        #                     J              J
        #                   - e4 - e5 - e6 - e7 - e8 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J    F
        #                   - e4 - e5 - e6 - e7 fork2
        sync_node_to_fork(finalizer, fork2, force=True)
        vtx2 = generate_epoch_and_vote(fork2, finalizer, finalizer_address,
                                       vtx2)
        assert_equal(fork2.getblockcount(), 35)
        assert_finalizationstate(
            fork2, {
                'currentDynasty': 2,
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 6,
                'validators': 1
            })
Example #19
0
 def sync_node_to_fork(node, fork):
     connect_nodes(node, fork.index)
     block_hash = fork.getblockhash(fork.getblockcount())
     node.waitforblock(block_hash, 5000)
     assert_equal(node.getblockhash(node.getblockcount()), block_hash)
     disconnect_nodes(node, fork.index)
Example #20
0
    def run_test(self):
        parent = self.nodes[0]
        #parent2 = self.nodes[1]
        sidechain = self.nodes[2]
        sidechain2 = self.nodes[3]
        for node in self.nodes:
            node.importprivkey(privkey=node.get_deterministic_priv_key().key, label="mining")
        util.node_fastmerkle = sidechain

        parent.generate(101)
        sidechain.generate(101)
        self.log.info("sidechain info: {}".format(sidechain.getsidechaininfo()))

        addrs = sidechain.getpeginaddress()
        addr = addrs["mainchain_address"]
        assert_equal(sidechain.decodescript(addrs["claim_script"])["type"], "witness_v0_keyhash")
        txid1 = parent.sendtoaddress(addr, 24)
        # 10+2 confirms required to get into mempool and confirm
        assert_equal(sidechain.getsidechaininfo()["pegin_confirmation_depth"], 10)
        parent.generate(1)
        time.sleep(2)
        proof = parent.gettxoutproof([txid1])

        raw = parent.gettransaction(txid1)["hex"]

        print("Attempting peg-ins")
        # First attempt fails the consensus check but gives useful result
        try:
            pegtxid = sidechain.claimpegin(raw, proof)
            raise Exception("Peg-in should not be mature enough yet, need another block.")
        except JSONRPCException as e:
            assert("Peg-in Bitcoin transaction needs more confirmations to be sent." in e.error["message"])

        # Second attempt simply doesn't hit mempool bar
        parent.generate(10)
        try:
            pegtxid = sidechain.claimpegin(raw, proof)
            raise Exception("Peg-in should not be mature enough yet, need another block.")
        except JSONRPCException as e:
            assert("Peg-in Bitcoin transaction needs more confirmations to be sent." in e.error["message"])

        try:
            pegtxid = sidechain.createrawpegin(raw, proof, 'AEIOU')
            raise Exception("Peg-in with non-hex claim_script should fail.")
        except JSONRPCException as e:
            assert("Given claim_script is not hex." in e.error["message"])

        # Should fail due to non-matching wallet address
        try:
            scriptpubkey = sidechain.getaddressinfo(get_new_unconfidential_address(sidechain))["scriptPubKey"]
            pegtxid = sidechain.claimpegin(raw, proof, scriptpubkey)
            raise Exception("Peg-in with non-matching claim_script should fail.")
        except JSONRPCException as e:
            assert("Given claim_script does not match the given Bitcoin transaction." in e.error["message"])

        # 12 confirms allows it into the mempool
        parent.generate(1)

        # Make sure that a tx with a duplicate pegin claim input gets rejected.
        raw_pegin = sidechain.createrawpegin(raw, proof)["hex"]
        raw_pegin = FromHex(CTransaction(), raw_pegin)
        raw_pegin.vin.append(raw_pegin.vin[0]) # duplicate the pegin input
        raw_pegin = sidechain.signrawtransactionwithwallet(raw_pegin.serialize().hex())["hex"]
        assert_raises_rpc_error(-26, "bad-txns-inputs-duplicate", sidechain.sendrawtransaction, raw_pegin)
        # Also try including this tx in a block manually and submitting it.
        doublespendblock = FromHex(CBlock(), sidechain.getnewblockhex())
        doublespendblock.vtx.append(FromHex(CTransaction(), raw_pegin))
        doublespendblock.hashMerkleRoot = doublespendblock.calc_merkle_root()
        add_witness_commitment(doublespendblock)
        doublespendblock.solve()
        block_hex = bytes_to_hex_str(doublespendblock.serialize(True))
        assert_raises_rpc_error(-25, "bad-txns-inputs-duplicate", sidechain.testproposedblock, block_hex, True)

        # Should succeed via wallet lookup for address match, and when given
        raw_pegin = sidechain.createrawpegin(raw, proof)['hex']
        signed_pegin = sidechain.signrawtransactionwithwallet(raw_pegin)

        sample_pegin_struct = FromHex(CTransaction(), signed_pegin["hex"])
        # Round-trip peg-in transaction using python serialization
        assert_equal(signed_pegin["hex"], sample_pegin_struct.serialize().hex())
        # Store this for later (evil laugh)
        sample_pegin_witness = sample_pegin_struct.wit.vtxinwit[0].peginWitness

        pegtxid1 = sidechain.claimpegin(raw, proof)
        # Make sure a second pegin claim does not get accepted in the mempool when
        # another mempool tx already claims that pegin.
        assert_raises_rpc_error(-4, "txn-mempool-conflict", sidechain.claimpegin, raw, proof)

        # Will invalidate the block that confirms this transaction later
        self.sync_all(self.node_groups)
        blockhash = sidechain2.generate(1)
        self.sync_all(self.node_groups)
        sidechain.generate(5)

        tx1 = sidechain.gettransaction(pegtxid1)

        if "confirmations" in tx1 and tx1["confirmations"] == 6:
            print("Peg-in is confirmed: Success!")
        else:
            raise Exception("Peg-in confirmation has failed.")

        # Look at pegin fields
        decoded = sidechain.decoderawtransaction(tx1["hex"])
        assert decoded["vin"][0]["is_pegin"] == True
        assert len(decoded["vin"][0]["pegin_witness"]) > 0
        # Check that there's sufficient fee for the peg-in
        vsize = decoded["vsize"]
        fee_output = decoded["vout"][1]
        fallbackfee_pervbyte = Decimal("0.00001")/Decimal("1000")
        assert fee_output["scriptPubKey"]["type"] == "fee"
        assert fee_output["value"] >= fallbackfee_pervbyte*vsize

        # Quick reorg checks of pegs
        sidechain.invalidateblock(blockhash[0])
        if sidechain.gettransaction(pegtxid1)["confirmations"] != 0:
            raise Exception("Peg-in didn't unconfirm after invalidateblock call.")

        # Create duplicate claim, put it in block along with current one in mempool
        # to test duplicate-in-block claims between two txs that are in the same block.
        raw_pegin = sidechain.createrawpegin(raw, proof)["hex"]
        raw_pegin = sidechain.signrawtransactionwithwallet(raw_pegin)["hex"]
        raw_pegin = FromHex(CTransaction(), raw_pegin)
        doublespendblock = FromHex(CBlock(), sidechain.getnewblockhex())
        assert(len(doublespendblock.vtx) == 2) # coinbase and pegin
        doublespendblock.vtx.append(raw_pegin)
        doublespendblock.hashMerkleRoot = doublespendblock.calc_merkle_root()
        add_witness_commitment(doublespendblock)
        doublespendblock.solve()
        block_hex = bytes_to_hex_str(doublespendblock.serialize(True))
        assert_raises_rpc_error(-25, "bad-txns-double-pegin", sidechain.testproposedblock, block_hex, True)

        # Re-enters block
        sidechain.generate(1)
        if sidechain.gettransaction(pegtxid1)["confirmations"] != 1:
            raise Exception("Peg-in should have one confirm on side block.")
        sidechain.reconsiderblock(blockhash[0])
        if sidechain.gettransaction(pegtxid1)["confirmations"] != 6:
            raise Exception("Peg-in should be back to 6 confirms.")

        # Now the pegin is already claimed in a confirmed tx.
        # In that case, a duplicate claim should (1) not be accepted in the mempool
        # and (2) not be accepted in a block.
        assert_raises_rpc_error(-4, "pegin-already-claimed", sidechain.claimpegin, raw, proof)
        # For case (2), manually craft a block and include the tx.
        doublespendblock = FromHex(CBlock(), sidechain.getnewblockhex())
        doublespendblock.vtx.append(raw_pegin)
        doublespendblock.hashMerkleRoot = doublespendblock.calc_merkle_root()
        add_witness_commitment(doublespendblock)
        doublespendblock.solve()
        block_hex = bytes_to_hex_str(doublespendblock.serialize(True))
        assert_raises_rpc_error(-25, "bad-txns-double-pegin", sidechain.testproposedblock, block_hex, True)

        # Do multiple claims in mempool
        n_claims = 6

        print("Flooding mempool with a few claims")
        pegtxs = []
        sidechain.generate(101)

        # Do mixture of raw peg-in and automatic peg-in tx construction
        # where raw creation is done on another node
        for i in range(n_claims):
            addrs = sidechain.getpeginaddress()
            txid = parent.sendtoaddress(addrs["mainchain_address"], 1)
            parent.generate(1)
            proof = parent.gettxoutproof([txid])
            raw = parent.gettransaction(txid)["hex"]
            if i % 2 == 0:
                parent.generate(11)
                pegtxs += [sidechain.claimpegin(raw, proof)]
            else:
                # The raw API doesn't check for the additional 2-confirmation buffer,
                # so we only get 10 confirms then send it off. Miners will add it to a block anyway.

                # Don't mature whole way yet to test signing immature peg-in input
                parent.generate(8)
                # Wallet in sidechain2 gets funds instead of sidechain
                raw_pegin = sidechain2.createrawpegin(raw, proof, addrs["claim_script"])["hex"]
                # First node should also be able to make a valid transaction with or without 3rd arg
                # since this wallet originated the claim_script itself
                sidechain.createrawpegin(raw, proof, addrs["claim_script"])
                sidechain.createrawpegin(raw, proof)
                signed_pegin = sidechain.signrawtransactionwithwallet(raw_pegin)
                assert(signed_pegin["complete"])
                assert("warning" in signed_pegin) # warning for immature peg-in
                # fully mature them now
                parent.generate(1)
                pegtxs += [sidechain.sendrawtransaction(signed_pegin["hex"])]

        self.sync_all(self.node_groups)
        sidechain2.generate(1)
        for i, pegtxid in enumerate(pegtxs):
            if i % 2 == 0:
                tx = sidechain.gettransaction(pegtxid)
            else:
                tx = sidechain2.gettransaction(pegtxid)
            if "confirmations" not in tx or tx["confirmations"] == 0:
                raise Exception("Peg-in confirmation has failed.")

        print("Test pegouts")
        self.test_pegout(get_new_unconfidential_address(parent, "legacy"), sidechain)
        self.test_pegout(get_new_unconfidential_address(parent, "p2sh-segwit"), sidechain)
        self.test_pegout(get_new_unconfidential_address(parent, "bech32"), sidechain)

        print("Test pegout P2SH")
        parent_chain_addr = get_new_unconfidential_address(parent)
        parent_pubkey = parent.getaddressinfo(parent_chain_addr)["pubkey"]
        parent_chain_p2sh_addr = parent.createmultisig(1, [parent_pubkey])["address"]
        self.test_pegout(parent_chain_p2sh_addr, sidechain)

        print("Test pegout Garbage")
        parent_chain_addr = "garbage"
        try:
            self.test_pegout(parent_chain_addr, sidechain)
            raise Exception("A garbage address should fail.")
        except JSONRPCException as e:
            assert("Invalid Bitcoin address" in e.error["message"])

        print("Test pegout Garbage valid")
        prev_txid = sidechain.sendtoaddress(sidechain.getnewaddress(), 1)
        sidechain.generate(1)
        pegout_chain = 'a' * 64
        pegout_hex = 'b' * 500
        inputs = [{"txid": prev_txid, "vout": 0}]
        outputs = {"vdata": [pegout_chain, pegout_hex]}
        rawtx = sidechain.createrawtransaction(inputs, outputs)
        raw_pegout = sidechain.decoderawtransaction(rawtx)

        assert 'vout' in raw_pegout and len(raw_pegout['vout']) > 0
        pegout_tested = False
        for output in raw_pegout['vout']:
            scriptPubKey = output['scriptPubKey']
            if 'type' in scriptPubKey and scriptPubKey['type'] == 'nulldata':
                assert ('pegout_hex' in scriptPubKey and 'pegout_asm' in scriptPubKey and 'pegout_type' in scriptPubKey)
                assert ('pegout_chain' in scriptPubKey and 'pegout_reqSigs' not in scriptPubKey and 'pegout_addresses' not in scriptPubKey)
                assert scriptPubKey['pegout_type'] == 'nonstandard'
                assert scriptPubKey['pegout_chain'] == pegout_chain
                assert scriptPubKey['pegout_hex'] == pegout_hex
                pegout_tested = True
                break
        assert pegout_tested

        print("Now test failure to validate peg-ins based on intermittent bitcoind rpc failure")
        self.stop_node(1)
        txid = parent.sendtoaddress(addr, 1)
        parent.generate(12)
        proof = parent.gettxoutproof([txid])
        raw = parent.gettransaction(txid)["hex"]
        sidechain.claimpegin(raw, proof) # stuck peg
        sidechain.generate(1)
        print("Waiting to ensure block is being rejected by sidechain2")
        time.sleep(5)

        assert(sidechain.getblockcount() != sidechain2.getblockcount())

        print("Restarting parent2")
        self.start_node(1)
        connect_nodes_bi(self.nodes, 0, 1)

        # Don't make a block: there is a race condition when a pegin-invalid block
        # is awaiting further validation, and nodes reject subsequent blocks,
        # even ones they create themselves
        print("Now waiting for node to re-evaluate peg-in witness failed block... should take a few seconds")
        self.sync_all(self.node_groups)
        print("Completed!\n")
        print("Now send funds out in two stages, partial, and full")
        some_btc_addr = get_new_unconfidential_address(parent)
        bal_1 = sidechain.getwalletinfo()["balance"]['bitcoin']
        try:
            sidechain.sendtomainchain(some_btc_addr, bal_1 + 1)
            raise Exception("Sending out too much; should have failed")
        except JSONRPCException as e:
            assert("Insufficient funds" in e.error["message"])

        assert(sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)
        try:
            sidechain.sendtomainchain(some_btc_addr+"b", bal_1 - 1)
            raise Exception("Sending to invalid address; should have failed")
        except JSONRPCException as e:
            assert("Invalid Bitcoin address" in e.error["message"])

        assert(sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)
        try:
            sidechain.sendtomainchain("1Nro9WkpaKm9axmcfPVp79dAJU1Gx7VmMZ", bal_1 - 1)
            raise Exception("Sending to mainchain address when should have been testnet; should have failed")
        except JSONRPCException as e:
            assert("Invalid Bitcoin address" in e.error["message"])

        assert(sidechain.getwalletinfo()["balance"]["bitcoin"] == bal_1)

        # Test superfluous peg-in witness data on a regular spend before we run out of funds
        raw_spend = sidechain.createrawtransaction([], {sidechain.getnewaddress():1})
        fund_spend = sidechain.fundrawtransaction(raw_spend)
        sign_spend = sidechain.signrawtransactionwithwallet(fund_spend["hex"])
        signed_struct = FromHex(CTransaction(), sign_spend["hex"])
        # Non-witness tx has no witness serialized yet
        if len(signed_struct.wit.vtxinwit) == 0:
            signed_struct.wit.vtxinwit = [CTxInWitness()]
        signed_struct.wit.vtxinwit[0].peginWitness.stack = sample_pegin_witness.stack
        assert_equal(sidechain.testmempoolaccept([signed_struct.serialize().hex()])[0]["allowed"], False)
        assert_equal(sidechain.testmempoolaccept([signed_struct.serialize().hex()])[0]["reject-reason"], "68: extra-pegin-witness")
        signed_struct.wit.vtxinwit[0].peginWitness.stack = [b'\x00'*100000] # lol
        assert_equal(sidechain.testmempoolaccept([signed_struct.serialize().hex()])[0]["allowed"], False)
        assert_equal(sidechain.testmempoolaccept([signed_struct.serialize().hex()])[0]["reject-reason"], "68: extra-pegin-witness")

        peg_out_txid = sidechain.sendtomainchain(some_btc_addr, 1)

        peg_out_details = sidechain.decoderawtransaction(sidechain.getrawtransaction(peg_out_txid))
        # peg-out, change, fee
        assert(len(peg_out_details["vout"]) == 3)
        found_pegout_value = False
        for output in peg_out_details["vout"]:
            if "value" in output and output["value"] == 1:
                found_pegout_value = True
        assert(found_pegout_value)

        bal_2 = sidechain.getwalletinfo()["balance"]["bitcoin"]
        # Make sure balance went down
        assert(bal_2 + 1 < bal_1)

        # Send rest of coins using subtractfee from output arg
        sidechain.sendtomainchain(some_btc_addr, bal_2, True)

        assert(sidechain.getwalletinfo()["balance"]['bitcoin'] == 0)

        print('Test coinbase peg-in maturity rules')

        # Have bitcoin output go directly into a claim output
        pegin_info = sidechain.getpeginaddress()
        mainchain_addr = pegin_info["mainchain_address"]
        # Watch the address so we can get tx without txindex
        parent.importaddress(mainchain_addr)
        claim_block = parent.generatetoaddress(50, mainchain_addr)[0]
        block_coinbase = parent.getblock(claim_block, 2)["tx"][0]
        claim_txid = block_coinbase["txid"]
        claim_tx = block_coinbase["hex"]
        claim_proof = parent.gettxoutproof([claim_txid], claim_block)

        # Can't claim something even though it has 50 confirms since it's coinbase
        assert_raises_rpc_error(-8, "Peg-in Bitcoin transaction needs more confirmations to be sent.", sidechain.claimpegin, claim_tx, claim_proof)
        # If done via raw API, still doesn't work
        coinbase_pegin = sidechain.createrawpegin(claim_tx, claim_proof)
        assert_equal(coinbase_pegin["mature"], False)
        signed_pegin = sidechain.signrawtransactionwithwallet(coinbase_pegin["hex"])["hex"]
        assert_raises_rpc_error(-26, "bad-pegin-witness, Needs more confirmations.", sidechain.sendrawtransaction, signed_pegin)

        # 50 more blocks to allow wallet to make it succeed by relay and consensus
        parent.generatetoaddress(50, parent.getnewaddress())
        # Wallet still doesn't want to create it for 2 more confirms
        assert_equal(sidechain.createrawpegin(claim_tx, claim_proof)["mature"], False)
        # But we can just shoot it off
        claim_txid = sidechain.sendrawtransaction(signed_pegin)
        sidechain.generatetoaddress(1, sidechain.getnewaddress())
        assert_equal(sidechain.gettransaction(claim_txid)["confirmations"], 1)

        # Test a confidential pegin.
        print("Performing a confidential pegin.")
        # start pegin
        pegin_addrs = sidechain.getpeginaddress()
        assert_equal(sidechain.decodescript(pegin_addrs["claim_script"])["type"], "witness_v0_keyhash")
        pegin_addr = pegin_addrs["mainchain_address"]
        txid_fund = parent.sendtoaddress(pegin_addr, 10)
        # 10+2 confirms required to get into mempool and confirm
        parent.generate(11)
        proof = parent.gettxoutproof([txid_fund])
        raw = parent.gettransaction(txid_fund)["hex"]
        raw_pegin = sidechain.createrawpegin(raw, proof)['hex']
        pegin = FromHex(CTransaction(), raw_pegin)
        # add new blinding pubkey for the pegin output
        pegin.vout[0].nNonce = CTxOutNonce(hex_str_to_bytes(sidechain.getaddressinfo(sidechain.getnewaddress("", "blech32"))["confidential_key"]))
        # now add an extra input and output from listunspent; we need a blinded output for this
        blind_addr = sidechain.getnewaddress("", "blech32")
        sidechain.sendtoaddress(blind_addr, 15)
        sidechain.generate(6)
        unspent = [u for u in sidechain.listunspent(6, 6) if u["amount"] == 15][0]
        assert(unspent["spendable"])
        assert("amountcommitment" in unspent)
        pegin.vin.append(CTxIn(COutPoint(int(unspent["txid"], 16), unspent["vout"])))
        # insert corresponding output before fee output
        new_destination = sidechain.getaddressinfo(sidechain.getnewaddress("", "blech32"))
        new_dest_script_pk = hex_str_to_bytes(new_destination["scriptPubKey"])
        new_dest_nonce = CTxOutNonce(hex_str_to_bytes(new_destination["confidential_key"]))
        new_dest_asset = pegin.vout[0].nAsset
        pegin.vout.insert(1, CTxOut(int(unspent["amount"]*COIN) - 10000, new_dest_script_pk, new_dest_asset, new_dest_nonce))
        # add the 10 ksat fee
        pegin.vout[2].nValue.setToAmount(pegin.vout[2].nValue.getAmount() + 10000)
        pegin_hex = ToHex(pegin)
        # test with both blindraw and rawblindraw
        raw_pegin_blinded1 = sidechain.blindrawtransaction(pegin_hex)
        raw_pegin_blinded2 = sidechain.rawblindrawtransaction(pegin_hex, ["", unspent["amountblinder"]], [10, 15], [unspent["asset"]]*2, ["", unspent["assetblinder"]], "", False)
        pegin_signed1 = sidechain.signrawtransactionwithwallet(raw_pegin_blinded1)
        pegin_signed2 = sidechain.signrawtransactionwithwallet(raw_pegin_blinded2)
        for pegin_signed in [pegin_signed1, pegin_signed2]:
            final_decoded = sidechain.decoderawtransaction(pegin_signed["hex"])
            assert(final_decoded["vin"][0]["is_pegin"])
            assert(not final_decoded["vin"][1]["is_pegin"])
            assert("assetcommitment" in final_decoded["vout"][0])
            assert("valuecommitment" in final_decoded["vout"][0])
            assert("commitmentnonce" in final_decoded["vout"][0])
            assert("value" not in final_decoded["vout"][0])
            assert("asset" not in final_decoded["vout"][0])
            assert(final_decoded["vout"][0]["commitmentnonce_fully_valid"])
            assert("assetcommitment" in final_decoded["vout"][1])
            assert("valuecommitment" in final_decoded["vout"][1])
            assert("commitmentnonce" in final_decoded["vout"][1])
            assert("value" not in final_decoded["vout"][1])
            assert("asset" not in final_decoded["vout"][1])
            assert(final_decoded["vout"][1]["commitmentnonce_fully_valid"])
            assert("value" in final_decoded["vout"][2])
            assert("asset" in final_decoded["vout"][2])
            # check that it is accepted in the mempool
            accepted = sidechain.testmempoolaccept([pegin_signed["hex"]])[0]
            if not accepted["allowed"]:
                raise Exception(accepted["reject-reason"])
            print("Blinded transaction looks ok!") # need this print to distinguish failures in for loop
        # check if they get mined; since we're trying to mine two double spends, disconnect the nodes
        disconnect_nodes(sidechain, 3)
        disconnect_nodes(sidechain2, 2)
        txid1 = sidechain.sendrawtransaction(pegin_signed1["hex"])
        blocks = sidechain.generate(3)
        assert_equal(sidechain.getrawtransaction(txid1, True, blocks[0])["confirmations"], 3)
        txid2 = sidechain2.sendrawtransaction(pegin_signed2["hex"])
        blocks = sidechain2.generate(3)
        assert_equal(sidechain2.getrawtransaction(txid2, True, blocks[0])["confirmations"], 3)
        # reconnect in case we extend the test
        connect_nodes_bi(self.nodes, 2, 3)
        sidechain.generate(10)

        print('Success!')

        # Manually stop sidechains first, then the parent chains.
        self.stop_node(2)
        self.stop_node(3)
        self.stop_node(0)
        self.stop_node(1)
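The tail of this example leans on a common functional-test pattern: partition the two sidechain nodes, let each one confirm its own version of the double spend, then reconnect. A minimal sketch of that pattern, assuming the disconnect_nodes/connect_nodes helpers used throughout these examples and explicit peer indices (both assumptions, not part of the example above):

def mine_conflicting_txs(node_a, node_b, idx_a, idx_b, raw_tx_a, raw_tx_b):
    # Split the network so each node can confirm its own spend of the same inputs
    disconnect_nodes(node_a, idx_b)
    disconnect_nodes(node_b, idx_a)
    txid_a = node_a.sendrawtransaction(raw_tx_a)
    node_a.generate(1)   # confirmed only on node_a's chain
    txid_b = node_b.sendrawtransaction(raw_tx_b)
    node_b.generate(1)   # confirmed only on node_b's chain
    # Reconnect afterwards; whichever chain loses the eventual reorg drops its confirmation
    connect_nodes(node_a, idx_b)
    return txid_a, txid_b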
Example #21
0
 def create_deposit(finalizer, node):
     connect_nodes(finalizer, node.index)
     payto = finalizer.getnewaddress('', 'legacy')
     txid = finalizer.deposit(payto, 1500)
     wait_until(lambda: txid in node.getrawmempool())
     disconnect_nodes(finalizer, node.index)
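 create_deposit above is a small but recurring idiom: connect just long enough for a transaction to reach the peer's mempool, then disconnect again. A generic sketch of the same idiom (relay_tx_through and build_tx are illustrative names, not part of the example):

 def relay_tx_through(sender, receiver, build_tx):
     # Temporarily connect, wait for the tx to reach the peer's mempool, then isolate again
     connect_nodes(sender, receiver.index)
     txid = build_tx()
     wait_until(lambda: txid in receiver.getrawmempool(), timeout=10)
     disconnect_nodes(sender, receiver.index)
     return txid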
Example #22
0
 def disconnect_remotes(self):
     for i in [self.remoteOnePos, self.remoteTwoPos]:
         for j in range(self.num_nodes):
             if i != j:
                 disconnect_nodes(self.nodes[i], j)
Example #23
0
    def test_double_votes(self):
        def corrupt_script(script, n_byte):
            script = bytearray(script)
            script[n_byte] = 1 if script[n_byte] == 0 else 0
            return bytes(script)

        # initial topology where arrows denote the direction of connections
        # finalizer2 ← fork1 → finalizer1
        #                ↓
        #              fork2
        fork1 = self.nodes[0]
        fork2 = self.nodes[1]

        fork1.importmasterkey(regtest_mnemonics[0]['mnemonics'])
        fork2.importmasterkey(regtest_mnemonics[1]['mnemonics'])

        finalizer1 = self.nodes[2]
        finalizer2 = self.nodes[3]

        connect_nodes(fork1, fork2.index)
        connect_nodes(fork1, finalizer1.index)
        connect_nodes(fork1, finalizer2.index)

        # leave IBD
        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer1, finalizer2])

        # clone finalizer
        finalizer2.importmasterkey(regtest_mnemonics[2]['mnemonics'])
        finalizer1.importmasterkey(regtest_mnemonics[2]['mnemonics'])

        disconnect_nodes(fork1, finalizer2.index)
        addr = finalizer1.getnewaddress('', 'legacy')
        txid1 = finalizer1.deposit(addr, 1500)
        wait_until(lambda: txid1 in fork1.getrawmempool())

        finalizer2.setlabel(addr, '')
        txid2 = finalizer2.deposit(addr, 1500)
        assert_equal(txid1, txid2)
        connect_nodes(fork1, finalizer2.index)

        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer1, finalizer2])
        disconnect_nodes(fork1, finalizer1.index)
        disconnect_nodes(fork1, finalizer2.index)

        # pass instant finalization
        # F    F    F
        # e0 - e1 - e2 - e3 - e4[16] fork1, fork2
        generate_block(fork1, count=3 + 5 + 5 + 1)
        assert_equal(fork1.getblockcount(), 16)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 4,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        # change topology where forks are not connected
        # finalizer1 → fork1
        #
        # finalizer2 → fork2
        sync_blocks([fork1, fork2])
        disconnect_nodes(fork1, fork2.index)

        # test that same vote included on different forks
        # doesn't create a slash transaction
        #                                       v1
        #                          - e4[17, 18, 19, 20] fork1
        # F    F    F    F        /
        # e0 - e1 - e2 - e3 - e4[16]
        #                         \     v1
        #                          - e4[17, 18, 19, 20] fork2
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=fork1)
        v1 = fork1.getrawtransaction(fork1.getrawmempool()[0])
        generate_block(fork1, count=4)
        assert_equal(fork1.getblockcount(), 20)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 4,
                'lastJustifiedEpoch': 3,
                'lastFinalizedEpoch': 3,
                'validators': 1
            })

        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork2)
        generate_block(fork2)
        assert_raises_rpc_error(-27, 'transaction already in block chain',
                                fork2.sendrawtransaction, v1)
        assert_equal(len(fork2.getrawmempool()), 0)
        generate_block(fork2, count=3)
        assert_equal(fork2.getblockcount(), 20)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 4,
                'lastJustifiedEpoch': 3,
                'lastFinalizedEpoch': 3,
                'validators': 1
            })
        self.log.info('same vote on two forks was accepted')

        # test that double-vote with invalid vote signature is ignored
        # and doesn't cause slashing
        #                            v1          v2a
        #                          - e4 - e5[21, 22] fork1
        # F    F    F    F     F  /
        # e0 - e1 - e2 - e3 - e4[16]
        #                         \  v1          v2a
        #                          - e4 - e5[21, 22] fork2
        generate_block(fork1)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=fork1)
        v2a = fork1.getrawtransaction(fork1.getrawmempool()[0])
        generate_block(fork1)
        assert_equal(fork1.getblockcount(), 22)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 5,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 4,
                'validators': 1
            })

        generate_block(fork2)
        tx_v2a = FromHex(CTransaction(), v2a)

        # corrupt the 1st byte of the validator's pubkey in the commit script
        # see schema in CScript::CommitScript
        tx_v2a.vout[0].scriptPubKey = corrupt_script(
            script=tx_v2a.vout[0].scriptPubKey, n_byte=2)

        assert_raises_rpc_error(-26, 'bad-vote-signature',
                                fork2.sendrawtransaction, ToHex(tx_v2a))
        assert_equal(len(fork2.getrawmempool()), 0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork2)
        # slash transactions are processed every 10 sec. UNIT-E TODO: remove once optimized
        time.sleep(10)
        assert_equal(len(fork2.getrawmempool()), 1)
        v2b = fork2.getrawtransaction(fork2.getrawmempool()[0])
        tx_v2b = FromHex(CTransaction(), v2b)
        assert_equal(tx_v2b.get_type(), TxType.VOTE)

        generate_block(fork2)
        assert_equal(len(fork2.getrawmempool()), 0)
        assert_equal(fork2.getblockcount(), 22)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 5,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 4,
                'validators': 1
            })
        self.log.info('double-vote with invalid signature is ignored')

        # test that a valid double-vote with a corrupt withdraw address
        # creates a slash tx which is included in the next block
        #                            v1          v2a
        #                          - e4 - e5[21, 22] fork1
        # F    F    F    F     F  /
        # e0 - e1 - e2 - e3 - e4[16]
        #                         \  v1          v2a s1
        #                          - e4 - e5[21, 22, 23] fork2
        # corrupt the 1st byte of the address in the scriptpubkey
        # but keep the correct vote signature; see schema in CScript::CommitScript
        tx_v2a = FromHex(CTransaction(), v2a)

        # Remove the signature
        tx_v2a.vin[0].scriptSig = list(CScript(tx_v2a.vin[0].scriptSig))[1]
        tx_v2a.vout[0].scriptPubKey = corrupt_script(
            script=tx_v2a.vout[0].scriptPubKey, n_byte=42)
        tx_v2a = sign_transaction(finalizer2, tx_v2a)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork2.sendrawtransaction, ToHex(tx_v2a))
        wait_until(lambda: len(fork2.getrawmempool()) == 1, timeout=20)
        s1_hash = fork2.getrawmempool()[0]
        s1 = FromHex(CTransaction(), fork2.getrawtransaction(s1_hash))
        assert_equal(s1.get_type(), TxType.SLASH)

        b23 = generate_block(fork2)[0]
        block = FromHex(CBlock(), fork2.getblock(b23, 0))
        assert_equal(len(block.vtx), 2)
        block.vtx[1].rehash()
        assert_equal(block.vtx[1].hash, s1_hash)
        self.log.info('slash tx for double-vote was successfully created')
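wait_for_vote_and_disconnect is referenced above but not shown. A plausible sketch, assuming it follows the same connect/wait/disconnect idiom as create_deposit in Example #21 (a guess at the helper, not its actual source):

    def wait_for_vote_and_disconnect(self, finalizer, node):
        # Let the finalizer see the node's chain, wait for its vote transaction
        # to appear in the node's mempool, then isolate the finalizer again.
        connect_nodes(finalizer, node.index)
        wait_until(lambda: len(node.getrawmempool()) > 0, timeout=10)
        disconnect_nodes(finalizer, node.index)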
Example #24
0
 def split_network(self):
     # Split the network of three nodes into nodes 0-1 and 2.
     assert not self.is_network_split
     disconnect_nodes(self.nodes[1], 2)
     disconnect_nodes(self.nodes[2], 1)
     self.is_network_split = True
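 split_network is normally paired with a join_network counterpart that undoes the split; a sketch of that counterpart under the same helpers (the method itself is not part of the example above):

 def join_network(self):
     # Reconnect nodes 1 and 2 and mark the network as whole again
     assert self.is_network_split
     connect_nodes(self.nodes[1], 2)
     self.sync_all()
     self.is_network_split = False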
Example #25
0
        self.sync_all()

        # ---------------------------------------------------------------------------------------
        # Node 1 - Check that a new sidechain certificate correctly updates FT and MBTR fees
        mark_logs(
            "\nNode 1 creates a new certificate updating FT and MBTR fees",
            self.nodes, DEBUG_MODE)

        mark_logs("Node 1 generates " + str(EPOCH_LENGTH) + " blocks",
                  self.nodes, DEBUG_MODE)
        blocks.extend(self.nodes[1].generate(EPOCH_LENGTH))
        self.sync_all()

        mark_logs("Disconnecting nodes", self.nodes, DEBUG_MODE)
        disconnect_nodes(self.nodes[1], 0)
        disconnect_nodes(self.nodes[0], 1)
        self.is_network_split = True

        quality = 1
        epoch_number, epoch_cum_tree_hash = get_epoch_data(
            scid, self.nodes[1], EPOCH_LENGTH)
        addr_node1 = self.nodes[1].getnewaddress()
        cert_amount = Decimal("10.0")
        amount_cert_1 = [{"address": addr_node1, "amount": cert_amount}]

        ftFee = ftScFee
        mbtrFee = mbtrScFee
        newFtFee = ftFee + 1
        newMbtrFee = mbtrFee + 1
        scid_swapped = str(swap_bytes(scid))
Example #26
0
 def disconnect_nodes(self, a, b):
     disconnect_nodes(a, b)
Example #27
0
    def _shorter_endorsed_chain_wins(self):
        self.log.warning("starting _shorter_endorsed_chain_wins()")
        lastblock = self.nodes[3].getblockcount()

        # stop node3
        self.stop_node(3)
        self.log.info("node3 stopped with block height %d", lastblock)

        # all nodes start with lastblock + 103 blocks
        self.nodes[0].generate(nblocks=103)
        self.log.info("node0 mined 103 blocks")
        self.sync_blocks([self.nodes[0], self.nodes[1], self.nodes[2]],
                         timeout=20)
        assert self.get_best_block(self.nodes[0])['height'] == lastblock + 103
        assert self.get_best_block(self.nodes[1])['height'] == lastblock + 103
        assert self.get_best_block(self.nodes[2])['height'] == lastblock + 103
        self.log.info("nodes[0,1,2] synced are at block %d", lastblock + 103)

        # node2 is disconnected from others
        disconnect_nodes(self.nodes[2], 0)
        disconnect_nodes(self.nodes[2], 1)
        self.log.info("node2 is disconnected")

        # node2 mines another 97 blocks, so total height is lastblock + 200
        self.nodes[2].generate(nblocks=97)

        # fork A is at 303 (lastblock = 200)
        # fork B is at 400
        self.nodes[2].waitforblockheight(lastblock + 200)
        self.log.info("node2 mined 97 more blocks, total height is %d",
                      lastblock + 200)

        bestblocks = [self.get_best_block(x) for x in self.nodes[0:3]]

        assert bestblocks[0] != bestblocks[2], "node[0,2] have same best hashes"
        assert bestblocks[0] == bestblocks[
            1], "node[0,1] have different best hashes: {} vs {}".format(
                bestblocks[0], bestblocks[1])

        # mine a keystone interval of blocks to fork A
        self.nodes[0].generate(nblocks=self.keystoneInterval)
        self.sync_all(self.nodes[0:2])
        self.log.info(
            "nodes[0,1] are in sync and are at fork A (%d...%d blocks)",
            lastblock + 103, lastblock + 103 + self.keystoneInterval)

        # fork B is at 400
        assert bestblocks[2][
            'height'] == lastblock + 200, "unexpected tip: {}".format(
                bestblocks[2])
        self.log.info("node2 is at fork B (%d...%d blocks)", lastblock + 103,
                      lastblock + 200)

        assert 200 > 103 + self.keystoneInterval + 10, "keystone interval is set too high"

        # endorse block 303 + keystone interval (fork A tip)
        addr0 = self.nodes[0].getnewaddress()
        txid = endorse_block(self.nodes[0], self.apm,
                             lastblock + 103 + self.keystoneInterval, addr0)
        self.log.info("node0 endorsed block %d (fork A tip)",
                      lastblock + 103 + self.keystoneInterval)
        # mine pop tx on node0
        containinghash = self.nodes[0].generate(nblocks=10)
        self.log.info("node0 mines 10 more blocks")
        self.sync_all(self.nodes[0:2])
        containingblock = self.nodes[0].getblock(containinghash[0])

        assert_equal(self.nodes[1].getblock(containinghash[0])['hash'],
                     containingblock['hash'])

        tip = self.get_best_block(self.nodes[0])
        assert txid in containingblock['pop']['data'][
            'atvs'], "pop tx is not in containing block"
        self.sync_blocks(self.nodes[0:2])
        self.log.info(
            "nodes[0,1] are in sync, pop tx containing block is {}".format(
                containingblock['height']))
        self.log.info("node0 tip is {}".format(tip['height']))

        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[1], 2)
        self.log.info("node2 connected to nodes[0,1]")

        self.start_node(3)
        connect_nodes(self.nodes[3], 0)
        connect_nodes(self.nodes[3], 2)
        self.log.info("node3 started with 0 blocks, connected to nodes[0,2]")

        self.sync_blocks(self.nodes, timeout=30)
        self.log.info("nodes[0,1,2,3] are in sync")

        # expected best block hash is fork A (has higher pop score)
        bestblocks = [self.get_best_block(x) for x in self.nodes]
        assert_equal(bestblocks[0]['hash'], bestblocks[1]['hash'])
        assert_equal(bestblocks[0]['hash'], bestblocks[2]['hash'])
        assert_equal(bestblocks[0]['hash'], bestblocks[3]['hash'])
        self.log.info("all nodes switched to common block")

        for i in range(len(bestblocks)):
            assert bestblocks[i]['height'] == tip['height'], \
                "node[{}] expected to select shorter chain ({}) with higher pop score\n" \
                "but selected longer chain ({})".format(i, tip['height'], bestblocks[i]['height'])

        # get best headers view
        blockchaininfo = [x.getblockchaininfo() for x in self.nodes]
        for n in blockchaininfo:
            assert_equal(n['blocks'], n['headers'])

        self.log.info("all nodes selected fork A as best chain")
        self.log.warning("_shorter_endorsed_chain_wins() succeeded!")
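get_best_block is used repeatedly in this example but not defined in the excerpt; it presumably just wraps the tip lookup, roughly:

    def get_best_block(self, node):
        # Hypothetical helper: return full block data for the node's current tip
        return node.getblock(node.getbestblockhash())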
Example #28
0
    def run_test(self):
        self.log.info("test -blocknotify")
        block_count = 10
        blocks = self.nodes[1].generatetoaddress(
            block_count, self.nodes[1].getnewaddress()
            if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)

        # wait at most 10 seconds for expected number of files before reading the content
        wait_until(
            lambda: len(os.listdir(self.blocknotify_dir)) == block_count,
            timeout=10)

        # directory content should equal the generated blocks hashes
        assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))

        if self.is_wallet_compiled():
            self.log.info("test -walletnotify")
            # wait at most 10 seconds for expected number of files before reading the content
            wait_until(
                lambda: len(os.listdir(self.walletnotify_dir)) == block_count,
                timeout=10)

            # directory content should equal the generated transaction hashes
            txids_rpc = list(
                map(lambda t: notify_outputname(self.wallet, t['txid']),
                    self.nodes[1].listtransactions("*", block_count)))
            assert_equal(sorted(txids_rpc),
                         sorted(os.listdir(self.walletnotify_dir)))
            self.stop_node(1)
            for tx_file in os.listdir(self.walletnotify_dir):
                os.remove(os.path.join(self.walletnotify_dir, tx_file))

            self.log.info("test -walletnotify after rescan")
            # restart node to rescan to force wallet notifications
            self.start_node(1)
            connect_nodes(self.nodes[0], 1)

            wait_until(
                lambda: len(os.listdir(self.walletnotify_dir)) == block_count,
                timeout=10)

            # directory content should equal the generated transaction hashes
            txids_rpc = list(
                map(lambda t: notify_outputname(self.wallet, t['txid']),
                    self.nodes[1].listtransactions("*", block_count)))
            assert_equal(sorted(txids_rpc),
                         sorted(os.listdir(self.walletnotify_dir)))
            for tx_file in os.listdir(self.walletnotify_dir):
                os.remove(os.path.join(self.walletnotify_dir, tx_file))

            # Conflicting transactions tests. Give node 0 same wallet seed as
            # node 1, generate spends from node 0, and check notifications
            # triggered by node 1
            self.log.info("test -walletnotify with conflicting transactions")
            self.nodes[0].sethdseed(seed=self.nodes[1].dumpprivkey(
                keyhash_to_p2pkh(
                    hex_str_to_bytes(self.nodes[1].getwalletinfo()['hdseedid'])
                    [::-1])))
            self.nodes[0].rescanblockchain()
            self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_UNSPENDABLE)

            # Generate transaction on node 0, sync mempools, and check for
            # notification on node 1.
            tx1 = self.nodes[0].sendtoaddress(
                address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
            assert_equal(tx1 in self.nodes[0].getrawmempool(), True)
            self.sync_mempools()
            self.expect_wallet_notify([tx1])

            # Generate bump transaction, sync mempools, and check for bump1
            # notification. In the future, per
            # https://github.com/youngseokcoin/youngseokcoin/pull/9371, it might be better
            # to have notifications for both tx1 and bump1.
            bump1 = self.nodes[0].bumpfee(tx1)["txid"]
            assert_equal(bump1 in self.nodes[0].getrawmempool(), True)
            self.sync_mempools()
            self.expect_wallet_notify([bump1])

            # Add bump1 transaction to new block, checking for a notification
            # and the correct number of confirmations.
            self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
            self.sync_blocks()
            self.expect_wallet_notify([bump1])
            assert_equal(self.nodes[1].gettransaction(bump1)["confirmations"],
                         1)

            # Generate a second transaction to be bumped.
            tx2 = self.nodes[0].sendtoaddress(
                address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
            assert_equal(tx2 in self.nodes[0].getrawmempool(), True)
            self.sync_mempools()
            self.expect_wallet_notify([tx2])

            # Bump tx2 as bump2 and generate a block on node 0 while
            # disconnected, then reconnect and check for notifications on node 1
            # about newly confirmed bump2 and newly conflicted tx2. Currently
            # only the bump2 notification is sent. Ideally, notifications would
            # be sent both for bump2 and tx2, which was the previous behavior
            # before being broken by an accidental change in PR
            # https://github.com/youngseokcoin/youngseokcoin/pull/16624. The bug is reported
            # in issue https://github.com/youngseokcoin/youngseokcoin/issues/18325.
            disconnect_nodes(self.nodes[0], 1)
            bump2 = self.nodes[0].bumpfee(tx2)["txid"]
            self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
            assert_equal(self.nodes[0].gettransaction(bump2)["confirmations"],
                         1)
            assert_equal(tx2 in self.nodes[1].getrawmempool(), True)
            connect_nodes(self.nodes[0], 1)
            self.sync_blocks()
            self.expect_wallet_notify([bump2])
            assert_equal(self.nodes[1].gettransaction(bump2)["confirmations"],
                         1)
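expect_wallet_notify is called several times above but not defined in the excerpt. A plausible sketch, assuming it waits for one notification file per txid and then cleans them up (the body below is an assumption, not the test's actual helper):

    def expect_wallet_notify(self, tx_ids):
        # Wait for a -walletnotify file per txid, then remove them for the next check
        wait_until(lambda: all(
            notify_outputname(self.wallet, txid) in os.listdir(self.walletnotify_dir)
            for txid in tx_ids), timeout=10)
        for txid in tx_ids:
            os.remove(os.path.join(self.walletnotify_dir, notify_outputname(self.wallet, txid)))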
Example #29
0
    def test_successful_deposit(self, finalizer, proposer):

        payto = finalizer.getnewaddress("", "legacy")
        txid = finalizer.deposit(payto, 1500)

        deposit_tx = finalizer.gettransaction(txid)
        assert_equal(deposit_tx['amount'],
                     0)  # 0 because we send the money to ourselves
        assert_less_than(deposit_tx['fee'],
                         0)  # fee returned by gettransaction is negative

        raw_deposit_tx = finalizer.decoderawtransaction(deposit_tx['hex'])
        assert_equal(raw_deposit_tx['vout'][0]['value'], 1500)
        assert_equal(raw_deposit_tx['vout'][1]['value'],
                     10000 - 1500 + deposit_tx['fee'])

        # wait for transaction to propagate
        self.wait_for_transaction(txid, 10)

        wait_until(lambda: finalizer.getvalidatorinfo()['validator_status'] ==
                   'WAITING_DEPOSIT_CONFIRMATION',
                   timeout=5)

        # mine a block to allow the deposit to get included
        self.generate_sync(proposer)
        disconnect_nodes(finalizer, proposer.index)

        wait_until(lambda: finalizer.getvalidatorinfo()['validator_status'] ==
                   'WAITING_DEPOSIT_FINALIZATION',
                   timeout=5)

        # move to checkpoint
        proposer.generate(8)
        assert_equal(proposer.getblockcount(), 10)
        assert_finalizationstate(
            proposer, {
                'currentEpoch': 1,
                'currentDynasty': 0,
                'lastJustifiedEpoch': 0,
                'lastFinalizedEpoch': 0,
                'validators': 0
            })

        # the finalizer will be ready to operate at currentDynasty=2
        for _ in range(2):
            proposer.generate(10)
            assert_finalizationstate(proposer, {'validators': 0})

        # start new dynasty
        proposer.generate(1)
        assert_equal(proposer.getblockcount(), 31)
        assert_finalizationstate(
            proposer, {
                'currentEpoch': 4,
                'currentDynasty': 2,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        connect_nodes(finalizer, proposer.index)
        sync_blocks([finalizer, proposer], timeout=10)
        wait_until(lambda: finalizer.getvalidatorinfo()['enabled'] == 1,
                   timeout=5)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'IS_VALIDATING')

        # creates actual vote
        wait_until(lambda: len(proposer.getrawmempool()) == 1, timeout=5)
        txraw = proposer.getrawtransaction(proposer.getrawmempool()[0])
        vote = FromHex(CTransaction(), txraw)
        assert_equal(vote.get_type(), TxType.VOTE)
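generate_sync is also not shown in this excerpt; presumably it mines on the given node and then syncs the whole network, roughly (an assumption about the helper, not its source):

    def generate_sync(self, node, nblocks=1):
        # Mine on the given node and wait until all nodes agree on the new tip
        blocks = node.generate(nblocks)
        sync_blocks(self.nodes)
        return blocks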
Example #30
0
 def setup_network(self):
     # Start with split network:
     super().setup_network()
     disconnect_nodes(self.nodes[1], self.nodes[2])
     disconnect_nodes(self.nodes[2], self.nodes[1])
Example #31
0
    def run_test(self):
        # Node 0 supports COMPACT_FILTERS, node 1 does not.
        node0 = self.nodes[0].add_p2p_connection(CFiltersClient())
        node1 = self.nodes[1].add_p2p_connection(CFiltersClient())

        # Nodes 0 & 1 share the same first 999 blocks in the chain.
        self.nodes[0].generate(999)
        self.sync_blocks(timeout=600)

        # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
        disconnect_nodes(self.nodes[0], 1)

        self.nodes[0].generate(1)
        wait_until(lambda: self.nodes[0].getblockcount() == 1000)
        stale_block_hash = self.nodes[0].getblockhash(1000)

        self.nodes[1].generate(1001)
        wait_until(lambda: self.nodes[1].getblockcount() == 2000)

        self.log.info("get cfcheckpt on chain to be re-orged out.")
        request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                                   stop_hash=int(stale_block_hash, 16))
        node0.send_and_ping(message=request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)
        assert_equal(len(response.headers), 1)

        self.log.info("Reorg node 0 to a new chain.")
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks(timeout=600)

        main_block_hash = self.nodes[0].getblockhash(1000)
        assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"

        self.log.info("Check that peers can fetch cfcheckpt on active chain.")
        tip_hash = self.nodes[0].getbestblockhash()
        request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                                   stop_hash=int(tip_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)

        main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash,
                                                      'basic')['header']
        tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash,
                                                     'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)])

        self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
        request = msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                                   stop_hash=int(stale_block_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']

        stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash,
                                                       'basic')['header']
        assert_equal(response.headers,
                     [int(header, 16) for header in (stale_cfcheckpt, )])

        self.log.info("Check that peers can fetch cfheaders on active chain.")
        request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                                   start_height=1,
                                   stop_hash=int(main_block_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        main_cfhashes = response.hashes
        assert_equal(len(main_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(main_cfcheckpt, 16))

        self.log.info("Check that peers can fetch cfheaders on stale chain.")
        request = msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                                   start_height=1,
                                   stop_hash=int(stale_block_hash, 16))
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        stale_cfhashes = response.hashes
        assert_equal(len(stale_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(stale_cfcheckpt, 16))

        self.log.info("Check that peers can fetch cfilters.")
        stop_hash = self.nodes[0].getblockhash(10)
        request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                                  start_height=1,
                                  stop_hash=int(stop_hash, 16))
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 10)

        self.log.info("Check that cfilter responses are correct.")
        for cfilter, cfhash, height in zip(response, main_cfhashes,
                                           range(1, 11)):
            block_hash = self.nodes[0].getblockhash(height)
            assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
            assert_equal(cfilter.block_hash, int(block_hash, 16))
            computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
            assert_equal(computed_cfhash, cfhash)

        self.log.info("Check that peers can fetch cfilters for stale blocks.")
        request = msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                                  start_height=1000,
                                  stop_hash=int(stale_block_hash, 16))
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 1)

        cfilter = response[0]
        assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
        assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
        computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
        assert_equal(computed_cfhash, stale_cfhashes[999])

        self.log.info(
            "Requests to node 1 without NODE_COMPACT_FILTERS result in disconnection."
        )
        requests = [
            msg_getcfcheckpt(filter_type=FILTER_TYPE_BASIC,
                             stop_hash=int(main_block_hash, 16)),
            msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                             start_height=1000,
                             stop_hash=int(main_block_hash, 16)),
            msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                            start_height=1000,
                            stop_hash=int(main_block_hash, 16)),
        ]
        for request in requests:
            node1 = self.nodes[1].add_p2p_connection(P2PInterface())
            node1.send_message(request)
            node1.wait_for_disconnect()

        self.log.info("Check that invalid requests result in disconnection.")
        requests = [
            # Requesting too many filters results in disconnection.
            msg_getcfilters(filter_type=FILTER_TYPE_BASIC,
                            start_height=0,
                            stop_hash=int(main_block_hash, 16)),
            # Requesting too many filter headers results in disconnection.
            msg_getcfheaders(filter_type=FILTER_TYPE_BASIC,
                             start_height=0,
                             stop_hash=int(tip_hash, 16)),
            # Requesting unknown filter type results in disconnection.
            msg_getcfcheckpt(filter_type=255,
                             stop_hash=int(main_block_hash, 16)),
            # Requesting unknown hash results in disconnection.
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=123456789,
            ),
        ]
        for request in requests:
            node0 = self.nodes[0].add_p2p_connection(P2PInterface())
            node0.send_message(request)
            node0.wait_for_disconnect()
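compute_last_header is used above to fold the returned filter hashes back into the checkpoint header. Per BIP 157 each filter header commits to the filter hash plus the previous header, so a sketch of the helper (assuming the usual hash256/ser_uint256/uint256_from_str utilities from the test framework's messages module) would be:

def compute_last_header(prev_header, hashes):
    # Chain each filter hash onto the previous filter header: hash256(filter_hash || prev_header)
    header = ser_uint256(prev_header)
    for filter_hash in hashes:
        header = hash256(ser_uint256(filter_hash) + header)
    return uint256_from_str(header)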
Example #32
0
    def run_test(self):
        self.nodes[1].generate(100)
        self.sync_blocks()
        balance = self.nodes[0].getbalance()
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                          Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                          Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(),
                                          Decimal("10"))
        self.sync_mempools()
        self.nodes[1].generate(1)

        # Can not abandon non-wallet transaction
        assert_raises_rpc_error(
            -5, 'Invalid or non-wallet transaction id',
            lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
        # Can not abandon confirmed transaction
        assert_raises_rpc_error(
            -5, 'Transaction not eligible for abandonment',
            lambda: self.nodes[0].abandontransaction(txid=txA))

        self.sync_blocks()
        newbalance = self.nodes[0].getbalance()
        assert balance - newbalance < Decimal("0.001")  #no more than fees lost
        balance = newbalance

        # Disconnect nodes so node0's transactions don't get into node1's mempool
        disconnect_nodes(self.nodes[0], 1)

        # Identify the 10BPS outputs
        nA = next(tx_out["vout"]
                  for tx_out in self.nodes[0].gettransaction(txA)["details"]
                  if tx_out["amount"] == Decimal("10"))
        nB = next(tx_out["vout"]
                  for tx_out in self.nodes[0].gettransaction(txB)["details"]
                  if tx_out["amount"] == Decimal("10"))
        nC = next(tx_out["vout"]
                  for tx_out in self.nodes[0].gettransaction(txC)["details"]
                  if tx_out["amount"] == Decimal("10"))

        inputs = []
        # spend 10BPS outputs from txA and txB
        inputs.append({"txid": txA, "vout": nA})
        inputs.append({"txid": txB, "vout": nB})
        outputs = {}

        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])

        # Identify the 14.99998BPS output
        nAB = next(tx_out["vout"]
                   for tx_out in self.nodes[0].gettransaction(txAB1)["details"]
                   if tx_out["amount"] == Decimal("14.99998"))

        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid": txAB1, "vout": nAB})
        inputs.append({"txid": txC, "vout": nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])

        # Create a child tx spending ABC2
        signed3_change = Decimal("24.999")
        inputs = [{"txid": txABC2, "vout": 0}]
        outputs = {self.nodes[0].getnewaddress(): signed3_change}
        signed3 = self.nodes[0].signrawtransactionwithwallet(
            self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])

        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + signed3_change)
        balance = newbalance

        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert self.nodes[0].getmempoolinfo()['loaded']

        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - signed3_change)
        # Unconfirmed received funds that are not in the mempool also shouldn't
        # show up in the unconfirmed balance
        balances = self.nodes[0].getbalances()['mine']
        assert_equal(balances['untrusted_pending'] + balances['trusted'],
                     newbalance)
        # Also shouldn't show up in listunspent
        assert not txABC2 in [
            utxo["txid"] for utxo in self.nodes[0].listunspent(0)
        ]
        balance = newbalance

        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance

        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.restart_node(0, extra_args=["-minrelaytxfee=0.00001"])
        assert self.nodes[0].getmempoolinfo()['loaded']

        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)

        # But if it is received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance

        # Send child tx again so it is unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(
            newbalance,
            balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance

        # Remove using high relay fee again
        self.restart_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert self.nodes[0].getmempoolinfo()['loaded']
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance

        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs = []
        inputs.append({"txid": txA, "vout": nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)

        connect_nodes(self.nodes[0], 1)
        self.sync_blocks()

        # Verify that B and C's 10 BPS outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance

        # There is currently a minor bug around this and so this test doesn't work.  See Issue #7315
        # Invalidate the block with the double spend and B's 10 BPS output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info(
            "If the balance has not declined after invalidateblock, then the out-of-mempool wallet tx that is no longer"
        )
        self.log.info(
            "conflicted has not resumed marking its inputs as spent.  See Issue #7315"
        )
        self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
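A check this test leaves implicit is that the wallet actually marks the abandoned parent as such. Right after the abandontransaction(txAB1) call one could additionally assert the flag, assuming gettransaction exposes an 'abandoned' field on send entries as Bitcoin-style wallets do (a hedged addition, not part of the original test):

        # After abandontransaction(txAB1): every send entry of that tx should be flagged
        details = self.nodes[0].gettransaction(txAB1)["details"]
        assert all(d["abandoned"] for d in details if d["category"] == "send")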
Example #33
0
 def disconnect_all(self):
     self.log.info("Disconnecting nodes...")
     disconnect_nodes(self.nodes[0], 1)
     disconnect_nodes(self.nodes[1], 0)
     self.log.info("Nodes disconnected")
Example #34
0
 def disconnect_all(self):
     disconnect_nodes(self.nodes[0], 1)
     disconnect_nodes(self.nodes[1], 0)
     disconnect_nodes(self.nodes[2], 1)
     disconnect_nodes(self.nodes[2], 0)
     disconnect_nodes(self.nodes[0], 2)
     disconnect_nodes(self.nodes[1], 2)
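 The same full-mesh teardown is often written as a loop over all ordered pairs, which scales past three nodes; an equivalent sketch:

 def disconnect_all(self):
     # Loop form of the pairwise teardown above: disconnect every ordered pair
     for i in range(len(self.nodes)):
         for j in range(len(self.nodes)):
             if i != j:
                 disconnect_nodes(self.nodes[i], j)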
Example #35
0
    def run_test(self):
        self.log.debug("Send 5 transactions from node2 (to its own address)")
        tx_creation_time_lower = int(time.time())
        for i in range(5):
            last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        node2_balance = self.nodes[2].getbalance()
        self.sync_all()
        tx_creation_time_higher = int(time.time())

        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Prioritize a transaction on node0")
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'], fees['modified'])
        self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        tx_creation_time = self.nodes[0].getmempoolentry(txid=last_txid)['time']
        assert_greater_than_or_equal(tx_creation_time, tx_creation_time_lower)
        assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time)

        # disconnect nodes & make a txn that remains in the unbroadcast set.
        disconnect_nodes(self.nodes[0], 1)
        assert(len(self.nodes[0].getpeerinfo()) == 0)
        assert(len(self.nodes[0].p2ps) == 0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("12"))
        connect_nodes(self.nodes[0], 2)

        self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
        self.stop_nodes()
        # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
        # Also don't store the mempool, to keep the datadir clean
        self.start_node(1, extra_args=["-persistmempool=0"])
        self.start_node(0)
        self.start_node(2)
        assert self.nodes[0].getmempoolinfo()["loaded"]  # start_node is blocking on the mempool being loaded
        assert self.nodes[2].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[0].getrawmempool()), 6)
        assert_equal(len(self.nodes[2].getrawmempool()), 5)
        # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug('Verify prioritization is loaded correctly')
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        self.log.debug('Verify time is loaded correctly')
        assert_equal(tx_creation_time, self.nodes[0].getmempoolentry(txid=last_txid)['time'])

        # Verify accounting of mempool transactions after restart is correct
        self.nodes[2].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
        assert_equal(node2_balance, self.nodes[2].getbalance())

        # start node0 with wallet disabled so wallet transactions don't get resubmitted
        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0", "-disablewallet"])
        assert self.nodes[0].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        self.stop_nodes()
        self.start_node(0)
        assert self.nodes[0].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[0].getrawmempool()), 6)

        mempooldat0 = os.path.join(self.nodes[0].datadir, self.chain, 'mempool.dat')
        mempooldat1 = os.path.join(self.nodes[1].datadir, self.chain, 'mempool.dat')
        self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)

        self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 6 transactions")
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        assert self.nodes[1].getmempoolinfo()["loaded"]
        assert_equal(len(self.nodes[1].getrawmempool()), 6)

        self.log.debug("Prevent pexad from writing mempool.dat to disk. Verify that `savemempool` fails")
        # to test the exception we are creating a tmp folder called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        os.mkdir(mempooldotnew1)
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
        os.rmdir(mempooldotnew1)

        self.test_persist_unbroadcast()
Example #36
0
    def run_test(self):
        self.nodes[1].generate(100)
        sync_blocks(self.nodes)
        balance = self.nodes[0].getbalance()
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        sync_mempools(self.nodes)
        self.nodes[1].generate(1)

        # Can not abandon non-wallet transaction
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
        # Can not abandon confirmed transaction
        assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))

        sync_blocks(self.nodes)
        newbalance = self.nodes[0].getbalance()
        assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
        balance = newbalance

        # Disconnect nodes so node0's transactions don't get into node1's mempool
        disconnect_nodes(self.nodes[0], 1)

        # Identify the 10btc outputs
        nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
        nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
        nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))

        inputs =[]
        # spend 10btc outputs from txA and txB
        inputs.append({"txid":txA, "vout":nA})
        inputs.append({"txid":txB, "vout":nB})
        outputs = {}

        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])

        # Identify the 14.99998btc output
        nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))

        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid":txAB1, "vout":nAB})
        inputs.append({"txid":txC, "vout":nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])

        # Create a child tx spending ABC2
        signed3_change = Decimal("24.999")
        inputs = [ {"txid":txABC2, "vout":0} ]
        outputs = { self.nodes[0].getnewaddress(): signed3_change }
        signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
        # note tx is never directly referenced, only abandoned as a child of the above
        self.nodes[0].sendrawtransaction(signed3["hex"])

        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + signed3_change)
        balance = newbalance

        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])

        # Verify txs no longer in either node's mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - signed3_change)
        # Unconfirmed received funds that are not in the mempool also shouldn't
        # show up in the unconfirmed balance
        unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
        assert_equal(unconfbalance, newbalance)
        # Also shouldn't show up in listunspent
        assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
        balance = newbalance

        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance

        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)

        # But if it is received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance

        # Send child tx again so it is unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance

        # Remove using high relay fee again
        self.stop_node(0)
        self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance

        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs =[]
        inputs.append({"txid":txA, "vout":nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransactionwithwallet(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)

        connect_nodes(self.nodes[0], 1)
        sync_blocks(self.nodes)

        # Verify that B and C's 10 MAC outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance

        # There is currently a minor bug around this and so this test doesn't work.  See Issue #7315
        # Invalidate the block with the double spend and B's 10 MAC output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        self.log.info("conflicted has not resumed causing its inputs to be seen as spent.  See Issue #7315")
        self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
Example #37
0
    def test_broadcast(self):
        self.log.info(
            "Test that mempool reattempts delivery of locally submitted transaction"
        )
        node = self.nodes[0]

        min_relay_fee = node.getnetworkinfo()["relayfee"]
        create_confirmed_utxos(node, 10)

        disconnect_nodes(node, self.nodes[1])

        self.log.info("Generate transactions that only node 0 knows about")

        # generate a wallet txn
        addr = node.getnewaddress()
        wallet_tx_hsh = node.sendtoaddress(addr, 0.0001)
        utxos = node.listunspent()

        # generate a txn using sendrawtransaction
        us0 = utxos.pop()
        inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
        outputs = {addr: 0.0001}
        tx = node.createrawtransaction(inputs, outputs)
        node.settxfee(min_relay_fee)
        txF = node.fundrawtransaction(tx)
        txFS = node.signrawtransactionwithwallet(txF["hex"])
        rpc_tx_hsh = node.sendrawtransaction(txFS["hex"])

        # check transactions are in unbroadcast using rpc
        mempoolinfo = self.nodes[0].getmempoolinfo()
        assert_equal(mempoolinfo['unbroadcastcount'], 2)
        mempool = self.nodes[0].getrawmempool(True)
        for tx in mempool:
            assert_equal(mempool[tx]['unbroadcast'], True)

        # check that second node doesn't have these two txns
        mempool = self.nodes[1].getrawmempool()
        assert rpc_tx_hsh not in mempool
        assert wallet_tx_hsh not in mempool

        # ensure that unbroadcast txs are persisted to mempool.dat
        self.restart_node(0)

        self.log.info("Reconnect nodes & check if they are sent to node 1")
        connect_nodes(node, self.nodes[1])

        # fast forward into the future & ensure that the second node has the
        # txns
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
        self.sync_mempools(timeout=30)
        mempool = self.nodes[1].getrawmempool()
        assert rpc_tx_hsh in mempool
        assert wallet_tx_hsh in mempool

        # check that transactions are no longer in first node's unbroadcast set
        mempool = self.nodes[0].getrawmempool(True)
        for tx in mempool:
            assert_equal(mempool[tx]['unbroadcast'], False)

        self.log.info(
            "Add another connection & ensure transactions aren't broadcast again"
        )

        conn = node.add_p2p_connection(P2PTxInvStore())
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
        # allow sufficient time for possibility of broadcast
        time.sleep(2)
        assert_equal(len(conn.get_invs()), 0)

        disconnect_nodes(node, self.nodes[1])
        node.disconnect_p2ps()
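The restart in the middle of this test ("ensure that unbroadcast txs are persisted to mempool.dat") only reloads the mempool; a natural follow-up assertion, assuming getmempoolinfo still reports the unbroadcast count after the reload, would be:

        # Hypothetical follow-up to self.restart_node(0) above: both locally submitted
        # transactions should still be tracked as unbroadcast after the mempool reload.
        assert_equal(node.getmempoolinfo()['unbroadcastcount'], 2)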