def run_test(self):

        # Sending exactly 50 coins and subtracting fee from amount should always
        # result in a tx with exactly 1 input and 1 output. A list of 1 is always
        # sorted but check anyway. We run this check first to ensure we do not
        # randomly select an output that is less than 50 because it has been used
        # for something else in this test
        self.log.info('Check that a tx with 1 input and 1 output is BIP69 sorted')
        # 5th positional arg: subtractfeefromamount=True, so the 50-coin input
        # is spent in full and no change output is created.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 50, "", "", True)
        tx = self.nodes[0].decoderawtransaction(
            self.nodes[0].gettransaction(txid)['hex'])
        tx_vin = tx["vin"]
        assert_equal(len(tx_vin), 1)
        tx_vout = tx["vout"]
        assert_equal(len(tx_vout), 1)
        assert_equal(self.validate_inputs_bip69(tx_vin), True)
        assert_equal(self.validate_outputs_bip69(tx_vout), True)

        self.log.info('Check that a tx with >1 input and >1 output is BIP69 sorted')
        # Six recipients totalling ~168 coins: more than any single coin, so
        # multiple inputs must be selected.
        outputs = {self.nodes[0].getnewaddress(): 110, self.nodes[0].getnewaddress(): 1.2, self.nodes[0].getnewaddress(): 35, self.nodes[
            0].getnewaddress(): 1.3, self.nodes[0].getnewaddress(): 20, self.nodes[0].getnewaddress(): 0.3}
        txid = self.nodes[0].sendmany("", outputs)
        tx = self.nodes[0].decoderawtransaction(
            self.nodes[0].gettransaction(txid)['hex'])
        tx_vin = tx["vin"]
        # It is not necessary to check for len of 0 because it is not possible.
        # The number of inputs is variable, as long as it is not 1 or 0 this test
        # is behaving as intended
        assert_not_equal(len(tx_vin), 1)
        tx_vout = tx["vout"]
        # There should be 7 outputs, 6 specified sends and 1 change
        assert_equal(len(tx_vout), 7)
        assert_equal(self.validate_inputs_bip69(tx_vin), True)
        assert_equal(self.validate_outputs_bip69(tx_vout), True)

        self.log.info(
            'Check that we have some (old) blocks and that anti-fee-sniping is disabled')
        # With a stale tip, the wallet does not set a locktime on new txs.
        assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        tx = self.nodes[0].decoderawtransaction(
            self.nodes[0].gettransaction(txid)['hex'])
        assert_equal(tx['locktime'], 0)

        self.log.info(
            'Check that anti-fee-sniping is enabled when we mine a recent block')
        self.generate(self.nodes[0], 1)
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        tx = self.nodes[0].decoderawtransaction(
            self.nodes[0].gettransaction(txid)['hex'])
        # locktime is set near the current height (201), possibly lowered by
        # the random anti-fee-sniping offset, hence the range check.
        assert 0 < tx['locktime'] <= 201

        # Ensure the 'include_unsafe' option works for sendmany and sendtoaddress RPCs
        self.test_send_unsafe_inputs()
# Example #2 — separator artifact from the example aggregator (vote count: 0)
    def test_compactblock_reconstruction_multiple_peers(
            self, node, stalling_peer, delivery_peer):
        """Check compact-block reconstruction when the announcing peer stalls
        and the transactions arrive from a different peer, and that a
        subsequently invalid compact block does not break relay.
        """
        # Need at least one spendable utxo to build test blocks from.
        assert len(self.utxos)

        def announce_cmpct_block(node, peer):
            """Build a block with 5 txs and announce it to `peer` as a compact
            block; the node is missing the txs so it must send getblocktxn."""
            utxo = self.utxos.pop(0)
            block = self.build_block_with_transactions(node, utxo, 5)

            cmpct_block = HeaderAndShortIDs()
            cmpct_block.initialize_from_block(block)
            msg = msg_cmpctblock(cmpct_block.to_p2p())
            peer.send_and_ping(msg)
            # last_message is shared with the p2p thread — read under the lock.
            with mininode_lock:
                assert "getblocktxn" in peer.last_message
            return block, cmpct_block

        block, cmpct_block = announce_cmpct_block(node, stalling_peer)

        # Deliver the missing transactions via a different peer's tx relay...
        for tx in block.vtx[1:]:
            delivery_peer.send_message(msg_tx(tx))
        delivery_peer.sync_with_ping()
        mempool = node.getrawmempool()
        for tx in block.vtx[1:]:
            assert tx.hash in mempool

        # ...then the compact block should reconstruct from the mempool alone.
        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)

        self.utxos.append(
            [block.unspent_tx.sha256, 0, block.unspent_tx.vout[0].nValue])

        # Now test that delivering an invalid compact block won't break relay

        block, cmpct_block = announce_cmpct_block(node, stalling_peer)
        for tx in block.vtx[1:]:
            delivery_peer.send_message(msg_tx(tx))
        delivery_peer.sync_with_ping()

        # mutilate the merkle root to make the block invalid
        cmpct_block.header.hashMerkleRoot += 1

        # The mutated block must be rejected (tip unchanged)...
        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        assert_not_equal(int(node.getbestblockhash(), 16), block.sha256)

        # ...but the original announcement can still be completed by the
        # stalling peer answering the earlier getblocktxn with blocktxn.
        msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = block.vtx[1:]
        stalling_peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    def run_test(self):
        """Exercise Sapling diversified addresses: creation, listing, key
        export/import, sending between them, and Sapling-only restrictions.
        """
        sapling_address = self.nodes[0].z_getnewaddress('sapling')
        addr2 = self.nodes[0].z_getnewdiversifiedaddress(sapling_address)
        assert_not_equal(sapling_address, addr2)

        alladdrs = self.nodes[0].z_getalldiversifiedaddresses(sapling_address)

        # Assert that both addresses are diversified
        assert_equal(len(alladdrs), 2,
                     "There should be 2 diversified addresses now")
        assert_true(sapling_address in alladdrs, "Original should be present")
        assert_true(addr2 in alladdrs, "Diversified address should be present")

        # Query it from the other address
        self.verify_diversified_set(self.nodes[0], sapling_address,
                                    [sapling_address, addr2])

        # Add a third diversified address
        addr3 = self.nodes[0].z_getnewdiversifiedaddress(sapling_address)
        self.verify_diversified_set(self.nodes[0], sapling_address,
                                    [sapling_address, addr2, addr3])

        # Now, add an entirely new address, and make sure it shows up separately.
        sapling_address2 = self.nodes[0].z_getnewaddress('sapling')
        alladdrs = self.nodes[0].z_getalldiversifiedaddresses(sapling_address)
        newaddrs = self.nodes[0].z_getalldiversifiedaddresses(sapling_address2)

        assert_equal(len(alladdrs), 3)
        assert_equal(len(newaddrs), 1)

        # Assert that the private keys match: diversified addresses share the
        # same spending key as their base address.
        privkey = self.nodes[0].z_exportkey(sapling_address)
        privkey2 = self.nodes[0].z_exportkey(addr2)
        assert_equal(privkey, privkey2, "Private keys should match")

        # An unrelated address must have a different key.
        newprivkey = self.nodes[0].z_exportkey(sapling_address2)
        assert_not_equal(privkey2, newprivkey)

        # Generate a new address and another set of diversified addresses based on that, to make
        # sure that there are now 2 sets of diversified addresses
        baseaddr2 = self.nodes[0].z_getnewaddress('sapling')
        baseadd2div1 = self.nodes[0].z_getnewdiversifiedaddress(baseaddr2)
        baseadd2div2 = self.nodes[0].z_getnewdiversifiedaddress(baseaddr2)

        # Verify that the original set of diversified addresses still match as well as the new set
        self.verify_diversified_set(self.nodes[0], sapling_address,
                                    [sapling_address, addr2, addr3])
        self.verify_diversified_set(self.nodes[0], baseaddr2,
                                    [baseaddr2, baseadd2div1, baseadd2div2])

        # Sanity-check the test harness
        assert_equal(self.nodes[0].getblockcount(), 200)
        # Activate sapling
        self.nodes[3].generate(5)
        self.sync_all()

        # Try sending to the diversified addresses and make sure it is received
        coinbase_taddr = get_coinbase_address(self.nodes[3])

        # z_sendmany args: from, recipients, minconf=1, fee=0
        recipients = []
        recipients.append({"address": addr2, "amount": Decimal('10')})
        myopid = self.nodes[3].z_sendmany(coinbase_taddr, recipients, 1, 0)
        wait_and_assert_operationid_status(self.nodes[3], myopid)

        self.nodes[3].generate(1)
        self.sync_all()

        assert_equal(self.nodes[0].z_getbalance(addr2), Decimal("10"))

        # Then send from addr2 to addr3, both of which are diversified
        recipients = []
        recipients.append({"address": addr3, "amount": Decimal('1')})
        myopid = self.nodes[0].z_sendmany(addr2, recipients, 1, 0)
        wait_and_assert_operationid_status(self.nodes[0], myopid)

        self.nodes[0].generate(1)
        self.sync_all()

        # Now, balances should be 9 and 1
        assert_equal(self.nodes[0].z_getbalance(addr2), Decimal("9"))
        assert_equal(self.nodes[0].z_getbalance(addr3), Decimal("1"))

        # Import the private key into a new node, and ensure that we get the funds as well
        # (one key import covers all diversified addresses derived from it).
        self.nodes[2].z_importkey(privkey)
        assert_equal(self.nodes[2].z_getbalance(addr2), Decimal("9"))
        assert_equal(self.nodes[2].z_getbalance(addr3), Decimal("1"))

        # We should be able to generate 100s of diversified addresses
        node2saplingaddress = self.nodes[1].z_getnewaddress("sapling")
        node2privkey = self.nodes[1].z_exportkey(node2saplingaddress)

        for i in range(0, 99):
            self.nodes[1].z_getnewdiversifiedaddress(node2saplingaddress)
        allnode2divaddrs = self.nodes[1].z_getalldiversifiedaddresses(
            node2saplingaddress)
        assert_equal(len(allnode2divaddrs), 100,
                     "Should generate 100 diversified addresses")
        assert_equal(len(set(allnode2divaddrs)), 100,
                     "Diversified addresses should be unique")
        # ... and their private keys should match
        for i in range(0, 100):
            assert_equal(self.nodes[1].z_exportkey(allnode2divaddrs[i]),
                         node2privkey)

        # Can't generate diversified address for an address that doesn't exist in the wallet
        try:
            self.nodes[0].z_getnewdiversifiedaddress(node2saplingaddress)
            raise AssertionError("Should have failed")
        except JSONRPCException as e:
            assert_equal("Wallet does not hold private zkey for this zaddr",
                         e.error['message'])

        # Make sure that diversified addresses are a Sapling only feature
        sproutaddress = self.nodes[0].z_getnewaddress("sprout")
        try:
            self.nodes[0].z_getnewdiversifiedaddress(sproutaddress)
            raise AssertionError("Should have failed")
        except JSONRPCException as e:
            assert_equal("Invalid Sapling zaddr", e.error['message'])

        try:
            self.nodes[0].z_getalldiversifiedaddresses(sproutaddress)
            raise AssertionError("Should have failed")
        except JSONRPCException as e:
            assert_equal("Invalid Sapling zaddr", e.error['message'])
    def run_test(self):
        """Check fork choice under finalization: the node must repeatedly
        switch ("zig-zag") to whichever fork carries the most recent
        justified/finalized epoch, not merely the longest chain.
        """
        def sync_node_to_fork(node, fork, force=False):
            """Connect `node` to `fork`, wait until it adopts the fork's tip,
            then disconnect again. force=True restarts the node with a clean
            chainstate first, forcing a full resync."""
            if force:
                self.restart_node(node.index, cleanup=True)
                node.importmasterkey(
                    regtest_mnemonics[node.index]['mnemonics'])
            connect_nodes(node, fork.index)
            block_hash = fork.getblockhash(fork.getblockcount())
            node.waitforblock(block_hash, 5000)
            assert_equal(node.getblockhash(node.getblockcount()), block_hash)
            disconnect_nodes(node, fork.index)

        def generate_epoch_and_vote(node, finalizer, finalizer_address,
                                    prevtx):
            """Generate one 5-block epoch on `node`, broadcasting a finalizer
            vote after the first block. `prevtx` is the previous vote/deposit
            tx used as the vote's input. Returns the vote tx hash."""
            # Votes are only cast at an epoch boundary (epoch length 5).
            assert node.getblockcount() % 5 == 0
            fs = node.getfinalizationstate()
            checkpoint = node.getbestblockhash()
            generate_block(node)
            vtx = make_vote_tx(finalizer,
                               finalizer_address,
                               checkpoint,
                               source_epoch=fs['lastJustifiedEpoch'],
                               target_epoch=fs['currentEpoch'],
                               input_tx_id=prevtx)
            node.sendrawtransaction(vtx)
            generate_block(node, count=4)
            # Recover the vote txid from its hex for the next vote's input.
            vtx = FromHex(CTransaction(), vtx)
            vtx.rehash()
            return vtx.hash

        node = self.nodes[0]
        fork1 = self.nodes[1]
        fork2 = self.nodes[2]
        finalizer = self.nodes[3]

        node.importmasterkey(regtest_mnemonics[0]['mnemonics'])
        fork1.importmasterkey(regtest_mnemonics[1]['mnemonics'])
        fork2.importmasterkey(regtest_mnemonics[2]['mnemonics'])
        finalizer.importmasterkey(regtest_mnemonics[3]['mnemonics'])

        connect_nodes(node, fork1.index)
        connect_nodes(node, fork2.index)
        connect_nodes(node, finalizer.index)

        # leave IBD
        self.generate_sync(node, 1)

        # Fund the finalizer's deposit so it becomes a validator.
        finalizer_address = finalizer.getnewaddress('', 'legacy')
        deptx = finalizer.deposit(finalizer_address, 1500)
        self.wait_for_transaction(deptx)

        # leave insta justification
        #                   -  fork1
        # F    F    F       |
        # e0 - e1 - e2 - e3 -  node
        #                   |
        #                   -  fork2
        generate_block(node, count=14)
        assert_equal(node.getblockcount(), 15)
        sync_blocks([node, finalizer])
        assert_finalizationstate(
            node, {
                'currentDynasty': 1,
                'currentEpoch': 3,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 0
            })
        sync_blocks(self.nodes)
        # Isolate every node so the forks can diverge independently.
        disconnect_nodes(node, fork1.index)
        disconnect_nodes(node, fork2.index)
        disconnect_nodes(node, finalizer.index)

        # create first justified epoch on fork1
        #                     J
        #                   - e4 - e5 - e6 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |
        #                   -  fork2

        generate_block(fork1, count=5)
        vtx1 = generate_epoch_and_vote(fork1, finalizer, finalizer_address,
                                       deptx)
        generate_block(fork1, count=5)
        assert_equal(fork1.getblockcount(), 30)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        sync_node_to_fork(node, fork1)

        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info('node successfully switched to the justified fork')

        # create longer justified epoch on fork2
        # node must switch ("zig") to this fork
        #                     J
        #                   - e4 - e5 - e6 fork1
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J
        #                   - e4 - e5 - e6 fork2 node

        generate_block(fork2, count=10)
        vtx2 = generate_epoch_and_vote(fork2, finalizer, finalizer_address,
                                       deptx)
        assert_equal(fork2.getblockcount(), 30)
        assert_finalizationstate(
            fork2, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        sync_node_to_fork(node, fork2)

        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info(
            'node successfully switched to the longest justified fork')

        # create longer justified epoch on the previous fork1
        # node must switch ("zag") to this fork
        #                     J              J
        #                   - e4 - e5 - e6 - e7 - e8 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J
        #                   - e4 - e5 - e6 fork2
        generate_block(fork1, count=5)
        # The finalizer must be on fork1 to vote on it.
        sync_node_to_fork(finalizer, fork1)
        vtx1 = generate_epoch_and_vote(fork1, finalizer, finalizer_address,
                                       vtx1)
        assert_equal(fork1.getblockcount(), 40)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 2,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        assert_not_equal(fork1.getbestblockhash(), fork2.getbestblockhash())
        sync_node_to_fork(node, fork1)
        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info(
            'node successfully switched back to the longest justified fork')

        # UNIT-E TODO: node must follow longest finalized chain
        # node follows longest finalization
        #                     J              J
        #                   - e4 - e5 - e6 - e7 - e8 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J    F
        #                   - e4 - e5 - e6 - e7 fork2
        # force=True: the finalizer already voted on fork1's later epochs, so
        # it is restarted with a clean state to vote on fork2 again.
        sync_node_to_fork(finalizer, fork2, force=True)
        vtx2 = generate_epoch_and_vote(fork2, finalizer, finalizer_address,
                                       vtx2)
        assert_equal(fork2.getblockcount(), 35)
        assert_finalizationstate(
            fork2, {
                'currentDynasty': 2,
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 6,
                'validators': 1
            })
# Example #5 — separator artifact from the example aggregator (vote count: 0)
    def _zmq_test(self):
        """Verify hashtx/hashblock/hashwallettx zmq notifications and their
        per-topic sequence numbers."""

        def recv_parts():
            # One multipart notification: [topic, body, ..., <u32 sequence>]
            parts = self.zmqSubSocket.recv_multipart()
            return parts[0], parts[1], struct.unpack('<I', parts[-1])[-1]

        genhashes = self.nodes[0].generate(1)
        self.sync_all()

        self.log.info("Wait for tx")
        topic, body, seq = recv_parts()
        assert_equal(topic, b"hashtx")
        assert_equal(seq, 0)  # must be sequence 0 on hashtx

        self.log.info("Wait for wallet tx")
        topic, body, seq = recv_parts()
        assert_equal(topic, b"hashwallettx-block")
        assert_equal(seq, 0)  # must be sequence 0 on hashwallettx

        self.log.info("Wait for block")
        topic, body, seq = recv_parts()
        assert_equal(seq, 0)  # must be sequence 0 on hashblock
        blkhash = bytes_to_hex_str(body)

        # blockhash from generate must be equal to the hash received over zmq
        assert_equal(genhashes[0], blkhash)

        self.log.info("Generate 10 blocks (and 10 coinbase txes)")
        n = 10
        genhashes = self.nodes[1].generate(n)
        self.sync_all()

        zmqHashes = []
        blockcount = 0
        # Expect one hashtx and one hashblock notification per block.
        for _ in range(2 * n):
            topic, body, seq = recv_parts()
            # As originated from another node, these must not belong to the
            # node0 wallet.
            assert_not_equal(topic, b"hashwallettx-block")
            assert_not_equal(topic, b"hashwallettx-mempool")
            if topic == b"hashblock":
                zmqHashes.append(bytes_to_hex_str(body))
                assert_equal(seq, blockcount + 1)
                blockcount += 1

        # Every blockhash from generate must equal the hash received over
        # zmq, in order.
        for idx, expected in enumerate(genhashes):
            assert_equal(expected, zmqHashes[idx])

        self.log.info("Wait for tx from second node")
        # test tx from a second node
        hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(),
                                              1.0)
        self.sync_all()

        # now we should receive a zmq msg because the tx was broadcast
        topic, body, seq = recv_parts()
        assert_equal(topic, b"hashtx")
        hashZMQ = bytes_to_hex_str(body)
        assert_equal(seq, blockcount + 1)

        # txid from sendtoaddress must be equal to the hash received over zmq
        assert_equal(hashRPC, hashZMQ)

        # The wallet sees the incoming payment, so a mempool wallet
        # notification follows.
        topic, body, seq = recv_parts()
        assert_equal(topic, b"hashwallettx-mempool")
        hashZMQ = bytes_to_hex_str(body)
        assert_equal(seq, 1)
        assert_equal(hashRPC, hashZMQ)
# Example #6 — separator artifact from the example aggregator (vote count: 0)
 def run_test(self):
     """Bring up two node groups (nodes 0-4 with masternodes, nodes 5-9
     without) and check that dPoS vice-blocks, round-votes and tx-votes
     only appear in the masternode group.
     """
     super(dPoS_p2pMessagesTest, self).run_test()
     mns = self.create_masternodes([1, 2, 3, 4])
     self.stop_nodes()
     self.start_masternodes()
     # First group (4 masternodes)
     connect_nodes_bi(self.nodes, 0, 1)
     connect_nodes_bi(self.nodes, 1, 2)
     connect_nodes_bi(self.nodes, 2, 3)
     connect_nodes_bi(self.nodes, 3, 4)
     connect_nodes_bi(self.nodes, 4, 0)
     # Second group (0 masternodes)
     connect_nodes_bi(self.nodes, 5, 6)
     connect_nodes_bi(self.nodes, 6, 7)
     connect_nodes_bi(self.nodes, 7, 8)
     connect_nodes_bi(self.nodes, 8, 9)
     connect_nodes_bi(self.nodes, 9, 5)
     self.sync_nodes(0, 5)
     self.sync_nodes(5, 10)
     # Let the dPoS layer settle; nothing should have been voted on yet.
     time.sleep(15)
     for node in self.nodes:
         assert_equal(len(node.dpos_listviceblocks()), 0)
         assert_equal(len(node.dpos_listroundvotes()), 0)
         assert_equal(len(node.dpos_listtxvotes()), 0)
     # Create one instant tx in each group (4th arg: instant=True).
     tx1 = self.create_transaction(1, self.nodes[9].getnewaddress(), 4.4,
                                   True)
     tx2 = self.create_transaction(6, self.nodes[0].getnewaddress(), 4.4,
                                   True)
     time.sleep(2)
     # Only the masternode group should see the instant transaction.
     txs1 = self.nodes[2].i_listtransactions()
     txs2 = self.nodes[7].i_listtransactions()
     assert_equal(len(txs1), 1)
     assert_equal(len(txs2), 0)
     assert_equal(txs1[0]["hash"], tx1)
     self.nodes[3].generate(1)
     self.nodes[8].generate(1)
     time.sleep(2)
     self.sync_nodes(0, 5)
     self.sync_nodes(5, 10)
     vblocks = [node.dpos_listviceblocks() for node in self.nodes]
     rdvotes = [node.dpos_listroundvotes() for node in self.nodes]
     txvotes = [node.dpos_listtxvotes() for node in self.nodes]
     # Split each per-node result list into the two 5-node groups.
     # Bug fix: use integer division (//) — `/` yields a float in
     # Python 3, which is not a valid slice index (TypeError).
     vblocks_left = vblocks[0:len(vblocks) // 2]
     vblocks_right = vblocks[len(vblocks) // 2:]
     assert_equal(len(vblocks_left), len(vblocks_right))
     rdvotes_left = rdvotes[0:len(rdvotes) // 2]
     rdvotes_right = rdvotes[len(rdvotes) // 2:]
     assert_equal(len(rdvotes_left), len(rdvotes_right))
     # Bug fix: the tx-vote halves were sliced from `vblocks` (and with
     # `len(rdvotes)`) — slice the `txvotes` list itself.
     txvotes_left = txvotes[0:len(txvotes) // 2]
     txvotes_right = txvotes[len(txvotes) // 2:]
     assert_equal(len(txvotes_left), len(txvotes_right))
     # The two groups must have diverged.
     assert_not_equal(vblocks_left, vblocks_right)
     assert_not_equal(rdvotes_left, rdvotes_right)
     assert_not_equal(txvotes_left, txvotes_right)
     # Masternode group: every node has vice-blocks, round-votes, tx-votes.
     for x in vblocks_left:
         assert_not_equal(len(x), 0)
     for x in rdvotes_left:
         assert_not_equal(len(x), 0)
     for x in txvotes_left:
         assert_not_equal(len(x), 0)
     # Group without masternodes: vice-blocks and tx-votes still reach the
     # nodes, but no round-votes are produced there.
     for x in vblocks_right:
         assert_not_equal(len(x), 0)
     for x in rdvotes_right:
         assert_equal(len(x), 0)
     for x in txvotes_right:
         assert_not_equal(len(x), 0)