Example #1
    def repair_masternodes(self, restart):
        # Repair all nodes
        for mn in self.mninfo:
            if self.check_banned(mn) or self.check_punished(mn):
                addr = self.nodes[0].getnewaddress()
                self.nodes[0].sendtoaddress(addr, 0.1)
                self.nodes[0].protx_update_service(mn.proTxHash, '127.0.0.1:%d' % p2p_port(mn.node.index), mn.keyOperator, "", addr)
                # Make sure this tx is "safe" to mine even when InstantSend and ChainLocks are no longer functional
                self.bump_mocktime(60 * 10 + 1)
                self.generate(self.nodes[0], 1, sync_fun=self.no_op)
                assert(not self.check_banned(mn))

                if restart:
                    self.stop_node(mn.node.index)
                    time.sleep(0.5)
                    self.start_masternode(mn, extra_args=["-mocktime=" + str(self.mocktime)])
                else:
                    mn.node.setnetworkactive(True)
            self.connect_nodes(mn.node.index, 0)
        self.sync_all()

        # Isolate and re-connect all MNs (otherwise there might be open connections with no MNAUTH for MNs which were banned before)
        for mn in self.mninfo:
            mn.node.setnetworkactive(False)
            self.wait_until(lambda: mn.node.getconnectioncount() == 0)
            mn.node.setnetworkactive(True)
            force_finish_mnsync(mn.node)
            self.connect_nodes(mn.node.index, 0)
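Example #1 repairs PoSe-banned or punished masternodes by issuing a protx_update_service transaction for each of them. It relies on check_banned and check_punished helpers; a minimal sketch of those, assuming a protx_info RPC whose state object exposes PoSeBanHeight and PoSePenalty (as in Dash-derived codebases), could look like this:

def check_banned(self, mn):
    # A masternode is PoSe-banned once PoSeBanHeight is set; -1 means not banned.
    info = self.nodes[0].protx_info(mn.proTxHash)
    return info['state']['PoSeBanHeight'] != -1

def check_punished(self, mn):
    # Any non-zero PoSe penalty counts as punished, even if the MN is not banned yet.
    info = self.nodes[0].protx_info(mn.proTxHash)
    return info['state']['PoSePenalty'] > 0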
Example #2
 def reset_probe_timeouts(self):
     # Make sure all masternodes will reconnect/re-probe
     self.bump_mocktime(50 * 60 + 1)
     # Sleep a couple of seconds to let the mnsync tick happen
     time.sleep(2)
     for i in range(len(self.nodes)):
         force_finish_mnsync(self.nodes[i])
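Every snippet here calls force_finish_mnsync to skip the masternode sync phase. A minimal sketch of such a helper, assuming the node exposes a Dash-style mnsync RPC with "status" and "next" sub-commands, would be:

def force_finish_mnsync(node):
    # Nodes refuse masternode-related work until IsSynced is true,
    # so step the sync state machine forward instead of waiting on timers.
    while not node.mnsync("status")["IsSynced"]:
        node.mnsync("next")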
Example #3
 def start_controller_node(self):
     self.log.info("starting controller node")
     self.start_node(0, extra_args=self.extra_args)
     force_finish_mnsync(self.nodes[0])
     for i in range(1, self.num_nodes):
         if i < len(self.nodes) and self.nodes[i] is not None and self.nodes[i].process is not None:
             self.connect_nodes(i, 0)
Example #4
    def run_test(self):
        self.sync_blocks(self.nodes, timeout=60 * 5)
        self.confirm_mns()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        q = self.mine_quorum()

        self.log.info("checking for old intra quorum connections")
        total_count = 0
        for mn in self.get_quorum_masternodes(q):
            count = self.get_mn_connection_count(mn.node)
            total_count += count
            assert_greater_than_or_equal(count, 2)
        assert (total_count < 40)

        self.check_reconnects(2)

        self.log.info("activating SPORK_23_QUORUM_POSE")
        self.nodes[0].spork("SPORK_23_QUORUM_POSE", 0)
        self.wait_for_sporks_same()

        self.log.info(
            "mining one block and waiting for all members to connect to each other"
        )
        self.nodes[0].generate(1)
        for mn in self.get_quorum_masternodes(q):
            self.wait_for_mnauth(mn.node, 4)

        self.log.info(
            "mine a new quorum and verify that all members connect to each other"
        )
        q = self.mine_quorum()

        self.log.info("checking that all MNs got probed")
        for mn in self.get_quorum_masternodes(q):
            self.wait_until(
                lambda: self.get_mn_probe_count(mn.node, q, False) == 4)

        self.log.info("checking that probes age")
        self.bump_mocktime(120)
        for mn in self.get_quorum_masternodes(q):
            self.wait_until(
                lambda: self.get_mn_probe_count(mn.node, q, False) == 0)

        self.log.info("mine a new quorum and re-check probes")
        q = self.mine_quorum()
        for mn in self.get_quorum_masternodes(q):
            self.wait_until(
                lambda: self.get_mn_probe_count(mn.node, q, True) == 4)

        self.log.info("Activating SPORK_21_QUORUM_ALL_CONNECTED")
        self.nodes[0].spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
        self.wait_for_sporks_same()

        self.check_reconnects(4)
Example #5
 def restart_mn(self, mn, reindex=False):
     args = self.extra_args[mn.nodeIdx] + [
         '-masternodeblsprivkey=%s' % mn.keyOperator
     ]
     if reindex:
         args.append('-reindex')
     self.restart_node(mn.nodeIdx, args)
     force_finish_mnsync(mn.node)
     connect_nodes(mn.node, 0)
     self.sync_blocks()
Example #6
    def start_mn(self, mn):
        start_idx = len(self.nodes) - 1
        # SYSCOIN: add nodes individually with an index offset and custom args
        for idx in range(start_idx, mn.idx):
            self.add_nodes(1, offset=idx + 1)

        extra_args = ['-masternodeblsprivkey=%s' % mn.blsMnkey]
        self.start_node(mn.idx, extra_args=self.extra_args + extra_args)
        force_finish_mnsync(self.nodes[mn.idx])
        mn.node = self.nodes[mn.idx]
        self.connect_nodes(mn.node.index, 0)
        self.nodes[mn.idx].mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
        self.sync_all()
Example #7
    def run_test(self):
        spork_default_state = self.get_test_spork_state(self.nodes[0])
        # check test spork default state matches on all nodes
        assert (self.get_test_spork_state(
            self.nodes[1]) == spork_default_state)
        assert (self.get_test_spork_state(
            self.nodes[2]) == spork_default_state)

        # check spork propagation for connected nodes
        spork_new_state = not spork_default_state
        self.set_test_spork_state(self.nodes[0], spork_new_state)
        time.sleep(0.1)
        self.wait_until(lambda: self.get_test_spork_state(self.nodes[1]),
                        timeout=10)
        self.wait_until(lambda: self.get_test_spork_state(self.nodes[0]),
                        timeout=10)

        # restart nodes to check spork persistence
        self.stop_node(0)
        self.stop_node(1)
        self.start_node(0)
        self.start_node(1)
        self.connect_nodes(0, 1)
        assert (self.get_test_spork_state(self.nodes[0]) == spork_new_state)
        assert (self.get_test_spork_state(self.nodes[1]) == spork_new_state)

        # Generate one block to kick off masternode sync, which also starts sporks syncing for node2
        self.generate(self.nodes[1], 1, sync_fun=self.no_op)

        # connect new node and check spork propagation after restoring from cache
        self.connect_nodes(1, 2)
        time.sleep(0.1)
        self.wait_until(lambda: self.get_test_spork_state(self.nodes[2]),
                        timeout=12)

        # turn off and check
        self.bump_mocktime(1)
        force_finish_mnsync(self.nodes[0])
        force_finish_mnsync(self.nodes[1])
        self.set_test_spork_state(self.nodes[0], False)

        time.sleep(0.1)
        self.wait_until(lambda: not self.get_test_spork_state(self.nodes[1]),
                        timeout=10)
        self.wait_until(lambda: not self.get_test_spork_state(self.nodes[2]),
                        timeout=10)
        self.wait_until(lambda: not self.get_test_spork_state(self.nodes[0]),
                        timeout=10)
Example #8
 def restart_mn(self,
                mn,
                reindex=False,
                qvvec_sync=[],
                qdata_recovery_enabled=True):
     args = self.extra_args[mn.nodeIdx] + [
         '-masternodeblsprivkey=%s' % mn.keyOperator,
         '-llmq-data-recovery=%d' % qdata_recovery_enabled
     ]
     if reindex:
         args.append('-reindex')
     for llmq_sync in qvvec_sync:
         args.append('-llmq-qvvec-sync=%s:%d' %
                     (llmq_type_strings[llmq_sync[0]], llmq_sync[1]))
     self.restart_node(mn.nodeIdx, args)
     force_finish_mnsync(mn.node)
     connect_nodes(mn.node, 0)
     self.sync_blocks()
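A hypothetical call site for this helper (the masternode picked from mninfo is purely illustrative):

# Restart the first masternode, force a reindex and keep LLMQ data recovery enabled.
self.restart_mn(self.mninfo[0], reindex=True)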
Example #9
 def run_test(self):
     # SYSCOIN
     for i in range(len(self.nodes)):
         force_finish_mnsync(self.nodes[i])
     self.test_buffer()
     self.test_duplicate_version_msg()
     self.test_magic_bytes()
     self.test_checksum()
     self.test_size()
     self.test_msgtype()
     self.test_addrv2_empty()
     self.test_addrv2_no_addresses()
     self.test_addrv2_too_long_address()
     self.test_addrv2_unrecognized_network()
     self.test_oversized_inv_msg()
     self.test_oversized_getdata_msg()
     self.test_oversized_headers_msg()
     self.test_resource_exhaustion()
Example #10
    def run_test(self):
        self.sync_blocks(self.nodes, timeout=60*5)
        self.confirm_mns()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        # check if mining quorums with all nodes being online succeeds without punishment/banning
        self.test_no_banning()

        # Now let's isolate MNs one by one and verify that punishment/banning happens
        self.test_banning(self.isolate_mn, 1)

        self.repair_masternodes(True)

        self.nodes[0].spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
        self.nodes[0].spork("SPORK_23_QUORUM_POSE", 0)
        self.wait_for_sporks_same()

        self.reset_probe_timeouts()

        # Make sure no banning happens with spork21 enabled
        self.test_no_banning()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        # Let's restart masternodes with closed ports and verify that they get banned even though they are connected to other MNs (via outbound connections)
        self.test_banning(self.close_mn_port, 3)

        self.repair_masternodes(True)
        self.reset_probe_timeouts()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.test_banning(self.force_old_mn_proto, 3)

        # With PoSe off there should be no punishing for non-reachable and outdated nodes
        self.nodes[0].spork("SPORK_23_QUORUM_POSE", 4070908800)
        self.wait_for_sporks_same()

        self.repair_masternodes(True)
        self.force_old_mn_proto(self.mninfo[0])
        self.test_no_banning(3)

        self.repair_masternodes(True)
        self.close_mn_port(self.mninfo[0])
        self.test_no_banning(3)
Example #11
    def run_test(self):
        self.sync_blocks(self.nodes, timeout=60 * 5)
        self.confirm_mns()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        if self.options.spork21:
            self.nodes[0].spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
        self.wait_for_sporks_same()

        self.mine_quorum()
        if self.options.spork21:
            assert self.mninfo[0].node.getconnectioncount() == 5
        id = "0000000000000000000000000000000000000000000000000000000000000001"
        msgHash = "0000000000000000000000000000000000000000000000000000000000000002"
        msgHashConflict = "0000000000000000000000000000000000000000000000000000000000000003"

        def check_sigs(hasrecsigs, isconflicting1, isconflicting2):
            for mn in self.mninfo:
                if mn.node.quorum_hasrecsig(100, id, msgHash) != hasrecsigs:
                    return False
                if mn.node.quorum_isconflicting(100, id,
                                                msgHash) != isconflicting1:
                    return False
                if mn.node.quorum_isconflicting(
                        100, id, msgHashConflict) != isconflicting2:
                    return False
            return True

        def wait_for_sigs(hasrecsigs, isconflicting1, isconflicting2, timeout):
            t = time.time()
            while time.time() - t < timeout:
                if check_sigs(hasrecsigs, isconflicting1, isconflicting2):
                    return
                self.bump_mocktime(2)
                time.sleep(1)
            raise AssertionError("wait_for_sigs timed out")

        def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2,
                                 timeout):
            t = time.time()
            while time.time() - t < timeout:
                assert (check_sigs(hasrecsigs, isconflicting1, isconflicting2))
                time.sleep(0.1)

        # Initial state
        wait_for_sigs(False, False, False, 1)
        # Sign first share without any optional parameter, should not result in recovered sig
        self.mninfo[0].node.quorum_sign(100, id, msgHash)

        assert_sigs_nochange(False, False, False, 3)
        # Sign second share and test optional quorumHash parameter, should not result in recovered sig

        # 1. Providing an invalid quorum hash should fail and cause no changes for sigs
        assert (not self.mninfo[1].node.quorum_sign(100, id, msgHash, msgHash))
        assert_sigs_nochange(False, False, False, 3)
        # 2. Providing a valid quorum hash should succeed and cause no changes for sigs
        quorumHash = self.mninfo[1].node.quorum_selectquorum(100,
                                                             id)["quorumHash"]
        assert (self.mninfo[1].node.quorum_sign(100, id, msgHash, quorumHash))
        assert_sigs_nochange(False, False, False, 3)
        # Sign third share and test optional submit parameter if spork21 is enabled, should result in recovered sig
        # and conflict for msgHashConflict
        if self.options.spork21:
            # 1. Providing an invalid quorum hash and set submit=false, should throw an error
            assert_raises_rpc_error(-8, 'quorum not found',
                                    self.mninfo[2].node.quorum_sign, 100, id,
                                    msgHash, id, False)
            # 2. Providing a valid quorum hash and set submit=false, should return a valid sigShare object
            sig_share_rpc_1 = self.mninfo[2].node.quorum_sign(
                100, id, msgHash, quorumHash, False)
            sig_share_rpc_2 = self.mninfo[2].node.quorum_sign(
                100, id, msgHash, "", False)
            assert_equal(sig_share_rpc_1, sig_share_rpc_2)
            assert_sigs_nochange(False, False, False, 3)
            # 3. Sending the sig share received from RPC to the recovery member through P2P interface, should result
            # in a recovered sig
            sig_share = CSigShare()
            sig_share.llmqType = int(sig_share_rpc_1["llmqType"])
            sig_share.quorumHash = int(sig_share_rpc_1["quorumHash"], 16)
            sig_share.quorumMember = int(sig_share_rpc_1["quorumMember"])
            sig_share.id = int(sig_share_rpc_1["id"], 16)
            sig_share.msgHash = int(sig_share_rpc_1["msgHash"], 16)
            sig_share.sigShare = hex_str_to_bytes(sig_share_rpc_1["signature"])
            for i in range(len(self.mninfo)):
                assert self.mninfo[i].node.getconnectioncount() == 5
            # Get the current recovery member of the quorum
            q = self.nodes[0].quorum_selectquorum(100, id)
            mn = self.get_mninfo(q['recoveryMembers'][0])
            # Open a P2P connection to it
            p2p_interface = mn.node.add_p2p_connection(P2PInterface())
            # Send the last required QSIGSHARE message to the recovery member
            p2p_interface.send_message(msg_qsigshare([sig_share]))
        else:
            # If spork21 is not enabled just sign regularly
            self.mninfo[2].node.quorum_sign(100, id, msgHash)

        wait_for_sigs(True, False, True, 15)

        self.bump_mocktime(5)
        wait_for_sigs(True, False, True, 15)
        if self.options.spork21:
            mn.node.disconnect_p2ps()
        # Test `quorum verify` rpc
        node = self.mninfo[0].node
        recsig = node.quorum_getrecsig(100, id, msgHash)
        # Find quorum automatically
        height = node.getblockcount()
        height_bad = node.getblockheader(recsig["quorumHash"])["height"]
        hash_bad = node.getblockhash(0)
        assert (node.quorum_verify(100, id, msgHash, recsig["sig"]))
        assert (node.quorum_verify(100, id, msgHash, recsig["sig"], "",
                                   height))
        assert (not node.quorum_verify(100, id, msgHashConflict,
                                       recsig["sig"]))
        assert not node.quorum_verify(100, id, msgHash, recsig["sig"], "",
                                      height_bad)
        # Use specific quorum
        assert (node.quorum_verify(100, id, msgHash, recsig["sig"],
                                   recsig["quorumHash"]))
        assert (not node.quorum_verify(100, id, msgHashConflict, recsig["sig"],
                                       recsig["quorumHash"]))
        assert_raises_rpc_error(-8, "quorum not found", node.quorum_verify,
                                100, id, msgHash, recsig["sig"], hash_bad)

        # Mine one more quorum, so that we have 2 active ones, nothing should change
        self.mine_quorum()
        assert_sigs_nochange(True, False, True, 3)

        # Create a recovered sig for the oldest quorum i.e. the active quorum which will be moved
        # out of the active set when a new quorum appears
        request_id = 2
        oldest_quorum_hash = node.quorum_list()["llmq_test"][-1]
        # Search for a request id which selects the last active quorum
        while True:
            selected_hash = node.quorum_selectquorum(100, "%064x" %
                                                     request_id)["quorumHash"]
            if selected_hash == oldest_quorum_hash:
                break
            else:
                request_id += 1
        # Produce the recovered signature
        id = "%064x" % request_id
        for mn in self.mninfo:
            mn.node.quorum_sign(100, id, msgHash)
        # And mine a quorum to move the quorum which signed out of the active set
        self.mine_quorum()
        # Verify the recovered sig. This triggers the "signHeight + dkgInterval" verification
        recsig = node.quorum_getrecsig(100, id, msgHash)
        assert node.quorum_verify(100, id, msgHash, recsig["sig"], "",
                                  node.getblockcount())

        recsig_time = self.mocktime

        # Mine 4 more quorums, so that the one used for the recovered sig becomes inactive; nothing should change
        self.mine_quorum()
        self.mine_quorum()
        self.mine_quorum()
        self.mine_quorum()
        assert_sigs_nochange(True, False, True, 3)

        # fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid
        self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) -
                           self.mocktime)
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.bump_mocktime(5)
        # Cleanup starts every 5 seconds
        wait_for_sigs(True, False, True, 15)
        # fast forward 1 day, recovered sig should not be valid anymore
        self.bump_mocktime(int(60 * 60 * 24 * 1))
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.bump_mocktime(5)
        # Cleanup starts every 5 seconds
        wait_for_sigs(False, False, False, 15)
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        for i in range(2):
            self.mninfo[i].node.quorum_sign(100, id, msgHashConflict)
        for i in range(2, 5):
            self.mninfo[i].node.quorum_sign(100, id, msgHash)
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.bump_mocktime(5)
        wait_for_sigs(True, False, True, 15)

        if self.options.spork21:
            id = "%064x" % (request_id + 1)

            # Isolate the node that is responsible for the recovery of a signature and assert that recovery fails
            q = self.nodes[0].quorum_selectquorum(100, id)
            mn = self.get_mninfo(q['recoveryMembers'][0])
            mn.node.setnetworkactive(False)
            self.wait_until(lambda: mn.node.getconnectioncount() == 0)
            for i in range(4):
                self.mninfo[i].node.quorum_sign(100, id, msgHash)
            assert_sigs_nochange(False, False, False, 3)
            # Need to re-connect so that it later gets the recovered sig
            mn.node.setnetworkactive(True)
            self.connect_nodes(mn.node.index, 0)
            force_finish_mnsync(mn.node)
            # Make sure intra-quorum connections were also restored
            self.bump_mocktime(1)  # need this to bypass quorum connection retry timeout
            wait_until_helper(lambda: mn.node.getconnectioncount() == 5,
                              timeout=10,
                              sleep=2)
            mn.node.ping()
            self.wait_until(lambda: all('pingwait' not in peer
                                        for peer in mn.node.getpeerinfo()))
            self.nodes[0].generate(1)
            self.sync_blocks()
            self.bump_mocktime(5)
            wait_for_sigs(True, False, True, 15)
Example #12
    def run_test(self):
        self.sync_blocks(self.nodes, timeout=60 * 5)
        self.confirm_mns()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()

        self.log.info("Mine one quorum without simulating any errors")
        qh = self.mine_quorum()
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)

        self.log.info("Lets omit the contribution")
        self.mninfo[0].node.quorum_dkgsimerror('contribution-omit', 1)
        qh = self.mine_quorum(expected_contributions=2)
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, False)

        self.log.info(
            "Lets lie in the contribution but provide a correct justification")
        self.mninfo[0].node.quorum_dkgsimerror('contribution-omit', 0)
        self.mninfo[0].node.quorum_dkgsimerror('contribution-lie', 1)
        qh = self.mine_quorum(expected_contributions=3,
                              expected_complaints=2,
                              expected_justifications=1)
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)

        self.log.info(
            "Lets lie in the contribution and then omit the justification")
        self.mninfo[0].node.quorum_dkgsimerror('justify-omit', 1)
        qh = self.mine_quorum(expected_contributions=3, expected_complaints=2)
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, False)

        self.log.info("Heal some damage (don't get PoSe banned)")
        self.heal_masternodes(33)

        self.log.info(
            "Lets lie in the contribution and then also lie in the justification"
        )
        self.mninfo[0].node.quorum_dkgsimerror('justify-omit', 0)
        self.mninfo[0].node.quorum_dkgsimerror('justify-lie', 1)
        qh = self.mine_quorum(expected_contributions=3,
                              expected_complaints=2,
                              expected_justifications=1)
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, False)

        self.log.info("Lets lie about another MN")
        self.mninfo[0].node.quorum_dkgsimerror('contribution-lie', 0)
        self.mninfo[0].node.quorum_dkgsimerror('justify-lie', 0)
        self.mninfo[0].node.quorum_dkgsimerror('complain-lie', 1)
        qh = self.mine_quorum(expected_contributions=3,
                              expected_complaints=1,
                              expected_justifications=2)
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)

        self.log.info("Lets omit 1 premature commitments")
        self.mninfo[0].node.quorum_dkgsimerror('complain-lie', 0)
        self.mninfo[0].node.quorum_dkgsimerror('commit-omit', 1)
        qh = self.mine_quorum(expected_contributions=3,
                              expected_complaints=0,
                              expected_justifications=0,
                              expected_commitments=2)
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)

        self.log.info("Lets lie in 1 premature commitments")
        self.mninfo[0].node.quorum_dkgsimerror('commit-omit', 0)
        self.mninfo[0].node.quorum_dkgsimerror('commit-lie', 1)
        qh = self.mine_quorum(expected_contributions=3,
                              expected_complaints=0,
                              expected_justifications=0,
                              expected_commitments=2)
        self.assert_member_valid(qh, self.mninfo[0].proTxHash, True)
Example #13
    def run_test(self):
        self.sync_blocks(self.nodes, timeout=60 * 5)
        self.confirm_mns()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])

        self.nodes[0].generate(10)
        self.sync_blocks(self.nodes, timeout=60 * 5)
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 4070908800)
        self.wait_for_sporks_same()

        self.log.info("Mining 4 quorums")
        for i in range(4):
            self.mine_quorum()
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.wait_for_sporks_same()
        self.log.info("Mine single block, wait for chainlock")
        self.nodes[0].generate(1)
        self.wait_for_chainlocked_block_all_nodes(
            self.nodes[0].getbestblockhash())

        self.bump_mocktime(1)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info("testing normal signing with partially known TX")
        self.isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        self.reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # push the tx directly via rpc
        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))

        self.log.info("testing retroactive signing with unknown TX")
        self.isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction(
            [], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransactionwithwallet(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make node 3 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[3].generatetoaddress(
            1, self.nodes[0].getnewaddress())[0]
        self.reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_chainlocked_block_all_nodes(block)
        self.nodes[0].setmocktime(self.mocktime)

        self.log.info("testing retroactive signing with partially known TX")
        self.isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        self.reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # Make node0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        assert (txid in self.nodes[0].getblock(block, 1)['tx'])
        self.wait_for_chainlocked_block_all_nodes(block)

        self.log.info(
            "testing retroactive signing with partially known TX and all nodes session timeout"
        )
        self.test_all_nodes_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_all_nodes_session_timeout(True)

        self.log.info(
            "testing retroactive signing with partially known TX and single node session timeout"
        )
        self.test_single_node_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_single_node_session_timeout(True)
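The test above waits on wait_for_chainlocked_block_all_nodes. A minimal sketch of that pair of helpers, assuming getblock reports a chainlock boolean on Dash-derived nodes, might look like:

def wait_for_chainlocked_block(self, node, block_hash, timeout=15):
    # Poll until this node reports the block as chainlocked.
    def check_chainlocked():
        try:
            return node.getblock(block_hash)["chainlock"]
        except Exception:
            return False
    self.wait_until(check_chainlocked, timeout=timeout)

def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15):
    # Every node in the test must observe the chainlock.
    for node in self.nodes:
        self.wait_for_chainlocked_block(node, block_hash, timeout=timeout)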
Example #14
    def run_test(self):
        self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
        self.nodes[0].p2ps[0].wait_for_verack()

        self.confirm_mns()

        null_hash = format(0, "064x")

        # Check if a diff with the genesis block as base returns all MNs
        expectedUpdated = [mn.proTxHash for mn in self.mninfo]
        mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated)
        expectedUpdated2 = expectedUpdated + []

        # Register one more MN, but don't start it (that would fail as DashTestFramework doesn't support this atm)
        baseBlockHash = self.nodes[0].getbestblockhash()
        self.prepare_masternode(self.mn_count)
        new_mn = self.mninfo[self.mn_count]

        # Now test if that MN appears in a diff when the base block is the one just before MN registration
        expectedDeleted = []
        expectedUpdated = [new_mn.proTxHash]
        mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        assert(mnList[new_mn.proTxHash].confirmedHash == 0)
        # Now let the MN get enough confirmations and verify that the MNLISTDIFF now has confirmedHash != 0
        self.confirm_mns()
        mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        assert(mnList[new_mn.proTxHash].confirmedHash != 0)

        # Spend the collateral of the previously added MN and test if it appears in "deletedMNs"
        expectedDeleted = [new_mn.proTxHash]
        expectedUpdated = []
        baseBlockHash2 = self.nodes[0].getbestblockhash()
        self.remove_masternode(self.mn_count)
        mnList = self.test_getmnlistdiff(baseBlockHash2, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)

        # When comparing genesis and best block, we shouldn't see the previously added and then deleted MN
        mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated2)

        #############################
        # Now start testing quorum commitment merkle roots

        self.generate(self.nodes[0], 1)
        oldhash = self.nodes[0].getbestblockhash()
        # Have to disable ChainLocks here because they won't let you invalidate already locked blocks
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 4070908800)
        self.wait_for_sporks_same()
        # Test DIP8 activation once with a pre-existing quorum and once without (we don't know in which order it will activate on mainnet)
        self.test_dip8_quorum_merkle_root_activation(True)
        for n in self.nodes:
            n.invalidateblock(oldhash)
        self.sync_all()
        first_quorum = self.test_dip8_quorum_merkle_root_activation(False, True)
        # Re-enable ChainLocks again
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        # Verify that the first quorum appears in MNLISTDIFF
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(first_quorum, 16))]
        quorumList = self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
        baseBlockHash = self.nodes[0].getbestblockhash()

        second_quorum = self.mine_quorum()

        # Verify that the second quorum appears in MNLISTDIFF
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(second_quorum, 16))]
        quorumList = self.test_getmnlistdiff_quorums(baseBlockHash, self.nodes[0].getbestblockhash(), quorumList, expectedDeleted, expectedNew)
        baseBlockHash = self.nodes[0].getbestblockhash()

        third_quorum = self.mine_quorum()
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(third_quorum, 16))]
        quorumList = self.test_getmnlistdiff_quorums(baseBlockHash, self.nodes[0].getbestblockhash(), quorumList, expectedDeleted, expectedNew)
        baseBlockHash = self.nodes[0].getbestblockhash()

        fourth_quorum = self.mine_quorum()
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(fourth_quorum, 16))]

        quorums_before_fifth = self.test_getmnlistdiff_quorums(baseBlockHash, self.nodes[0].getbestblockhash(), quorumList, expectedDeleted, expectedNew)
        block_before_fifth = self.nodes[0].getbestblockhash()

        fifth_quorum = self.mine_quorum()
        # Verify that the first quorum is deleted and the fifth quorum is added in MNLISTDIFF (the first became inactive)
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(fifth_quorum, 16))]
        self.test_getmnlistdiff_quorums(block_before_fifth, self.nodes[0].getbestblockhash(), quorums_before_fifth, expectedDeleted, expectedNew)

        # Verify that the diff between genesis and best block is the current active set (second to fifth quorums)
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(second_quorum, 16)), QuorumId(100, int(third_quorum, 16)), QuorumId(100, int(fourth_quorum, 16)), QuorumId(100, int(fifth_quorum, 16))]
        self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)

        # Now verify that diffs are correct around the block that mined the fifth quorum.
        # This tests the logic in CalcCbTxMerkleRootQuorums, which has to manually add the commitment from the current
        # block
        mined_in_block = self.nodes[0].quorum_info(100, fifth_quorum)["minedBlock"]
        prev_block = self.nodes[0].getblock(mined_in_block)["previousblockhash"]
        prev_block2 = self.nodes[0].getblock(prev_block)["previousblockhash"]
        next_block = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
        next_block2 = self.nodes[0].getblock(next_block)["nextblockhash"]
        # The 2 blocks before the quorum was mined should both give an empty diff
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(block_before_fifth, prev_block2, quorums_before_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_fifth, prev_block, quorums_before_fifth, expectedDeleted, expectedNew)
        # The block in which the quorum was mined and the 2 after that should all give the same diff
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(fifth_quorum, 16))]
        quorums_with_fifth = self.test_getmnlistdiff_quorums(block_before_fifth, mined_in_block, quorums_before_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_fifth, next_block, quorums_before_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_fifth, next_block2, quorums_before_fifth, expectedDeleted, expectedNew)
        # A diff between the two blocks that happened after the quorum was mined should give an empty diff
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(mined_in_block, next_block, quorums_with_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, next_block2, quorums_with_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block2, quorums_with_fifth, expectedDeleted, expectedNew)

        # Using the same block for baseBlockHash and blockHash should give empty diffs
        self.test_getmnlistdiff_quorums(prev_block, prev_block, quorums_before_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(prev_block2, prev_block2, quorums_before_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, mined_in_block, quorums_with_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block, quorums_with_fifth, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block2, next_block2, quorums_with_fifth, expectedDeleted, expectedNew)