Example #1
    def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
        def test_function():
            if check_connected:
                assert self.is_connected
            return test_function_in()

        wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
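
Every example on this page polls through wait_until_helper, but the helper itself is never shown. A minimal sketch of what it plausibly looks like, assuming the Bitcoin Core test-framework semantics (the predicate is polled under an optional lock until it returns True or the timeout, scaled by timeout_factor, expires); the sleep default is an assumption:

import time

def wait_until_helper(predicate, *, timeout=60, lock=None, timeout_factor=1.0, sleep=0.5):
    """Poll predicate() until it returns True or the scaled timeout expires."""
    time_end = time.time() + timeout * timeout_factor  # scale for slow environments
    while time.time() < time_end:
        if lock:
            with lock:
                if predicate():
                    return
        elif predicate():
            return
        time.sleep(sleep)
    raise AssertionError("Predicate not true within %d seconds" % (timeout * timeout_factor))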
Example #2
 def close(self, timeout=10):
     """Close the connections and network event loop."""
     self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
     wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
     self.network_event_loop.close()
     self.join(timeout)
     # Safe to remove event loop.
     NetworkThread.network_event_loop = None
Example #3
 def wait_for_connect(self, timeout=60):
     test_function = lambda: self.is_connected
     wait_until_helper(test_function, timeout=timeout, lock=p2p_lock)
Example #4
    def test_spork(self, spork_name, final_value):
        # check test spork default state
        for node in self.nodes:
            assert (self.get_test_spork_value(node, spork_name) == 4070908800)

        self.bump_mocktime(1)
        # first and second signers set spork value
        self.nodes[0].spork(spork_name, 1)
        self.nodes[1].spork(spork_name, 1)
        self.bump_mocktime(5)
        # spork change requires at least 3 signers
        time.sleep(10)
        for node in self.nodes:
            assert (self.get_test_spork_value(node, spork_name) != 1)

        # restart with no extra args to trigger CheckAndRemove
        self.restart_node(0)
        assert (self.get_test_spork_value(self.nodes[0], spork_name) != 1)

        # restart again with the correct params, should resync spork parts from other nodes
        self.restart_node(0, self.node0_extra_args)
        for i in range(1, 5):
            self.connect_nodes(0, i)

        # third signer set spork value
        self.nodes[2].spork(spork_name, 1)
        self.bump_mocktime(6)
        time.sleep(5)
        # now spork state is changed
        for node in self.nodes:
            wait_until_helper(
                lambda: self.get_test_spork_value(node, spork_name) == 1,
                sleep=0.1,
                timeout=10)

        # restart with no extra args to trigger CheckAndRemove, should reset the spork back to its default
        self.restart_node(0)
        assert (self.get_test_spork_value(self.nodes[0], spork_name) == 4070908800)

        # restart again with the correct params, should resync sporks from other nodes
        self.restart_node(0, self.node0_extra_args)
        time.sleep(5)
        self.bump_mocktime(6)
        for i in range(1, 5):
            self.connect_nodes(0, i)

        wait_until_helper(
            lambda: self.get_test_spork_value(self.nodes[0], spork_name) == 1,
            sleep=0.1,
            timeout=10)

        self.bump_mocktime(1)
        # now set the spork again with other signers to test
        # old and new spork messages interaction
        self.nodes[2].spork(spork_name, final_value)
        self.nodes[3].spork(spork_name, final_value)
        self.nodes[4].spork(spork_name, final_value)
        for node in self.nodes:
            wait_until_helper(
                lambda: self.get_test_spork_value(node, spork_name) == final_value,
                sleep=0.1,
                timeout=10)
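
Example 4 calls a get_test_spork_value helper that is not shown. A plausible sketch, assuming Dash's `spork show` RPC, which returns a map of spork names to their current values; only the name and signature come from the call sites above, the body is an assumption:

    def get_test_spork_value(self, node, spork_name):
        # `spork show` returns {name: value}; 4070908800 (year 2099) is the
        # "disabled" default asserted at the start of test_spork
        return node.spork("show")[spork_name]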
Example #5
    def run_test(self):
        self.sync_blocks(self.nodes, timeout=60 * 5)
        self.confirm_mns()
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        if self.options.spork21:
            self.nodes[0].spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
        self.wait_for_sporks_same()

        self.mine_quorum()
        if self.options.spork21:
            assert self.mninfo[0].node.getconnectioncount() == 5
        id = "0000000000000000000000000000000000000000000000000000000000000001"
        msgHash = "0000000000000000000000000000000000000000000000000000000000000002"
        msgHashConflict = "0000000000000000000000000000000000000000000000000000000000000003"

        def check_sigs(hasrecsigs, isconflicting1, isconflicting2):
            for mn in self.mninfo:
                if mn.node.quorum_hasrecsig(100, id, msgHash) != hasrecsigs:
                    return False
                if mn.node.quorum_isconflicting(100, id, msgHash) != isconflicting1:
                    return False
                if mn.node.quorum_isconflicting(100, id, msgHashConflict) != isconflicting2:
                    return False
            return True

        def wait_for_sigs(hasrecsigs, isconflicting1, isconflicting2, timeout):
            t = time.time()
            while time.time() - t < timeout:
                if check_sigs(hasrecsigs, isconflicting1, isconflicting2):
                    return
                self.bump_mocktime(2)
                time.sleep(1)
            raise AssertionError("wait_for_sigs timed out")

        def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
            t = time.time()
            while time.time() - t < timeout:
                assert (check_sigs(hasrecsigs, isconflicting1, isconflicting2))
                time.sleep(0.1)

        # Initial state
        wait_for_sigs(False, False, False, 1)
        # Sign first share without any optional parameter, should not result in recovered sig
        self.mninfo[0].node.quorum_sign(100, id, msgHash)

        assert_sigs_nochange(False, False, False, 3)
        # Sign second share and test optional quorumHash parameter, should not result in recovered sig

        # 1. Providing an invalid quorum hash should fail and cause no changes for sigs
        assert (not self.mninfo[1].node.quorum_sign(100, id, msgHash, msgHash))
        assert_sigs_nochange(False, False, False, 3)
        # 2. Providing a valid quorum hash should succeed and cause no changes for sigs
        quorumHash = self.mninfo[1].node.quorum_selectquorum(100, id)["quorumHash"]
        assert (self.mninfo[1].node.quorum_sign(100, id, msgHash, quorumHash))
        assert_sigs_nochange(False, False, False, 3)
        # Sign third share and test optional submit parameter if spork21 is enabled, should result in recovered sig
        # and conflict for msgHashConflict
        if self.options.spork21:
            # 1. Providing an invalid quorum hash and set submit=false, should throw an error
            assert_raises_rpc_error(-8, 'quorum not found',
                                    self.mninfo[2].node.quorum_sign, 100, id,
                                    msgHash, id, False)
            # 2. Providing a valid quorum hash and set submit=false, should return a valid sigShare object
            sig_share_rpc_1 = self.mninfo[2].node.quorum_sign(
                100, id, msgHash, quorumHash, False)
            sig_share_rpc_2 = self.mninfo[2].node.quorum_sign(
                100, id, msgHash, "", False)
            assert_equal(sig_share_rpc_1, sig_share_rpc_2)
            assert_sigs_nochange(False, False, False, 3)
            # 3. Sending the sig share received from RPC to the recovery member through P2P interface, should result
            # in a recovered sig
            sig_share = CSigShare()
            sig_share.llmqType = int(sig_share_rpc_1["llmqType"])
            sig_share.quorumHash = int(sig_share_rpc_1["quorumHash"], 16)
            sig_share.quorumMember = int(sig_share_rpc_1["quorumMember"])
            sig_share.id = int(sig_share_rpc_1["id"], 16)
            sig_share.msgHash = int(sig_share_rpc_1["msgHash"], 16)
            sig_share.sigShare = hex_str_to_bytes(sig_share_rpc_1["signature"])
            for i in range(len(self.mninfo)):
                assert self.mninfo[i].node.getconnectioncount() == 5
            # Get the current recovery member of the quorum
            q = self.nodes[0].quorum_selectquorum(100, id)
            mn = self.get_mninfo(q['recoveryMembers'][0])
            # Open a P2P connection to it
            p2p_interface = mn.node.add_p2p_connection(P2PInterface())
            # Send the last required QSIGSHARE message to the recovery member
            p2p_interface.send_message(msg_qsigshare([sig_share]))
        else:
            # If spork21 is not enabled just sign regularly
            self.mninfo[2].node.quorum_sign(100, id, msgHash)

        wait_for_sigs(True, False, True, 15)

        self.bump_mocktime(5)
        wait_for_sigs(True, False, True, 15)
        if self.options.spork21:
            mn.node.disconnect_p2ps()
        # Test `quorum verify` rpc
        node = self.mninfo[0].node
        recsig = node.quorum_getrecsig(100, id, msgHash)
        # Find quorum automatically
        height = node.getblockcount()
        height_bad = node.getblockheader(recsig["quorumHash"])["height"]
        hash_bad = node.getblockhash(0)
        assert node.quorum_verify(100, id, msgHash, recsig["sig"])
        assert node.quorum_verify(100, id, msgHash, recsig["sig"], "", height)
        assert not node.quorum_verify(100, id, msgHashConflict, recsig["sig"])
        assert not node.quorum_verify(100, id, msgHash, recsig["sig"], "", height_bad)
        # Use specific quorum
        assert node.quorum_verify(100, id, msgHash, recsig["sig"], recsig["quorumHash"])
        assert not node.quorum_verify(100, id, msgHashConflict, recsig["sig"], recsig["quorumHash"])
        assert_raises_rpc_error(-8, "quorum not found", node.quorum_verify,
                                100, id, msgHash, recsig["sig"], hash_bad)

        # Mine one more quorum, so that we have 2 active ones, nothing should change
        self.mine_quorum()
        assert_sigs_nochange(True, False, True, 3)

        # Create a recovered sig for the oldest quorum i.e. the active quorum which will be moved
        # out of the active set when a new quorum appears
        request_id = 2
        oldest_quorum_hash = node.quorum_list()["llmq_test"][-1]
        # Search for a request id which selects the last active quorum
        while True:
            selected_hash = node.quorum_selectquorum(100, "%064x" % request_id)["quorumHash"]
            if selected_hash == oldest_quorum_hash:
                break
            else:
                request_id += 1
        # Produce the recovered signature
        id = "%064x" % request_id
        for mn in self.mninfo:
            mn.node.quorum_sign(100, id, msgHash)
        # And mine a quorum to move the quorum which signed out of the active set
        self.mine_quorum()
        # Verify the recovered sig. This triggers the "signHeight + dkgInterval" verification
        recsig = node.quorum_getrecsig(100, id, msgHash)
        assert node.quorum_verify(100, id, msgHash, recsig["sig"], "",
                                  node.getblockcount())

        recsig_time = self.mocktime

        # Mine 4 more quorums, so that the one used for the recovered sig becomes inactive; nothing should change
        self.mine_quorum()
        self.mine_quorum()
        self.mine_quorum()
        self.mine_quorum()
        assert_sigs_nochange(True, False, True, 3)

        # fast forward until 0.5 days before cleanup is expected, recovered sig should still be valid
        self.bump_mocktime(recsig_time + int(60 * 60 * 24 * 6.5) - self.mocktime)
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.bump_mocktime(5)
        # Cleanup starts every 5 seconds
        wait_for_sigs(True, False, True, 15)
        # fast forward 1 day, recovered sig should not be valid anymore
        self.bump_mocktime(int(60 * 60 * 24 * 1))
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.bump_mocktime(5)
        # Cleanup starts every 5 seconds
        wait_for_sigs(False, False, False, 15)
        for i in range(len(self.nodes)):
            force_finish_mnsync(self.nodes[i])
        for i in range(2):
            self.mninfo[i].node.quorum_sign(100, id, msgHashConflict)
        for i in range(2, 5):
            self.mninfo[i].node.quorum_sign(100, id, msgHash)
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.bump_mocktime(5)
        wait_for_sigs(True, False, True, 15)

        if self.options.spork21:
            id = "%064x" % (request_id + 1)

            # Isolate the node that is responsible for the recovery of a signature and assert that recovery fails
            q = self.nodes[0].quorum_selectquorum(100, id)
            mn = self.get_mninfo(q['recoveryMembers'][0])
            mn.node.setnetworkactive(False)
            self.wait_until(lambda: mn.node.getconnectioncount() == 0)
            for i in range(4):
                self.mninfo[i].node.quorum_sign(100, id, msgHash)
            assert_sigs_nochange(False, False, False, 3)
            # Need to re-connect so that it later gets the recovered sig
            mn.node.setnetworkactive(True)
            self.connect_nodes(mn.node.index, 0)
            force_finish_mnsync(mn.node)
            # Make sure intra-quorum connections were also restored
            self.bump_mocktime(1)  # need this to bypass quorum connection retry timeout
            wait_until_helper(lambda: mn.node.getconnectioncount() == 5,
                              timeout=10,
                              sleep=2)
            mn.node.ping()
            self.wait_until(lambda: all('pingwait' not in peer
                                        for peer in mn.node.getpeerinfo()))
            self.nodes[0].generate(1)
            self.sync_blocks()
            self.bump_mocktime(5)
            wait_for_sigs(True, False, True, 15)
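
The hand-rolled wait_for_sigs loop in Example 5 could equally be built on wait_until_helper, assuming it accepts the sleep keyword seen in Example 4; bumping mock time inside the predicate keeps the signature propagation timers moving, just as the original loop does:

        def wait_for_sigs(hasrecsigs, isconflicting1, isconflicting2, timeout):
            def predicate():
                self.bump_mocktime(2)  # advance mock time so propagation timers fire
                return check_sigs(hasrecsigs, isconflicting1, isconflicting2)
            wait_until_helper(predicate, timeout=timeout, sleep=1)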
Example #6
 def getBlock(self, blkHash):
     self.block = None
     inv = CInv(t=2, h=int(blkHash, 16))
     self.send_message(msg_getdata(inv=[inv]))
     wait_until_helper(lambda: self.block is not None)
     return self.block
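
Example 6 blocks until self.block is populated by a message handler that is not shown. A minimal companion sketch, assuming the test framework's on_<command> callback convention for P2PInterface subclasses (the class name BlockFetcher is hypothetical):

class BlockFetcher(P2PInterface):
    def __init__(self):
        super().__init__()
        self.block = None

    def on_block(self, message):
        # invoked by the framework's dispatcher for "block" messages;
        # getBlock() above polls until this assignment happens
        self.block = message.block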
Example #7
    def run_test(self):

        # Connect all nodes to node1 so that we always have the whole network connected
        # Otherwise only masternode connections will be established between nodes, which won't propagate TXs/blocks
        # Usually node0 is the one that does this, but in this test we isolate it multiple times
        for i in range(len(self.nodes)):
            if i != 1:
                self.connect_nodes(i, 1)
        self.nodes[0].generate(10)
        self.sync_blocks(self.nodes, timeout=60 * 5)
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 4070908800)
        self.wait_for_sporks_same()

        self.log.info("Mining 4 quorums")
        for i in range(4):
            self.mine_quorum()
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.wait_for_sporks_same()
        self.log.info("Mine single block, wait for chainlock")
        self.nodes[0].generate(1)
        self.wait_for_chainlocked_block_all_nodes(
            self.nodes[0].getbestblockhash())

        self.log.info("Mine many blocks, wait for chainlock")
        self.nodes[0].generate(20)
        # We need more time here due to 20 blocks being generated at once
        self.wait_for_chainlocked_block_all_nodes(
            self.nodes[0].getbestblockhash(), timeout=30)

        self.log.info(
            "Assert that all blocks up until the tip are chainlocked")
        for h in range(1, self.nodes[0].getblockcount()):
            block = self.nodes[0].getblock(self.nodes[0].getblockhash(h))
            assert (block['chainlock'])

        self.log.info("Isolate node, mine on another, and reconnect")
        self.isolate_node(self.nodes[0])
        node0_mining_addr = self.nodes[0].getnewaddress()
        node0_tip = self.nodes[0].getbestblockhash()
        self.nodes[1].generatetoaddress(5, node0_mining_addr)
        self.wait_for_chainlocked_block(self.nodes[1],
                                        self.nodes[1].getbestblockhash())
        assert (self.nodes[0].getbestblockhash() == node0_tip)
        self.nodes[0].setnetworkactive(True)
        for i in range(self.num_nodes - 1):
            self.connect_nodes(0, i + 1)
        self.nodes[1].generatetoaddress(1, node0_mining_addr)
        self.wait_for_chainlocked_block(self.nodes[0],
                                        self.nodes[1].getbestblockhash())

        self.log.info(
            "Isolate node, mine on both parts of the network, and reconnect")
        self.isolate_node(self.nodes[0])
        bad_tip = self.nodes[0].generate(5)[-1]
        self.nodes[1].generatetoaddress(1, node0_mining_addr)
        good_tip = self.nodes[1].getbestblockhash()
        self.wait_for_chainlocked_block(self.nodes[1], good_tip)
        assert not self.nodes[0].getblock(self.nodes[0].getbestblockhash())["chainlock"]
        self.nodes[0].setnetworkactive(True)
        for i in range(self.num_nodes - 1):
            self.connect_nodes(0, i + 1)
        self.nodes[1].generatetoaddress(1, node0_mining_addr)
        self.wait_for_chainlocked_block(self.nodes[0],
                                        self.nodes[1].getbestblockhash())
        assert self.nodes[0].getblock(self.nodes[0].getbestblockhash())["previousblockhash"] == good_tip
        assert self.nodes[1].getblock(self.nodes[1].getbestblockhash())["previousblockhash"] == good_tip

        self.log.info(
            "The tip mined while this node was isolated should be marked conflicting now"
        )
        found = False
        for tip in self.nodes[0].getchaintips():
            if tip["hash"] == bad_tip:
                assert (tip["status"] == "conflicting")
                found = True
                break
        assert (found)

        def wait_for_headers():
            if self.nodes[1].getconnectioncount() == node1_num_peers_before:
                return False
            if self.nodes[1].getpeerinfo()[-1]["synced_headers"] < self.nodes[0].getblockcount():
                return False
            return True

        self.log.info("Keep node connected and let it try to reorg the chain")
        good_tip = self.nodes[0].getbestblockhash()
        self.log.info(
            "Restart it so that it forgets all the chainlock messages from the past"
        )
        self.stop_node(0)
        self.start_node(0)
        node1_num_peers_before = self.nodes[1].getconnectioncount()
        self.connect_nodes(0, 1)
        # We need to wait for nodes to exchange headers first because
        # node1 won't recognize node0 as a good peer to send new blocks to otherwise.
        wait_until_helper(wait_for_headers, sleep=1, timeout=5)
        assert (self.nodes[0].getbestblockhash() == good_tip)
        self.nodes[0].invalidateblock(good_tip)
        self.log.info("Now try to reorg the chain")
        self.nodes[0].generate(2)
        time.sleep(6)
        assert (self.nodes[1].getbestblockhash() == good_tip)
        bad_tip = self.nodes[0].generate(2)[-1]
        time.sleep(6)
        assert (self.nodes[0].getbestblockhash() == bad_tip)
        assert (self.nodes[1].getbestblockhash() == good_tip)

        self.log.info(
            "Now let the node which is on the wrong chain reorg back to the locked chain"
        )
        self.nodes[0].reconsiderblock(good_tip)
        assert (self.nodes[0].getbestblockhash() != good_tip)
        good_fork = good_tip
        # this should mark bad_tip as conflicting
        good_tip = self.nodes[1].generatetoaddress(1, node0_mining_addr)[-1]
        self.wait_for_chainlocked_block(self.nodes[0], good_tip)
        assert (self.nodes[0].getbestblockhash() == good_tip)
        found = False
        for tip in self.nodes[0].getchaintips():
            if tip["hash"] == bad_tip:
                assert (tip["status"] == "conflicting")
                found = True
                break
        assert (found)

        self.log.info(
            "Should switch to the best non-conflicting tip (not to the most work chain) on restart"
        )
        assert (int(self.nodes[0].getblock(bad_tip)["chainwork"], 16) >
                int(self.nodes[1].getblock(good_tip)["chainwork"], 16))
        self.stop_node(0)
        self.start_node(0)
        self.nodes[0].invalidateblock(good_fork)
        self.stop_node(0)
        self.start_node(0)
        time.sleep(1)
        assert (self.nodes[0].getbestblockhash() == good_tip)

        txs = []
        for i in range(3):
            txs.append(self.nodes[0].sendtoaddress(
                self.nodes[0].getnewaddress(), 1))
        txs += self.create_chained_txs(self.nodes[0], 1)
        self.log.info(
            "Assert that after block generation these TXs are included")
        node0_tip = self.nodes[0].generate(1)[-1]
        for txid in txs:
            tx = self.nodes[0].getrawtransaction(txid, True)
            assert ("confirmations" in tx)
        time.sleep(1)
        node0_tip_block = self.nodes[0].getblock(node0_tip)
        assert (not node0_tip_block["chainlock"])
        assert (node0_tip_block["previousblockhash"] == good_tip)
        self.nodes[0].generate(1)
        self.log.info("Assert that TXs got included now")
        for txid in txs:
            tx = self.nodes[0].getrawtransaction(txid, True)
            assert ("confirmations" in tx and tx["confirmations"] > 0)
        # Enable network on first node again, which will cause the blocks to propagate
        # for the mined TXs, which will then allow the network to create a CLSIG
        self.log.info("Re-enable network on first node and wait for chainlock")
        self.nodes[0].setnetworkactive(True)
        for i in range(self.num_nodes - 1):
            self.connect_nodes(0, i + 1)

        self.log.info(
            "Send fake future clsigs and see if this breaks ChainLocks")
        for i in range(len(self.nodes)):
            if i != 0:
                self.connect_nodes(i, 0)
        SIGN_HEIGHT_OFFSET = 20
        p2p_node = self.nodes[0].add_p2p_connection(TestP2PConn())
        p2p_node.wait_for_verack()
        self.wait_for_chainlocked_block_all_nodes(
            self.nodes[0].getbestblockhash(), timeout=30)
        self.log.info(
            "Should accept fake clsig but other quorums should sign the actual block on the same height and override the malicious one"
        )
        fake_clsig1, fake_block_hash1 = self.create_fake_clsig(1)
        p2p_node.send_clsig(fake_clsig1)
        self.bump_mocktime(5, nodes=self.nodes)
        time.sleep(5)
        for node in self.nodes:
            self.wait_for_most_recent_chainlock(node,
                                                fake_block_hash1,
                                                timeout=15)
        tip = self.nodes[0].generate(1)[-1]
        self.wait_for_chainlocked_block_all_nodes(tip, timeout=5)
        self.log.info(
            "Shouldn't accept fake clsig for 'tip + SIGN_HEIGHT_OFFSET + 1' block height"
        )
        fake_clsig2, fake_block_hash2 = self.create_fake_clsig(
            SIGN_HEIGHT_OFFSET + 1)
        p2p_node.send_clsig(fake_clsig2)
        self.bump_mocktime(7, nodes=self.nodes)
        time.sleep(5)
        for node in self.nodes:
            assert node.getchainlocks()["recent_chainlock"]["blockhash"] == tip
            assert node.getchainlocks()["active_chainlock"]["blockhash"] == tip
        self.log.info(
            "Should accept fake clsig for 'tip + SIGN_HEIGHT_OFFSET' but new clsigs should still be formed"
        )
        fake_clsig3, fake_block_hash3 = self.create_fake_clsig(
            SIGN_HEIGHT_OFFSET)
        p2p_node.send_clsig(fake_clsig3)
        self.bump_mocktime(7, nodes=self.nodes)
        time.sleep(5)
        for node in self.nodes:
            self.wait_for_most_recent_chainlock(node,
                                                fake_block_hash3,
                                                timeout=15)
        tip = self.nodes[0].generate(1)[-1]
        self.bump_mocktime(7, nodes=self.nodes)
        self.wait_for_chainlocked_block_all_nodes(tip, timeout=15)
        self.nodes[0].disconnect_p2ps()
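
Example 7 feeds fake ChainLock signatures through a TestP2PConn class that is not shown. A minimal sketch under the assumption that the local test framework provides a msg_clsig wrapper for the CLSIG P2P message; if it only supports inv-based relay, send_clsig would instead announce the hash and serve the payload on request:

class TestP2PConn(P2PInterface):
    def send_clsig(self, clsig):
        # push the (fake) CLSIG straight to the connected node
        self.send_message(msg_clsig(clsig))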