Example 1
        def calc_vote_relay_delay(record_to):
            # UNIT-E TODO: the node can't vote right after it has processed the checkpoint,
            # so we create one extra block to get past that. See https://github.com/dtr-org/unit-e/issues/643
            generate_block(middle)

            # ensure all nodes are synced before recording the delay
            sync_blocks([middle, record_to], timeout=10)
            sync_mempools([middle, record_to], timeout=10)
            assert_equal(len(new_votes_in_mempool(middle)), 0)

            # ensure that record_from node receives the block earlier than the vote
            disconnect_nodes(middle, finalizer.index)
            generate_block(middle)
            connect_nodes(middle, finalizer.index)

            wait_until(lambda: len(new_votes_in_mempool(middle)) > 0, timeout=10)

            now = time.perf_counter()
            sync_mempools([middle, record_to], wait=0.05, timeout=10)
            delay = time.perf_counter() - now

            new_votes = new_votes_in_mempool(middle)
            assert_equal(len(new_votes), 1)
            new_vote = new_votes[0]
            vote_tx_ids.add(new_vote)

            # sanity check: tx we measured is a vote tx
            tx = FromHex(CTransaction(), middle.getrawtransaction(new_vote))
            assert_equal(tx.get_type(), TxType.VOTE)

            self.log.debug("Vote(%s) propagated from %d to %d in %0.3f seconds"
                           % (new_vote, middle.index, record_to.index, delay))

            return delay
Example 2
    def setup_deposit(self):
        deposit_tx = self.finalizer.deposit(
            self.finalizer.getnewaddress("", "legacy"), MIN_DEPOSIT)
        self.wait_for_transaction(deposit_tx)

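        # confirm the deposit and advance the proposer's chain to height 15 (14 more blocks)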
        generate_block(self.proposer, count=14)
        assert_equal(self.proposer.getblockcount(), 15)
Example 3
    def setup_deposit(self, proposer, finalizers):
        for f in finalizers:
            f.new_address = f.getnewaddress("", "legacy")
            assert_equal(f.getbalance(), 10000)

        for f in finalizers:
            deptx = f.deposit(f.new_address, 1500)
            self.wait_for_transaction(deptx, nodes=[proposer])

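        # confirm the deposits and advance the proposer's chain to height 15 (14 more blocks)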
        generate_block(proposer, count=14)
        assert_equal(proposer.getblockcount(), 15)
Example 4
def setup_deposit(self, proposer, validators):
    for i, n in enumerate(validators):
        n.new_address = n.getnewaddress("", "legacy")

        assert_equal(n.getbalance(), 10000)

    for n in validators:
        deptx = n.deposit(n.new_address, 1500)
        self.wait_for_transaction(deptx, nodes=[proposer])

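    # confirm the deposits and advance the proposer's chain to height 22 (21 more blocks)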
    generate_block(proposer, count=21)

    assert_equal(proposer.getblockcount(), 22)
Example 5
def setup_deposit(self, proposer, validators):
    for _, n in enumerate(validators):
        n.new_address = n.getnewaddress("", "legacy")
        assert_equal(n.getbalance(), 10000)

    for n in validators:
        deptx = n.deposit(n.new_address, 1500)
        self.wait_for_transaction(deptx)

    generate_block(proposer, count=24)
    assert_equal(proposer.getblockcount(), 25)
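    # propagate the deposit blocks to every validator, then isolate the validators from the proposer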
    sync_blocks(validators + [proposer])
    for v in validators:
        disconnect_nodes(proposer, v.index)
Example 6
def generate_epoch_and_vote(node, finalizer, finalizer_address, prevtx):
    assert node.getblockcount() % 5 == 0
    fs = node.getfinalizationstate()
    checkpoint = node.getbestblockhash()
    generate_block(node)
    vtx = make_vote_tx(finalizer, finalizer_address, checkpoint,
                       source_epoch=fs['lastJustifiedEpoch'],
                       target_epoch=fs['currentEpoch'],
                       input_tx_id=prevtx)
    node.sendrawtransaction(vtx)
    generate_block(node, count=4)
    vtx = FromHex(CTransaction(), vtx)
    vtx.rehash()
    return vtx.hash
Example 7
def setup_deposit(self, proposer, validators):
    for i, n in enumerate(validators):
        n.new_address = n.getnewaddress("", "legacy")

        assert_equal(n.getbalance(), 10000)

    for n in validators:
        deptx = n.deposit(n.new_address, 1500)
        self.wait_for_transaction(deptx)

    # the validator will be ready to operate in epoch 4
    # TODO: UNIT - E: it can be 2 epochs as soon as #572 is fixed
    generate_block(proposer, count=30)

    assert_equal(proposer.getblockcount(), 31)
Example 8
    def test_fallback_to_ibd(self):
        """
        This test checks that the node can fall back to Initial Block Download
        if its peers can't provide the snapshot
        """
        full_node = self.nodes[4]
        sync_node = self.nodes[5]

        self.setup_stake_coins(full_node)

        generate_block(full_node, count=5)
        wait_until(lambda: len(full_node.listsnapshots()) == 1, timeout=10)
        for res in full_node.listsnapshots():
            full_node.deletesnapshot(res['snapshot_hash'])

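        # with all snapshots deleted, sync_node cannot fast-sync and must fall back to IBD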
        connect_nodes(sync_node, full_node.index)
        sync_blocks([sync_node, full_node])
        assert_equal(sync_node.gettxoutsetinfo(), full_node.gettxoutsetinfo())
        for height in range(0, 6):
            block_hash = sync_node.getblockhash(height)
            block = sync_node.getblock(block_hash)
            assert_equal(block['hash'], block_hash)

        self.log.info('Test fallback to IBD passed')
Example 9
    def run_test(self):
        # Check finalizer can vote after restart
        p, v = self.nodes
        self.setup_stake_coins(p, v)
        self.generate_sync(p)

        self.log.info("Setup deposit")
        v.new_address = v.getnewaddress("", "legacy")
        tx = v.deposit(v.new_address, 1500)
        self.wait_for_transaction(tx)
        generate_block(p)
        sync_blocks([p, v])

        self.log.info("Restart validator")
        self.restart_node(v.index)

        self.log.info("Leave insta justification")
        for _ in range(24):
            generate_block(p)
        assert_equal(p.getblockcount(), 26)
        assert_finalizationstate(
            p, {
                "currentEpoch": 6,
                "lastJustifiedEpoch": 4,
                "lastFinalizedEpoch": 3,
                "validators": 1
            })

        self.log.info("Check finalizer votes after restart")
        self.wait_for_vote_and_disconnect(finalizer=v, node=p)
        generate_block(p)

        assert_equal(p.getblockcount(), 27)
        assert_finalizationstate(
            p, {
                "currentEpoch": 6,
                "lastJustifiedEpoch": 5,
                "lastFinalizedEpoch": 4,
                "validators": 1
            })
Example 10
    def run_test(self):
        node0 = self.nodes[0]
        node1 = self.nodes[1]
        node2 = self.nodes[2]
        finalizer = self.nodes[3]

        self.setup_stake_coins(node0, node1)

        # leave IBD
        # e0 - e1[1] node0, node1, node2, finalizer
        generate_block(node0)
        assert_equal(node0.getblockcount(), 1)
        sync_blocks([node0, node1, finalizer], timeout=10)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'NOT_VALIDATING')
        self.log.info('started epoch=1')

        # disconnect node1 to be able to revert finalizer's and node2's UTXOs later
        # e0 - e1[1] node0, node2, finalizer
        #         |
        #         - node1
        disconnect_nodes(node0, node1.index)

        # transfer funds to node2, which will be used to create a UTXO for the finalizer
        #            t1
        # e0 - e1[1, 2] node0, node2, finalizer
        #         |
        #         - node1
        a1 = node2.getnewaddress('', 'bech32')
        t1 = node0.sendtoaddress(a1, 5000)
        assert t1 in node0.getrawmempool()
        generate_block(node0)
        assert_equal(node0.getblockcount(), 2)
        sync_blocks([node0, node2, finalizer], timeout=10)
        assert_equal(node2.getbalance(), 5000)
        self.log.info('created UTXO for node2')

        # receive amount for deposit
        #            t1 t2
        # e0 - e1[1, 2, 3] node0, node2, finalizer
        #         |
        #         - node1
        a2 = finalizer.getnewaddress('', 'bech32')
        t2 = node2.sendtoaddress(a2, 2000)
        assert t2 in node2.getrawmempool()
        wait_until(lambda: t2 in node0.getrawmempool(), timeout=150)
        generate_block(node0)
        sync_blocks([node0, node2, finalizer], timeout=10)
        assert_equal(node0.getblockcount(), 3)
        assert_equal(finalizer.getbalance(), 2000)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'NOT_VALIDATING')
        self.log.info('created UTXO for finalizer1')

        # disconnect node2 to be able to revert deposit
        #            t1 t2
        # e0 - e1[1, 2, 3] node0, finalizer
        #         |     |
        #         |     - node2
        #         - node1
        disconnect_nodes(node0, node2.index)

        # create deposit
        #            t1 t2 d1
        # e0 - e1[1, 2, 3, 4] node0, finalizer
        #         |     |
        #         |     - node2
        #         - node1
        a3 = finalizer.getnewaddress('', 'legacy')
        d1 = finalizer.deposit(a3, 1500)
        wait_until(lambda: d1 in node0.getrawmempool(), timeout=20)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_CONFIRMATION')
        self.log.info('validator_status is correct after creating deposit')

        generate_block(node0)
        assert_equal(node0.getblockcount(), 4)
        sync_blocks([node0, finalizer], timeout=10)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_FINALIZATION')
        self.log.info('validator_status is correct after mining deposit')

        # revert deposit
        #            t1 t2 d1
        # e0 - e1[1, 2, 3, 4] node0
        #         |     |
        #         |     -- 4, 5] node2, finalizer
        #         - node1
        generate_block(node2, count=2)
        assert_equal(node2.getblockcount(), 5)
        disconnect_nodes(node0, finalizer.index)
        connect_nodes(node2, finalizer.index)
        sync_blocks([node2, finalizer], timeout=10)
        assert_equal(finalizer.getblockcount(), 5)

        # we re-org'ed the deposit but since the finalizer keeps
        # its txs in the mempool, status must be WAITING_DEPOSIT_CONFIRMATION
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_CONFIRMATION')
        assert_equal(finalizer.gettransaction(d1)['txid'], d1)

        assert d1 in finalizer.resendwallettransactions()
        wait_until(lambda: d1 in finalizer.getrawmempool(), timeout=10)
        wait_until(lambda: d1 in node2.getrawmempool(), timeout=10)
        disconnect_nodes(node2, finalizer.index)
        self.log.info('validator_status is correct after reverting deposit')

        # revert UTXOs that lead to deposit
        #            t1 t2 d1
        # e0 - e1[1, 2, 3, 4] node0
        #         |     |
        #         |     -- 4, 5] node2
        #         -- 2, 3, 4, 5, 6] node1, finalizer
        generate_block(node1, count=5)
        assert_equal(node1.getblockcount(), 6)
        connect_nodes(node1, finalizer.index)
        sync_blocks([node1, finalizer], timeout=10)
        assert_equal(finalizer.getblockcount(), 6)

        # we re-org'ed all txs but since they are in mempool
        # validator_status shouldn't change
        wait_until(lambda: d1 in finalizer.getrawmempool(), timeout=10)
        wait_until(lambda: t2 in finalizer.getrawmempool(), timeout=10)
        wait_until(lambda: t1 in finalizer.getrawmempool(), timeout=10)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_CONFIRMATION')
        self.log.info(
            'validator_status is correct after reverting all txs that led to deposit'
        )

        # remove mempool.dat to simulate tx eviction
        # the status stays the same because the wallet still knows about its transactions
        # and expects to resend them once the evicted txs are included in a block
        self.stop_node(finalizer.index)
        os.remove(os.path.join(finalizer.datadir, "regtest", "mempool.dat"))
        self.start_node(finalizer.index, [ESPERANZA_CONFIG, '-validating=1'])
        wait_until(lambda: finalizer.getblockcount(), timeout=10)
        assert_equal(len(finalizer.getrawmempool()), 0)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_CONFIRMATION')
        self.log.info('validator_status is correct after removing mempool.dat')

        # add missing UTXOs and deposit to the node1 fork
        #            t1 t2 d1
        # e0 - e1[1, 2, 3, 4] node0
        #         |     |
        #         |     -- 4, 5] node2
        #         |                 t1 t2 d1
        #         -- 2, 3, 4, 5, 6, 7       ] node1, finalizer
        assert_equal(node1.sendrawtransaction(node0.getrawtransaction(t1)), t1)
        assert_equal(node1.sendrawtransaction(node0.getrawtransaction(t2)), t2)
        assert_equal(node1.sendrawtransaction(node0.getrawtransaction(d1)), d1)
        generate_block(node1)
        connect_nodes(node1, finalizer.index)
        sync_blocks([node1, finalizer], timeout=10)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_FINALIZATION')
        disconnect_nodes(node1, finalizer.index)
        self.log.info(
            'validator_status is correct after re-mining txs on a different fork'
        )

        # finalize deposit and re-org to that fork
        #            t1 t2 d1
        # e0 - e1[1, 2, 3, 4, ...           ] - ... - e4[31] node0, finalizer
        #         |     |
        #         |     -- 4, 5] node2
        #         |                 t1 t2 d1
        #         -- 2, 3, 4, 5, 6, 7       ] node1
        generate_block(node0, count=6)
        assert_equal(node0.getblockcount(), 10)
        assert_equal(node0.getfinalizationstate()['currentDynasty'], 0)
        for _ in range(2):
            generate_block(node0, count=10)
        assert_equal(node0.getblockcount(), 30)
        assert_equal(node0.getfinalizationstate()['currentDynasty'], 1)

        connect_nodes(node0, finalizer.index)
        sync_blocks([node0, finalizer], timeout=60)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_FINALIZATION')

        generate_block(node0)
        assert_equal(node0.getfinalizationstate()['currentDynasty'], 2)
        sync_blocks([node0, finalizer], timeout=10)
        assert_equal(finalizer.getvalidatorinfo()['validator_status'],
                     'IS_VALIDATING')
        self.log.info(
            'validator_status is correct after re-organizing to the fork of finalized deposit'
        )
Example 11
    def run_test(self):
        # used to de-duplicate tx ids
        vote_tx_ids = set()

        inbound = self.nodes[0]
        middle = self.nodes[1]
        outbound = self.nodes[2]
        finalizer = self.nodes[3]

        self.setup_stake_coins(middle, finalizer)

        # create network topology where arrows denote the connection direction:
        #         finalizer
        #             ↑
        # inbound → middle → outbound
        connect_nodes(inbound, middle.index)
        connect_nodes(middle, outbound.index)
        connect_nodes(middle, finalizer.index)

        self.log.info('Topology of the network is configured')

        def mean(l):
            return sum(l) / len(l)

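        # exact median; requires an odd number of samples, so TEST_SAMPLES must be odd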
        def median(l):
            assert_equal(len(l) % 2, 1)
            return sorted(l)[int(len(l) / 2)]

        def new_votes_in_mempool(node):
            mempool = node.getrawmempool()
            return [txid for txid in mempool if txid not in vote_tx_ids]

        def calc_vote_relay_delay(record_to):
            # UNIT-E TODO: the node can't vote right after it has processed the checkpoint,
            # so we create one extra block to get past that. See https://github.com/dtr-org/unit-e/issues/643
            generate_block(middle)

            # ensure all nodes are synced before recording the delay
            sync_blocks([middle, record_to], timeout=10)
            sync_mempools([middle, record_to], timeout=10)
            assert_equal(len(new_votes_in_mempool(middle)), 0)

            # ensure that record_from node receives the block earlier than the vote
            disconnect_nodes(middle, finalizer.index)
            generate_block(middle)
            connect_nodes(middle, finalizer.index)

            wait_until(lambda: len(new_votes_in_mempool(middle)) > 0,
                       timeout=10)

            now = time.perf_counter()
            sync_mempools([middle, record_to], wait=0.05, timeout=10)
            delay = time.perf_counter() - now

            new_votes = new_votes_in_mempool(middle)
            assert_equal(len(new_votes), 1)
            new_vote = new_votes[0]
            vote_tx_ids.add(new_vote)

            # sanity check: tx we measured is a vote tx
            tx = FromHex(CTransaction(), middle.getrawtransaction(new_vote))
            assert_equal(tx.get_type(), TxType.VOTE)

            self.log.debug(
                "Vote(%s) propagated from %d to %d in %0.3f seconds" %
                (new_vote, middle.index, record_to.index, delay))

            return delay

        # leave IBD
        generate_block(middle)
        sync_blocks(self.nodes, timeout=10)

        # disable instant finalization
        payto = finalizer.getnewaddress('', 'legacy')
        txid = finalizer.deposit(payto, 1500)
        self.wait_for_transaction(txid, timeout=10)

        generate_block(middle, count=8)
        assert_equal(middle.getblockcount(), 9)
        assert_equal(middle.getfinalizationstate()['currentEpoch'], 5)
        sync_blocks(self.nodes, timeout=10)

        # record relay time of the vote transaction to the outbound peer
        outbound_vote_delays = []
        for _ in range(TEST_SAMPLES):
            delay = calc_vote_relay_delay(outbound)
            outbound_vote_delays.append(delay)

        self.log.info(
            'Test outbound vote relay %d times. mean: %0.3f s, median:'
            ' %0.3f s, min: %0.3f s, max: %0.3f s', TEST_SAMPLES,
            mean(outbound_vote_delays), median(outbound_vote_delays),
            min(outbound_vote_delays), max(outbound_vote_delays))

        # record relay time of the vote transaction to the inbound peer
        inbound_vote_delays = []
        for _ in range(TEST_SAMPLES):
            delay = calc_vote_relay_delay(inbound)
            inbound_vote_delays.append(delay)

        self.log.info(
            'Test inbound vote relay %d times. mean: %0.3f s, median: '
            '%0.3f s, min: %0.3f s, max: %0.3f s', TEST_SAMPLES,
            mean(inbound_vote_delays), median(inbound_vote_delays),
            min(inbound_vote_delays), max(inbound_vote_delays))

        assert_less_than(mean(outbound_vote_delays),
                         VOTE_PROPAGATION_THRESHOLD_SEC)
        assert_less_than(mean(inbound_vote_delays),
                         VOTE_PROPAGATION_THRESHOLD_SEC)
Example 12
    def test_double_votes(self):
        fork1 = self.nodes[0]
        fork2 = self.nodes[1]
        finalizer1 = self.nodes[2]
        finalizer2 = self.nodes[3]

        self.setup_stake_coins(fork1, fork2, finalizer1)

        # clone finalizer1
        finalizer2.importmasterkey(finalizer1.mnemonics)

        # leave IBD
        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer1, finalizer2])

        disconnect_nodes(fork1, finalizer2.index)
        addr = finalizer1.getnewaddress('', 'legacy')
        txid1 = finalizer1.deposit(addr, 1500)
        wait_until(lambda: txid1 in fork1.getrawmempool())

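        # finalizer2 has the same wallet keys as finalizer1, so it builds an identical deposit tx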
        txid2 = finalizer2.deposit(addr, 1500)
        assert_equal(txid1, txid2)
        connect_nodes(fork1, finalizer2.index)

        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer1, finalizer2])
        disconnect_nodes(fork1, finalizer1.index)
        disconnect_nodes(fork1, finalizer2.index)

        # pass instant finalization
        # F    F    F
        # e0 - e1 - e2 - e3 - e4[16] fork1, fork2
        generate_block(fork1, count=3 + 5 + 5 + 1)
        assert_equal(fork1.getblockcount(), 16)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 4,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        # change topology where forks are not connected
        # finalizer1 → fork1
        #
        # finalizer2 → fork2
        sync_blocks([fork1, fork2])
        disconnect_nodes(fork1, fork2.index)

        # Create some blocks and cause the finalizer to vote, then take the vote and send it to
        # finalizer2; when finalizer2 votes, it should not slash itself
        #                            v1          v2a
        #                          - e5 - e6[26, 27, 28] - e7[31] fork1
        # F    F    F    F    F   /
        # e0 - e1 - e2 - e3 - e4[16]
        #                         \  v1          v2a
        #                          - e5 - e6[26, 27] fork2
        generate_block(fork1)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=fork1)
        generate_block(fork1, count=5)
        raw_vote_1 = self.wait_for_vote_and_disconnect(finalizer=finalizer1,
                                                       node=fork1)
        generate_block(fork1)
        assert_equal(fork1.getblockcount(), 23)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 5,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 4,
                'validators': 1
            })

        # We'll use a second vote to check whether slashing occurs when a validator tries to send
        # a double vote after it has already voted.
        generate_block(fork1, count=3)
        raw_vote_2 = self.wait_for_vote_and_disconnect(finalizer=finalizer1,
                                                       node=fork1)
        assert_equal(fork1.getblockcount(), 26)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 4,
                'validators': 1
            })

        # Send the conflicting vote from the other chain to finalizer2; it should record it and slash later
        assert_raises_rpc_error(-26, "bad-vote-invalid",
                                finalizer2.sendrawtransaction, raw_vote_1)

        generate_block(fork2)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork2)
        generate_block(fork2, count=5)
        assert_equal(fork2.getblockcount(), 22)
        assert_finalizationstate(
            fork2, {
                'currentEpoch': 5,
                'lastJustifiedEpoch': 3,
                'lastFinalizedEpoch': 3,
                'validators': 1
            })

        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork2)

        # check that a vote, and not a slash, is actually in the mempool
        vote = fork2.decoderawtransaction(
            fork2.getrawtransaction(fork2.getrawmempool()[0]))
        assert_equal(vote['txtype'], TxType.VOTE.value)

        fork2.generatetoaddress(1, fork1.getnewaddress('', 'bech32'))
        assert_equal(len(fork2.getrawmempool()), 0)

        # check if there is slashing after voting
        fork2.generatetoaddress(3, fork1.getnewaddress('', 'bech32'))
        assert_equal(fork2.getblockcount(), 26)
        assert_finalizationstate(
            fork2, {
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 4,
                'validators': 1
            })

        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork2)

        assert_raises_rpc_error(-26, "bad-vote-invalid",
                                finalizer2.sendrawtransaction, raw_vote_2)

        # The vote hasn't been replaced by a slash
        vote = finalizer2.decoderawtransaction(
            finalizer2.getrawtransaction(finalizer2.getrawmempool()[0]))
        assert_equal(vote['txtype'], TxType.VOTE.value)
Example 13
    def run_test(self):
        p, v = self.nodes
        self.setup_stake_coins(p, v)
        self.generate_sync(p)

        self.log.info("Setup deposit")
        setup_deposit(self, p, [v])

        self.log.info("Generate few epochs")
        self.generate_epoch(p, v, count=2)
        assert_equal(p.getblockcount(), 35)

        assert_finalizationstate(
            p, {
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 5,
                'validators': 1
            })

        self.log.info("Restarting proposer")
        self.restart_node(p)

        # check it doesn't have peers -- i.e., loaded data from disk
        assert_equal(p.getpeerinfo(), [])

        self.log.info("Generate few epochs more")
        generate_block(p, count=9)
        assert_equal(p.getblockcount(), 44)

        # it is not connected to the validator, so finalization shouldn't move
        assert_finalizationstate(
            p, {
                'currentEpoch': 9,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 5,
                'validators': 1
            })

        # connect validator and check how it votes
        self.wait_for_vote_and_disconnect(v, p)
        generate_block(p, count=1)

        assert_equal(p.getblockcount(), 45)
        assert_finalizationstate(
            p, {
                'currentEpoch': 9,
                'lastJustifiedEpoch': 8,
                'lastFinalizedEpoch': 5,
                'validators': 1
            })

        self.generate_epoch(p, v, count=2)

        assert_equal(p.getblockcount(), 55)
        assert_finalizationstate(
            p, {
                'currentEpoch': 11,
                'lastJustifiedEpoch': 10,
                'lastFinalizedEpoch': 9,
                'validators': 1
            })

        self.log.info("Restarting validator")
        self.restart_node(v)

        # check it doesn't have peers -- i.e., loaded data from disk
        assert_equal(v.getpeerinfo(), [])

        self.log.info("Generate more epochs")
        self.generate_epoch(p, v, count=2)
        assert_equal(p.getblockcount(), 65)
        assert_finalizationstate(
            p, {
                'currentEpoch': 13,
                'lastJustifiedEpoch': 12,
                'lastFinalizedEpoch': 11,
                'validators': 1
            })
        connect_nodes(p, v.index)
        sync_blocks([p, v])

        self.log.info("Restart proposer from empty cache")
        self.stop_node(p.index)
        cleanup_datadir(self.options.tmpdir, p.index)
        initialize_datadir(self.options.tmpdir, p.index)
        self.start_node(p.index)
        assert_equal(p.getblockcount(), 0)

        connect_nodes(p, v.index)
        sync_blocks([p, v])
        assert_finalizationstate(
            p, {
                'currentEpoch': 13,
                'lastJustifiedEpoch': 12,
                'lastFinalizedEpoch': 11,
                'validators': 1
            })

        self.log.info("Restart proposer")
        self.restart_node(p)
        assert_finalizationstate(
            p, {
                'currentEpoch': 13,
                'lastJustifiedEpoch': 12,
                'lastFinalizedEpoch': 11,
                'validators': 1
            })
Example 14
    def test_sync_with_restarts(self):
        """
        This test creates the following nodes:
        1. snap_node - full node that has the snapshot
        2. snap_p2p - mini node that is used as a helper to retrieve the snapshot content
        3. node - the node which syncs the snapshot
        4. p2p - mini node that sends the snapshot in stages
        """
        snap_node = self.nodes[2]
        node = self.nodes[3]

        self.start_node(snap_node.index)
        self.start_node(node.index)

        self.setup_stake_coins(snap_node)

        # generate 2 epochs + 1 block to create the first finalized snapshot
        generate_block(snap_node, count=5 + 5 + 1)
        assert_equal(snap_node.getblockcount(), 11)
        wait_until(lambda: has_valid_snapshot(snap_node, 4), timeout=10)

        # configure p2p to have snapshot header and parent block
        p2p = WaitNode()
        p2p.update_snapshot_from(snap_node)
        p2p.update_headers_and_blocks_from(snap_node)
        node.add_p2p_connection(p2p, services=SERVICE_FLAGS_WITH_SNAPSHOT)

        # helper p2p connection to fetch snapshot content
        snap_p2p = snap_node.add_p2p_connection(BaseNode())

        # fetch snapshot content for p2p
        snap_p2p.wait_for_verack()
        p2p.snapshot_data = snap_p2p.fetch_snapshot_data(p2p.snapshot_header)
        snap_node.disconnect_p2ps()

        # test 1. the node can be restarted after it discovered the snapshot
        wait_until(lambda: p2p.snapshot_chunk1_requested, timeout=10)
        node.disconnect_p2ps()
        self.restart_node(node.index)
        self.log.info(
            'Node restarted successfully after it discovered the snapshot')

        # test 2. the node can be restarted after it downloaded half of the snapshot
        # and deletes its partially downloaded snapshot
        p2p.return_snapshot_chunk1 = True
        node.add_p2p_connection(p2p, services=SERVICE_FLAGS_WITH_SNAPSHOT)
        wait_until(lambda: p2p.snapshot_chunk2_requested, timeout=10)
        node.disconnect_p2ps()
        assert_has_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
        self.restart_node(node.index)
        assert_no_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
        assert_equal(
            len(os.listdir(os.path.join(node.datadir, "regtest",
                                        "snapshots"))), 0)
        self.log.info(
            'Node restarted successfully after it downloaded half of the snapshot'
        )

        # test 3. the node can be restarted after it downloaded the full snapshot
        # and doesn't delete it
        p2p.return_snapshot_chunk2 = True
        node.add_p2p_connection(p2p, services=SERVICE_FLAGS_WITH_SNAPSHOT)
        wait_until(lambda: p2p.parent_block_requested, timeout=10)
        node.disconnect_p2ps()
        assert_has_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
        self.restart_node(node.index)
        assert_has_snapshot_on_disk(node, p2p.snapshot_header.snapshot_hash)
        self.log.info(
            'Node restarted successfully after it downloaded the full snapshot'
        )

        # test 4. the node can be restarted after it downloaded the parent block
        p2p.snapshot_header_requested = False
        p2p.snapshot_chunk1_requested = False
        p2p.snapshot_chunk2_requested = False
        p2p.return_parent_block = True
        node.add_p2p_connection(p2p, services=SERVICE_FLAGS_WITH_SNAPSHOT)
        wait_until(lambda: node.getblockcount() == snap_node.getblockcount(),
                   timeout=10)
        assert_chainstate_equal(node, snap_node)

        # node didn't request a new snapshot as it has already downloaded one
        assert_equal(p2p.snapshot_header_requested, False)
        assert_equal(p2p.snapshot_chunk1_requested, False)
        assert_equal(p2p.snapshot_chunk2_requested, False)

        node.disconnect_p2ps()
        self.restart_node(node.index)
        self.restart_node(snap_node.index)
        assert_chainstate_equal(node, snap_node)
        assert_equal(node.listsnapshots(), snap_node.listsnapshots())
        self.log.info(
            'Node restarted successfully after it downloaded the parent block')

        # clean up test
        self.stop_node(snap_node.index)
        self.stop_node(node.index)
        self.log.info('test_sync_with_restarts passed')
Example 15
    def test_double_votes(self):
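        # helper: overwrite a single byte of a script to corrupt it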
        def corrupt_script(script, n_byte):
            script = bytearray(script)
            script[n_byte] = 1 if script[n_byte] == 0 else 0
            return bytes(script)

        # initial topology where arrows denote the direction of connections
        # finalizer2 ← fork1 → finalizer1
        #                ↓
        #              fork2
        fork1 = self.nodes[0]
        fork2 = self.nodes[1]

        fork1.importmasterkey(regtest_mnemonics[0]['mnemonics'])
        fork2.importmasterkey(regtest_mnemonics[1]['mnemonics'])

        finalizer1 = self.nodes[2]
        finalizer2 = self.nodes[3]

        connect_nodes(fork1, fork2.index)
        connect_nodes(fork1, finalizer1.index)
        connect_nodes(fork1, finalizer2.index)

        # leave IBD
        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer1, finalizer2])

        # clone finalizer
        finalizer2.importmasterkey(regtest_mnemonics[2]['mnemonics'])
        finalizer1.importmasterkey(regtest_mnemonics[2]['mnemonics'])

        disconnect_nodes(fork1, finalizer2.index)
        addr = finalizer1.getnewaddress('', 'legacy')
        txid1 = finalizer1.deposit(addr, 1500)
        wait_until(lambda: txid1 in fork1.getrawmempool())

        finalizer2.setlabel(addr, '')
        txid2 = finalizer2.deposit(addr, 1500)
        assert_equal(txid1, txid2)
        connect_nodes(fork1, finalizer2.index)

        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer1, finalizer2])
        disconnect_nodes(fork1, finalizer1.index)
        disconnect_nodes(fork1, finalizer2.index)

        # pass instant finalization
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e[26] fork1, fork2
        generate_block(fork1, count=3 + 5 + 5 + 5 + 5 + 1)
        assert_equal(fork1.getblockcount(), 26)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 3,
                'validators': 1
            })

        # change topology where forks are not connected
        # finalizer1 → fork1
        #
        # finalizer2 → fork2
        sync_blocks([fork1, fork2])
        disconnect_nodes(fork1, fork2.index)

        # test that the same vote included on different forks
        # doesn't create a slash transaction
        #                                        v1
        #                                    - e6[27, 28, 29, 30] fork1
        # F    F    F    F    F    J        /
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        #                                   \     v1
        #                                    - e6[27, 28, 29, 30] fork2
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=fork1)
        v1 = fork1.getrawtransaction(fork1.getrawmempool()[0])
        generate_block(fork1, count=4)
        assert_equal(fork1.getblockcount(), 30)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 4,
                'validators': 1
            })

        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork2)
        generate_block(fork2)
        assert_raises_rpc_error(-27, 'transaction already in block chain',
                                fork2.sendrawtransaction, v1)
        assert_equal(len(fork2.getrawmempool()), 0)
        generate_block(fork2, count=3)
        assert_equal(fork2.getblockcount(), 30)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 4,
                'validators': 1
            })
        self.log.info('same vote on two forks was accepted')

        # test that double-vote with invalid vote signature is ignored
        # and doesn't cause slashing
        #                                      v1          v2a
        #                                    - e6 - e7[31, 32] fork1
        # F    F    F    F    F    F    J   /
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        #                                   \  v1          v2a
        #                                    - e6 - e7[31, 32] fork2
        generate_block(fork1)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=fork1)
        v2a = fork1.getrawtransaction(fork1.getrawmempool()[0])
        generate_block(fork1)
        assert_equal(fork1.getblockcount(), 32)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 5,
                'validators': 1
            })

        generate_block(fork2)
        tx_v2a = FromHex(CTransaction(), v2a)

        # corrupt the 1st byte of the validator's pubkey in the commit script
        # see schema in CScript::CommitScript
        tx_v2a.vout[0].scriptPubKey = corrupt_script(
            script=tx_v2a.vout[0].scriptPubKey, n_byte=2)

        assert_raises_rpc_error(-26, 'bad-vote-signature',
                                fork2.sendrawtransaction, ToHex(tx_v2a))
        assert_equal(len(fork2.getrawmempool()), 0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork2)
        # slash transactions are processed every 10 sec. UNIT-E TODO: remove once optimized
        time.sleep(10)
        assert_equal(len(fork2.getrawmempool()), 1)
        v2b = fork2.getrawtransaction(fork2.getrawmempool()[0])
        tx_v2b = FromHex(CTransaction(), v2b)
        assert_equal(tx_v2b.get_type(), TxType.VOTE)

        generate_block(fork2)
        assert_equal(len(fork2.getrawmempool()), 0)
        assert_equal(fork2.getblockcount(), 32)
        assert_finalizationstate(
            fork1, {
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 5,
                'validators': 1
            })
        self.log.info('double-vote with invalid signature is ignored')

        # test that a valid double-vote with a corrupt withdraw address
        # creates a slash tx and it is included in the next block
        #                                      v1          v2a
        #                                    - e6 - e7[31, 32] fork1
        # F    F    F    F    F    F    J   /
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        #                                   \  v1          v2a s1
        #                                    - e6 - e7[31, 32, 33] fork2
        # corrupt the 1st byte of the address in the scriptpubkey
        # but keep the correct vote signature; see schema in CScript::CommitScript
        tx_v2a = FromHex(CTransaction(), v2a)

        # Remove the signature
        tx_v2a.vin[0].scriptSig = list(CScript(tx_v2a.vin[0].scriptSig))[1]
        tx_v2a.vout[0].scriptPubKey = corrupt_script(
            script=tx_v2a.vout[0].scriptPubKey, n_byte=42)
        tx_v2a = sign_transaction(finalizer2, tx_v2a)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork2.sendrawtransaction, ToHex(tx_v2a))
        wait_until(lambda: len(fork2.getrawmempool()) == 1, timeout=20)
        s1_hash = fork2.getrawmempool()[0]
        s1 = FromHex(CTransaction(), fork2.getrawtransaction(s1_hash))
        assert_equal(s1.get_type(), TxType.SLASH)

        b33 = generate_block(fork2)[0]
        block = FromHex(CBlock(), fork2.getblock(b33, 0))
        assert_equal(len(block.vtx), 2)
        block.vtx[1].rehash()
        assert_equal(block.vtx[1].hash, s1_hash)
        self.log.info('slash tx for double-vote was successfully created')
Example 16
    def test_justification_over_chain_work(self):
        """
        Test that justification has priority over chain work
        """

        def seen_block(node, blockhash):
            try:
                node.getblock(blockhash)
                return True
            except JSONRPCException:
                return False

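        # helper: connect node1 to node2, wait until node1 has the block and matches node2's height, then disconnect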
        def connect_sync_disconnect(node1, node2, blockhash):
            connect_nodes(node1, node2.index)
            wait_until(lambda: seen_block(node1, blockhash), timeout=10)
            wait_until(lambda: node1.getblockcount() == node2.getblockcount(), timeout=5)
            assert_equal(node1.getblockhash(node1.getblockcount()), blockhash)
            disconnect_nodes(node1, node2.index)

        node0 = self.nodes[0]
        node1 = self.nodes[1]
        node2 = self.nodes[2]
        validator = self.nodes[3]

        self.setup_stake_coins(node0, node1, node2, validator)

        connect_nodes(node0, node1.index)
        connect_nodes(node0, node2.index)
        connect_nodes(node0, validator.index)

        # leave IBD
        generate_block(node0)
        sync_blocks([node0, node1, node2, validator], timeout=10)

        payto = validator.getnewaddress('', 'legacy')
        txid = validator.deposit(payto, 1500)
        wait_until(lambda: self.have_tx_in_mempool([node0, node1, node2], txid), timeout=10)

        disconnect_nodes(node0, node1.index)
        disconnect_nodes(node0, node2.index)
        disconnect_nodes(node0, validator.index)
        assert_equal(len(node0.getpeerinfo()), 0)

        # F    F    F
        # e0 - e1 - e2 - e3 - e4[16]
        generate_block(node0, count=15)
        assert_equal(node0.getblockcount(), 16)
        assert_finalizationstate(node0, {'currentDynasty': 2,
                                         'currentEpoch': 4,
                                         'lastJustifiedEpoch': 2,
                                         'lastFinalizedEpoch': 2,
                                         'validators': 1})

        connect_nodes(node0, node1.index)
        connect_nodes(node0, node2.index)
        sync_blocks([node0, node1, node2])
        disconnect_nodes(node0, node1.index)
        disconnect_nodes(node0, node2.index)

        # generate fork with no commits. node0 must switch to it
        # 16 node1
        #   \
        #    - b17 node0, node2
        b17 = generate_block(node2)[-1]
        connect_sync_disconnect(node0, node2, b17)
        assert_equal(node0.getblockcount(), 17)

        # generate fork with justified commits. node0 must switch to it
        #    - 17 - b18 node0, node1
        #   /
        # 16
        #   \
        #    - b17 node2
        self.wait_for_vote_and_disconnect(finalizer=validator, node=node1)
        b18 = generate_block(node1, count=2)[-1]
        connect_sync_disconnect(node0, node1, b18)
        assert_equal(node0.getblockcount(), 18)
        assert_finalizationstate(node0, {'currentDynasty': 2,
                                         'currentEpoch': 4,
                                         'lastJustifiedEpoch': 3,
                                         'lastFinalizedEpoch': 3,
                                         'validators': 1})
        self.log.info('node successfully switched to longest justified fork')

        # generate a longer but not justified fork. node0 shouldn't switch
        #    - 17 - b18 node0, node1, node2
        #   /
        # 16
        #   \
        #    - 17 - 18 - 19 - b20
        generate_block(node2, count=3)[-1]  # b20
        assert_equal(node2.getblockcount(), 20)
        assert_equal(node0.getblockcount(), 18)

        connect_nodes(node0, node2.index)
        sync_blocks([node0, node2], timeout=10)

        assert_equal(node0.getblockcount(), 18)
        assert_equal(node0.getblockhash(18), b18)
        assert_equal(node0.getfinalizationstate()['lastJustifiedEpoch'], 3)
        self.log.info('node did not switch to the heavier but less justified fork')

        assert_equal(node2.getblockcount(), 18)
        assert_equal(node2.getblockhash(18), b18)
        assert_equal(node2.getfinalizationstate()['lastJustifiedEpoch'], 3)
        self.log.info('node switched to longest justified fork with less work')

        self.stop_node(node0.index)
        self.stop_node(node1.index)
        self.stop_node(node2.index)
        self.stop_node(validator.index)
Example 17
    def run_test(self):
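        # helper: check that vote_raw_tx spends input_raw_tx and commits to the expected source/target epochs and target hash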
        def assert_vote(vote_raw_tx, input_raw_tx, source_epoch, target_epoch,
                        target_hash):
            vote_tx = FromHex(CTransaction(), vote_raw_tx)
            assert vote_tx.is_finalizer_commit()

            input_tx = FromHex(CTransaction(), input_raw_tx)
            input_tx.rehash()
            prevout = "%064x" % vote_tx.vin[0].prevout.hash
            assert_equal(prevout, input_tx.hash)

            vote = self.nodes[0].extractvotefromsignature(
                bytes_to_hex_str(vote_tx.vin[0].scriptSig))
            assert_equal(vote['source_epoch'], source_epoch)
            assert_equal(vote['target_epoch'], target_epoch)
            assert_equal(vote['target_hash'], target_hash)

        fork0 = self.nodes[0]
        fork1 = self.nodes[1]
        finalizer = self.nodes[2]   # main finalizer being checked
        finalizer2 = self.nodes[3]  # secondary finalizer to control finalization

        self.setup_stake_coins(fork0, fork1, finalizer, finalizer2)

        connect_nodes(fork0, fork1.index)
        connect_nodes(fork0, finalizer.index)
        connect_nodes(fork0, finalizer2.index)

        # leave IBD
        generate_block(fork0)
        sync_blocks(self.nodes)

        # deposit
        d1_hash = finalizer.deposit(finalizer.getnewaddress('', 'legacy'),
                                    1500)
        d2_hash = finalizer2.deposit(finalizer2.getnewaddress('', 'legacy'),
                                     4000)
        d1 = finalizer.getrawtransaction(d1_hash)
        self.wait_for_transaction(d1_hash, timeout=10)
        self.wait_for_transaction(d2_hash, timeout=10)
        generate_block(fork0)
        disconnect_nodes(fork0, finalizer.index)
        disconnect_nodes(fork0, finalizer2.index)

        # leave instant justification
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6[26]
        generate_block(fork0, count=3 + 5 + 5 + 5 + 5 + 1)
        assert_equal(fork0.getblockcount(), 26)
        assert_finalizationstate(
            fork0, {
                'currentDynasty': 3,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 3,
                'validators': 2
            })

        # move tip to one block before checkpoint to be able to
        # revert checkpoint on the fork
        #       J           v0
        # ... - e5 - e6[26, 27, 28, 29] fork0
        #                            \
        #                             - fork1
        v0 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork0)
        assert_vote(vote_raw_tx=v0,
                    input_raw_tx=d1,
                    source_epoch=4,
                    target_epoch=5,
                    target_hash=fork0.getblockhash(25))
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork0)
        generate_block(fork0, count=3)
        sync_blocks([fork0, fork1], timeout=10)
        disconnect_nodes(fork0, fork1.index)
        assert_equal(fork0.getblockcount(), 29)
        assert_finalizationstate(
            fork0, {
                'currentDynasty': 3,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 4,
                'validators': 2
            })

        # vote v1 on target_epoch=6 target_hash=30
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \
        #                             - fork1
        generate_block(fork0, count=2)
        assert_equal(fork0.getblockcount(), 31)
        v1 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork0)
        assert_vote(vote_raw_tx=v1,
                    input_raw_tx=v0,
                    source_epoch=5,
                    target_epoch=6,
                    target_hash=fork0.getblockhash(30))
        generate_block(fork0)
        connect_nodes(finalizer, fork0.index)
        sync_blocks([finalizer, fork0], timeout=10)
        disconnect_nodes(finalizer, fork0.index)
        assert_equal(fork0.getblockcount(), 32)
        assert_equal(finalizer.getblockcount(), 32)
        self.log.info('finalizer successfully voted on the checkpoint')

        # re-org last checkpoint and check that finalizer doesn't vote
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \
        #                             - 30] - e7[31, 32, 33] fork1
        generate_block(fork1, count=4)
        assert_equal(fork1.getblockcount(), 33)
        connect_nodes(finalizer, fork1.index)
        sync_blocks([finalizer, fork1], timeout=10)
        assert_equal(finalizer.getblockcount(), 33)
        assert_equal(len(fork1.getrawmempool()), 0)
        disconnect_nodes(finalizer, fork1.index)
        self.log.info(
            'finalizer successfully detected potential double vote and did not vote'
        )

        # continue to new epoch and check that finalizer votes on fork1
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \                         v2
        #                             - 30] - e7[...] - e8[36, 37] fork1
        generate_block(fork1, count=3)
        assert_equal(fork1.getblockcount(), 36)
        v2 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork1)
        assert_vote(vote_raw_tx=v2,
                    input_raw_tx=v0,
                    source_epoch=5,
                    target_epoch=7,
                    target_hash=fork1.getblockhash(35))
        generate_block(fork1)
        assert_equal(fork1.getblockcount(), 37)

        # create new epoch on fork1 and check that finalizer votes
        #       J           v0                       v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7[31, 32] fork0
        #                            \                         v2                v3
        #                             - 30] - e7[...] - e8[36, 37, ...] - e9[41, 42] fork1
        generate_block(fork1, count=4)
        assert_equal(fork1.getblockcount(), 41)
        v3 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork1)
        assert_vote(vote_raw_tx=v3,
                    input_raw_tx=v2,
                    source_epoch=5,
                    target_epoch=8,
                    target_hash=fork1.getblockhash(40))
        generate_block(fork1)
        assert_equal(fork1.getblockcount(), 42)

        # create longer fork0 and check that after reorg finalizer doesn't vote
        #       J           v0                v1
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7 - e8 - e9[41,42, 43] fork0
        #                            \             v2          v3
        #                             - 30] - e7 - e8 - e9[41, 42] fork1
        generate_block(fork0, count=11)
        assert_equal(fork0.getblockcount(), 43)
        connect_nodes(finalizer, fork0.index)
        sync_blocks([finalizer, fork0])
        assert_equal(finalizer.getblockcount(), 43)
        assert_equal(len(fork0.getrawmempool()), 0)
        disconnect_nodes(finalizer, fork0.index)
        self.log.info(
            'finalizer successfully detected potential two consecutive double votes and did not vote'
        )

        # check that finalizer can vote from next epoch on fork0
        #       J           v0                v1                          v4
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7 - e8 - e9[...] - e10[46, 47] fork0
        #                            \             v2          v3
        #                             - 30] - e7 - e8 - e9[41, 42] fork1
        generate_block(fork0, count=3)
        assert_equal(fork0.getblockcount(), 46)
        v4 = self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork0)
        assert_vote(vote_raw_tx=v4,
                    input_raw_tx=v1,
                    source_epoch=5,
                    target_epoch=9,
                    target_hash=fork0.getblockhash(45))
        generate_block(fork0)
        assert_equal(fork0.getblockcount(), 47)

        # finalize epoch8 on fork1 and re-broadcast all vote txs
        # which must not create slash tx
        #       J           v0                v1                                      v4
        # ... - e5 - e6[26, 27, 28, 29, 30] - e7 - e8[   ...    ] - e9[...] - e10[46, 47] fork0
        #                            \             F      v2        J      v3
        #                             - 30] - e7 - e8[36, 37,...] - e9[41, 42, 43] - e10[46, 47] fork1
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork1)
        generate_block(fork1)
        assert_equal(fork1.getblockcount(), 43)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 4,
                'currentEpoch': 9,
                'lastJustifiedEpoch': 8,
                'lastFinalizedEpoch': 4,
                'validators': 2
            })

        generate_block(fork1, count=3)
        assert_equal(fork1.getblockcount(), 46)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=fork1)
        generate_block(fork1)
        assert_equal(fork1.getblockcount(), 47)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 4,
                'currentEpoch': 10,
                'lastJustifiedEpoch': 9,
                'lastFinalizedEpoch': 8,
                'validators': 2
            })

        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork1.sendrawtransaction, v1)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork1.sendrawtransaction, v4)
        assert_equal(len(fork1.getrawmempool()), 0)

        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork0.sendrawtransaction, v2)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                fork0.sendrawtransaction, v3)
        assert_equal(len(fork0.getrawmempool()), 0)
        self.log.info('re-broadcasting existing votes did not create slash tx')
Example 18
    def run_test(self):
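        # helper: sync node to fork's tip (optionally from a clean datadir) and disconnect again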
        def sync_node_to_fork(node, fork, force=False):
            if force:
                self.restart_node(node.index, cleanup=True)
                node.importmasterkey(
                    regtest_mnemonics[node.index]['mnemonics'])
            connect_nodes(node, fork.index)
            block_hash = fork.getblockhash(fork.getblockcount())
            node.waitforblock(block_hash, 5000)
            assert_equal(node.getblockhash(node.getblockcount()), block_hash)
            disconnect_nodes(node, fork.index)

        def generate_epoch_and_vote(node, finalizer, finalizer_address,
                                    prevtx):
            assert node.getblockcount() % 5 == 0
            fs = node.getfinalizationstate()
            checkpoint = node.getbestblockhash()
            generate_block(node)
            vtx = make_vote_tx(finalizer,
                               finalizer_address,
                               checkpoint,
                               source_epoch=fs['lastJustifiedEpoch'],
                               target_epoch=fs['currentEpoch'],
                               input_tx_id=prevtx)
            node.sendrawtransaction(vtx)
            generate_block(node, count=4)
            vtx = FromHex(CTransaction(), vtx)
            vtx.rehash()
            return vtx.hash

        node = self.nodes[0]
        fork1 = self.nodes[1]
        fork2 = self.nodes[2]
        finalizer = self.nodes[3]

        node.importmasterkey(regtest_mnemonics[0]['mnemonics'])
        fork1.importmasterkey(regtest_mnemonics[1]['mnemonics'])
        fork2.importmasterkey(regtest_mnemonics[2]['mnemonics'])
        finalizer.importmasterkey(regtest_mnemonics[3]['mnemonics'])

        connect_nodes(node, fork1.index)
        connect_nodes(node, fork2.index)
        connect_nodes(node, finalizer.index)

        # leave IBD
        self.generate_sync(node, 1)

        finalizer_address = finalizer.getnewaddress('', 'legacy')
        deptx = finalizer.deposit(finalizer_address, 1500)
        self.wait_for_transaction(deptx)

        # leave insta justification
        #                   -  fork1
        # F    F    F       |
        # e0 - e1 - e2 - e3 -  node
        #                   |
        #                   -  fork2
        generate_block(node, count=14)
        assert_equal(node.getblockcount(), 15)
        sync_blocks([node, finalizer])
        assert_finalizationstate(
            node, {
                'currentDynasty': 1,
                'currentEpoch': 3,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 0
            })
        sync_blocks(self.nodes)
        disconnect_nodes(node, fork1.index)
        disconnect_nodes(node, fork2.index)
        disconnect_nodes(node, finalizer.index)

        # create first justified epoch on fork1
        #                     J
        #                   - e4 - e5 - e6 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |
        #                   -  fork2

        generate_block(fork1, count=5)
        vtx1 = generate_epoch_and_vote(fork1, finalizer, finalizer_address,
                                       deptx)
        generate_block(fork1, count=5)
        assert_equal(fork1.getblockcount(), 30)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        sync_node_to_fork(node, fork1)

        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info('node successfully switched to the justified fork')

        # create longer justified epoch on fork2
        # node must switch ("zig") to this fork
        #                     J
        #                   - e4 - e5 - e6 fork1
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J
        #                   - e4 - e5 - e6 fork2 node

        generate_block(fork2, count=10)
        vtx2 = generate_epoch_and_vote(fork2, finalizer, finalizer_address,
                                       deptx)
        assert_equal(fork2.getblockcount(), 30)
        assert_finalizationstate(
            fork2, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        sync_node_to_fork(node, fork2)

        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info(
            'node successfully switched to the longest justified fork')

        # create longer justified epoch on the previous fork1
        # node must switch ("zag") to this fork
        #                     J              J
        #                   - e4 - e5 - e6 - e7 - e8 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J
        #                   - e4 - e5 - e6 fork2
        generate_block(fork1, count=5)
        sync_node_to_fork(finalizer, fork1)
        vtx1 = generate_epoch_and_vote(fork1, finalizer, finalizer_address,
                                       vtx1)
        assert_equal(fork1.getblockcount(), 40)
        assert_finalizationstate(
            fork1, {
                'currentDynasty': 2,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        assert_not_equal(fork1.getbestblockhash(), fork2.getbestblockhash())
        sync_node_to_fork(node, fork1)
        assert_finalizationstate(
            node, {
                'currentDynasty': 2,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 2,
                'validators': 1
            })

        self.log.info(
            'node successfully switched back to the longest justified fork')

        # UNIT-E TODO: node must follow longest finalized chain
        # node follows longest finalization
        #                     J              J
        #                   - e4 - e5 - e6 - e7 - e8 fork1 node
        # F    F    F       |
        # e0 - e1 - e2 - e3 -
        #                   |      J    F
        #                   - e4 - e5 - e6 - e7 fork2
        sync_node_to_fork(finalizer, fork2, force=True)
        vtx2 = generate_epoch_and_vote(fork2, finalizer, finalizer_address,
                                       vtx2)
        assert_equal(fork2.getblockcount(), 35)
        assert_finalizationstate(
            fork2, {
                'currentDynasty': 2,
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 6,
                'validators': 1
            })
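
assert_finalizationstate() is used throughout these excerpts but its definition is not shown; a minimal sketch of what it is assumed to do (compare only the keys listed in the expected dict against getfinalizationstate()):

def assert_finalizationstate(node, expected):
    # Only the keys present in `expected` are checked; other fields returned
    # by getfinalizationstate() are ignored.
    state = node.getfinalizationstate()
    for key, value in expected.items():
        assert state[key] == value, '%s: %r != %r' % (key, state[key], value)
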
Example 19
0
    def run_test(self):
        self.setup_stake_coins(*self.nodes)
        assert all(n.getbalance() == 10000 for n in self.nodes)

        # create topology where arrows denote non-persistent connection
        # finalizer1 → node0 ← finalizer2
        #                ↑
        #            finalizer3
        node0 = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]
        finalizer3 = self.nodes[3]

        connect_nodes(finalizer1, node0.index)
        connect_nodes(finalizer2, node0.index)
        connect_nodes(finalizer3, node0.index)

        # leave IBD
        generate_block(node0)
        sync_blocks(self.nodes)

        # leave instant finalization
        address1 = self.nodes[1].getnewaddress("", "legacy")
        address2 = self.nodes[2].getnewaddress("", "legacy")
        address3 = self.nodes[3].getnewaddress("", "legacy")

        deptx1 = self.nodes[1].deposit(address1, 1500)
        deptx2 = self.nodes[2].deposit(address2, 2000)
        deptx3 = self.nodes[3].deposit(address3, 1500)

        self.wait_for_transaction(deptx1, timeout=10)
        self.wait_for_transaction(deptx2, timeout=10)
        self.wait_for_transaction(deptx3, timeout=10)

        disconnect_nodes(finalizer1, node0.index)
        disconnect_nodes(finalizer2, node0.index)
        disconnect_nodes(finalizer3, node0.index)
        assert_equal(len(node0.getpeerinfo()), 0)

        # move tip to the height when finalizers are activated
        # complete epoch + 4 epochs + 1 block of new epoch
        generate_block(node0, count=4 + 5 + 5 + 5 + 5 + 1)
        assert_equal(node0.getblockcount(), 26)
        assert_finalizationstate(node0, {'currentDynasty': 3,
                                         'currentEpoch': 6,
                                         'lastJustifiedEpoch': 4,
                                         'lastFinalizedEpoch': 3,
                                         'validators': 3})

        # test that finalizers vote after processing 1st block of new epoch
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer3, node=node0)
        assert_equal(len(node0.getrawmempool()), 3)

        generate_block(node0, count=4)
        assert_equal(node0.getblockcount(), 30)
        assert_finalizationstate(node0, {'currentDynasty': 3,
                                         'currentEpoch': 6,
                                         'lastJustifiedEpoch': 5,
                                         'lastFinalizedEpoch': 4,
                                         'validators': 3})
        self.log.info('Finalizers voted after first block of new epoch')

        # test that finalizers can vote on a configured epoch block number
        self.restart_node(finalizer1.index, ['-validating=1', '-finalizervotefromepochblocknumber=1'])
        self.restart_node(finalizer2.index, ['-validating=1', '-finalizervotefromepochblocknumber=2'])
        self.restart_node(finalizer3.index, ['-validating=1', '-finalizervotefromepochblocknumber=3'])

        generate_block(node0)
        assert_equal(node0.getblockcount(), 31)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node0)
        connect_nodes(finalizer2, node0.index)
        connect_nodes(finalizer3, node0.index)
        sync_blocks([finalizer2, finalizer3, node0], timeout=10)
        assert_equal(len(node0.getrawmempool()), 1)  # no votes from finalizer2 and finalizer3
        disconnect_nodes(finalizer2, node0.index)
        disconnect_nodes(finalizer3, node0.index)

        generate_block(node0)
        assert_equal(node0.getblockcount(), 32)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node0)
        connect_nodes(finalizer3, node0.index)
        sync_blocks([finalizer3, node0], timeout=10)
        assert_equal(len(node0.getrawmempool()), 1)  # no votes from finalizer3
        disconnect_nodes(finalizer3, node0.index)

        generate_block(node0)
        assert_equal(node0.getblockcount(), 33)
        self.wait_for_vote_and_disconnect(finalizer=finalizer3, node=node0)
        generate_block(node0, count=2)
        assert_equal(node0.getblockcount(), 35)
        assert_finalizationstate(node0, {'currentDynasty': 4,
                                         'currentEpoch': 7,
                                         'lastJustifiedEpoch': 6,
                                         'lastFinalizedEpoch': 5,
                                         'validators': 3})
        self.log.info('Finalizers voted on a configured block number')

        # test that finalizers can vote after configured epoch block number
        generate_block(node0, count=4)
        assert_equal(node0.getblockcount(), 39)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer3, node=node0)
        generate_block(node0)
        assert_equal(node0.getblockcount(), 40)
        assert_finalizationstate(node0, {'currentDynasty': 5,
                                         'currentEpoch': 8,
                                         'lastJustifiedEpoch': 7,
                                         'lastFinalizedEpoch': 6,
                                         'validators': 3})
        self.log.info('Finalizers voted after configured block number')
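
wait_for_vote_and_disconnect() is another helper that is not part of this excerpt; a rough sketch, assuming it briefly connects the finalizer, waits until its vote shows up in the node's mempool, disconnects, and returns the raw vote transaction (a later example feeds that return value into decoderawtransaction):

def wait_for_vote_and_disconnect(self, finalizer, node):
    # Assumed behaviour: the finalizer's vote is the only new mempool entry.
    mempool_before = set(node.getrawmempool())
    connect_nodes(finalizer, node.index)
    wait_until(lambda: len(set(node.getrawmempool()) - mempool_before) > 0,
               timeout=10)
    vote_txid = (set(node.getrawmempool()) - mempool_before).pop()
    disconnect_nodes(finalizer, node.index)
    return node.getrawtransaction(vote_txid)
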
Example 20
0
    def test_cannot_sync_with_snapshot(self):
        """
        This test creates the following nodes:
        1. snap_node - snapshot node that is used as a helper node to generate the snapshot
        2. helper_p2p - mini node that retrieves the content of the snapshot
        3. full_snap_p2p - mini node that has the full 2nd best snapshot
        4. half_snap_p2p - mini node that has half of the best snapshot
        5. no_snap_p2p - mini node that doesn't have a snapshot
        6. sync_node - the node which syncs with the snapshot
        """
        snap_node = self.nodes[6]
        sync_node = self.nodes[7]
        self.start_node(snap_node.index)
        self.start_node(sync_node.index)

        self.setup_stake_coins(snap_node)

        # add 2nd best snapshot to full_snap_p2p
        generate_block(snap_node, count=5 + 5 + 1)
        assert_equal(snap_node.getblockcount(), 11)
        wait_until(lambda: has_valid_snapshot(snap_node, 4), timeout=10)
        full_snap_p2p = WaitNode()
        no_snap_p2p = WaitNode()
        for p2p in [full_snap_p2p, no_snap_p2p]:
            p2p.update_snapshot_from(snap_node)

        # add the best snapshot to half_snap_p2p
        generate_block(snap_node, count=5)
        assert_equal(snap_node.getblockcount(), 16)
        wait_until(lambda: has_valid_snapshot(snap_node, 9), timeout=10)
        half_snap_p2p = WaitNode()
        half_snap_p2p.update_snapshot_from(snap_node)
        for p2p in [half_snap_p2p, full_snap_p2p, no_snap_p2p]:
            p2p.update_headers_and_blocks_from(snap_node)

        # retrieve snapshot data
        helper_p2p = snap_node.add_p2p_connection(BaseNode())
        helper_p2p.wait_for_verack()
        full_snap_p2p.snapshot_data = helper_p2p.fetch_snapshot_data(
            full_snap_p2p.snapshot_header)
        half_snap_p2p.snapshot_data = helper_p2p.fetch_snapshot_data(
            half_snap_p2p.snapshot_header)
        self.stop_node(snap_node.index)

        full_snap_p2p.return_snapshot_header = False
        half_snap_p2p.return_snapshot_header = False
        sync_node.add_p2p_connection(no_snap_p2p)
        sync_node.add_p2p_connection(full_snap_p2p,
                                     services=SERVICE_FLAGS_WITH_SNAPSHOT)
        sync_node.add_p2p_connection(half_snap_p2p,
                                     services=SERVICE_FLAGS_WITH_SNAPSHOT)

        # test 1. the node requests snapshot from peers that have service flag set
        full_snap_p2p.wait_for_verack()
        half_snap_p2p.wait_for_verack()
        no_snap_p2p.wait_for_verack()

        wait_until(lambda: full_snap_p2p.snapshot_header_requested, timeout=10)
        wait_until(lambda: half_snap_p2p.snapshot_header_requested, timeout=10)
        assert full_snap_p2p.snapshot_header_requested is True
        assert half_snap_p2p.snapshot_header_requested is True
        assert no_snap_p2p.snapshot_header_requested is False

        full_snap_p2p.send_message(msg_snaphead(full_snap_p2p.snapshot_header))
        half_snap_p2p.send_message(msg_snaphead(half_snap_p2p.snapshot_header))
        wait_until(lambda: half_snap_p2p.snapshot_chunk1_requested, timeout=10)
        assert full_snap_p2p.snapshot_chunk1_requested is False  # didn't start asking for the 2nd best
        self.log.info('Service flags are correctly recognized')

        # test 2. the node can't receive the 2nd part of the snapshot
        half_snap_p2p.return_snapshot_chunk1 = True
        half_snap_p2p.on_getsnapshot(half_snap_p2p.last_getsnapshot_message)
        wait_until(lambda: half_snap_p2p.snapshot_chunk2_requested, timeout=10)
        assert_has_snapshot_on_disk(
            sync_node, half_snap_p2p.snapshot_header.snapshot_hash)
        wait_until(lambda: full_snap_p2p.snapshot_chunk1_requested,
                   timeout=10)  # fallback to 2nd best
        assert_no_snapshot_on_disk(sync_node,
                                   half_snap_p2p.snapshot_header.snapshot_hash)
        self.log.info('Node cannot receive 2nd half of the snapshot')

        # test 3. the node can't receive the parent block
        full_snap_p2p.return_snapshot_chunk1 = True
        full_snap_p2p.return_snapshot_chunk2 = True
        full_snap_p2p.on_getsnapshot(full_snap_p2p.last_getsnapshot_message)
        wait_until(lambda: full_snap_p2p.parent_block_requested, timeout=10)
        wait_until(lambda: no_snap_p2p.parent_block_requested, timeout=10)
        assert_has_snapshot_on_disk(
            sync_node, full_snap_p2p.snapshot_header.snapshot_hash)
        self.log.info(
            'Node cannot receive parent block from already connected peers')

        # test 4. the node can't receive the parent block from new peers
        sync_node.disconnect_p2ps()

        for p2p in [full_snap_p2p, no_snap_p2p]:
            wait_until(lambda: p2p.is_connected is False, timeout=5)
            p2p.snapshot_chunk1_requested = False
            p2p.snapshot_chunk2_requested = False
            p2p.parent_block_requested = False

        sync_node.add_p2p_connection(full_snap_p2p)
        sync_node.add_p2p_connection(no_snap_p2p)
        full_snap_p2p.wait_for_verack()
        no_snap_p2p.wait_for_verack()

        wait_until(lambda: full_snap_p2p.parent_block_requested, timeout=10)
        wait_until(lambda: no_snap_p2p.parent_block_requested, timeout=10)
        assert full_snap_p2p.snapshot_chunk1_requested is False
        assert no_snap_p2p.snapshot_chunk1_requested is False
        assert_has_snapshot_on_disk(
            sync_node, full_snap_p2p.snapshot_header.snapshot_hash)
        self.log.info('Node cannot receive parent block from new peers')

        self.stop_node(sync_node.index)

        self.log.info('test_cannot_sync_with_snapshot passed')
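
has_valid_snapshot() is not defined in this excerpt; a minimal sketch, assuming it checks the 'valid' flag that getblocksnapshot reports for the block at the given height (compare the has_snapshot() helper in the fast-sync example further below):

def has_valid_snapshot(node, height):
    # Assumed to mirror has_snapshot() from the fast-sync example.
    res = node.getblocksnapshot(node.getblockhash(height))
    return res.get('valid', False)
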
Example 21
0
    def test_invalid_snapshot(self):
        """
        This test creates the following nodes:
        1. snap_node - full node that has the snapshot
        2. snap_p2p - mini node that is used as a helper to retrieve the snapshot content
        3. node - the node which syncs the snapshot
        4. broken_p2p - mini node that claims to have the best snapshot, but it's broken
        5. valid_p2p - mini node that sends a valid snapshot
        6. not_finalized_p2p - mini node that claims to have the best snapshot, but it's not finalized
        """

        snap_node = self.nodes[4]
        node = self.nodes[5]

        self.start_node(snap_node.index)
        self.start_node(node.index)

        self.setup_stake_coins(snap_node)

        # generate 1 epoch + 1 block to create the first finalized snapshot
        # and store it in valid_p2p
        generate_block(snap_node, count=5 + 1)
        assert_equal(snap_node.getblockcount(), 6)
        wait_until(lambda: has_valid_snapshot(snap_node, 4), timeout=10)

        valid_p2p = WaitNode()
        valid_p2p.update_snapshot_from(snap_node)

        # create the second snapshot and store it in broken_p2p
        generate_block(snap_node, count=9)
        assert_equal(snap_node.getblockcount(), 15)
        wait_until(lambda: has_valid_snapshot(snap_node, 9), timeout=10)
        wait_until(lambda: has_valid_snapshot(snap_node, 14), timeout=10)

        broken_p2p = WaitNode()
        broken_p2p.update_snapshot_from(snap_node)
        broken_p2p.snapshot_data[-1].outputs[0].nValue *= 2  # break snapshot
        broken_p2p.update_headers_and_blocks_from(snap_node)
        valid_p2p.update_headers_and_blocks_from(snap_node)

        not_finalized_p2p = WaitNode()
        not_finalized_p2p.update_snapshot_from(snap_node, finalized=False)
        not_finalized_p2p.update_headers_and_blocks_from(snap_node)

        broken_p2p.return_snapshot_header = False
        valid_p2p.return_snapshot_header = False
        not_finalized_p2p.return_snapshot_header = False
        node.add_p2p_connection(valid_p2p,
                                services=SERVICE_FLAGS_WITH_SNAPSHOT)
        node.add_p2p_connection(broken_p2p,
                                services=SERVICE_FLAGS_WITH_SNAPSHOT)
        node.add_p2p_connection(not_finalized_p2p,
                                services=SERVICE_FLAGS_WITH_SNAPSHOT)

        # make sure that node knows about all the peers
        valid_p2p.wait_for_verack()
        broken_p2p.wait_for_verack()
        not_finalized_p2p.wait_for_verack()

        valid_p2p.send_message(msg_snaphead(valid_p2p.snapshot_header))
        broken_p2p.send_message(msg_snaphead(broken_p2p.snapshot_header))
        not_finalized_p2p.send_message(
            msg_snaphead(not_finalized_p2p.snapshot_header))

        # node must pick the best snapshot
        wait_until(lambda: broken_p2p.snapshot_chunk1_requested, timeout=10)
        broken_p2p.return_snapshot_chunk1 = True
        broken_p2p.on_getsnapshot(broken_p2p.last_getsnapshot_message)
        wait_until(lambda: broken_p2p.snapshot_chunk2_requested, timeout=10)
        assert_has_snapshot_on_disk(node,
                                    broken_p2p.snapshot_header.snapshot_hash)
        assert_no_snapshot_on_disk(node,
                                   valid_p2p.snapshot_header.snapshot_hash)
        assert_equal(valid_p2p.snapshot_chunk1_requested, False)

        # node detects broken snapshot, removes it and switches to the second best
        broken_p2p.return_snapshot_chunk2 = True
        broken_p2p.on_getsnapshot(broken_p2p.last_getsnapshot_message)
        wait_until(lambda: valid_p2p.snapshot_chunk1_requested, timeout=10)
        assert_no_snapshot_on_disk(node,
                                   broken_p2p.snapshot_header.snapshot_hash)
        valid_p2p.return_snapshot_chunk1 = True
        valid_p2p.on_getsnapshot(valid_p2p.last_getsnapshot_message)
        wait_until(lambda: valid_p2p.snapshot_chunk2_requested, timeout=10)
        assert_has_snapshot_on_disk(node,
                                    valid_p2p.snapshot_header.snapshot_hash)
        valid_p2p.return_snapshot_chunk2 = True
        valid_p2p.return_parent_block = True
        valid_p2p.on_getsnapshot(valid_p2p.last_getsnapshot_message)

        # node doesn't request not finalized snapshot
        assert_equal(not_finalized_p2p.snapshot_header_requested, True)
        assert_equal(not_finalized_p2p.snapshot_chunk1_requested, False)

        # node requests parent block and finishes ISD
        wait_until(lambda: node.getblockcount() == 15, timeout=20)
        node.disconnect_p2ps()
        assert_chainstate_equal(snap_node, node)

        self.log.info('test_invalid_snapshot passed')
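
setup_stake_coins() is also not shown here; a speculative sketch, assuming it gives each node its 10000-coin regtest premine by importing the matching mnemonic, as the explicit importmasterkey() calls in other examples do:

def setup_stake_coins(self, *nodes):
    # Hypothetical: each node imports the regtest mnemonic matching its index,
    # which is what produces the 10000-coin balance asserted in these tests.
    for node in nodes:
        node.importmasterkey(regtest_mnemonics[node.index]['mnemonics'])
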
Example 22
0
    def test_p2p_schema(self):
        """
        This test creates the following nodes:
        1. serving_node - full node that has the snapshot
        2. syncing_p2p - mini node that downloads snapshot from serving_node and tests the protocol
        3. syncing_node - the node which starts with fast sync
        4. serving_p2p - mini node that sends snapshot to syncing_node and tests the protocol
        """
        serving_node = self.nodes[0]
        syncing_node = self.nodes[1]

        self.start_node(serving_node.index)
        self.start_node(syncing_node.index)

        self.setup_stake_coins(serving_node)

        # generate 2 epochs + 1 block to create the first finalized snapshot
        generate_block(serving_node, count=5 + 5 + 1)
        assert_equal(serving_node.getblockcount(), 11)
        wait_until(lambda: has_valid_snapshot(serving_node, 4), timeout=10)

        syncing_p2p = serving_node.add_p2p_connection(BaseNode())
        serving_p2p = BaseNode()

        # configure serving_p2p to have snapshot header and parent block
        serving_p2p.update_snapshot_from(serving_node)
        serving_p2p.update_headers_and_blocks_from(serving_node)

        syncing_node.add_p2p_connection(serving_p2p,
                                        services=SERVICE_FLAGS_WITH_SNAPSHOT)

        syncing_p2p.wait_for_verack()

        # test snapshot downloading in chunks
        syncing_p2p.send_message(msg_getsnaphead())
        wait_until(lambda: syncing_p2p.snapshot_header.total_utxo_subsets > 0,
                   timeout=10)
        chunks = math.ceil(syncing_p2p.snapshot_header.total_utxo_subsets / 2)
        for i in range(1, chunks + 1):
            getsnapshot = GetSnapshot(
                syncing_p2p.snapshot_header.snapshot_hash,
                len(syncing_p2p.snapshot_data), 2)
            syncing_p2p.send_message(msg_getsnapshot(getsnapshot))

            snapshot_size = min(i * 2,
                                syncing_p2p.snapshot_header.total_utxo_subsets)
            wait_until(lambda: len(syncing_p2p.snapshot_data) == snapshot_size,
                       timeout=10)
        assert_equal(len(syncing_p2p.snapshot_data),
                     syncing_p2p.snapshot_header.total_utxo_subsets)

        self.log.info('Snapshot was downloaded successfully')

        # validate the snapshot hash
        utxos = []
        for subset in syncing_p2p.snapshot_data:
            for n in subset.outputs:
                out = COutPoint(subset.tx_id, n)
                utxo = UTXO(subset.height, subset.tx_type, out,
                            subset.outputs[n])
                utxos.append(utxo)
        inputs = bytes_to_hex_str(ser_vector([]))
        outputs = bytes_to_hex_str(ser_vector(utxos))
        stake_modifier = "%064x" % syncing_p2p.snapshot_header.stake_modifier
        chain_work = bytes_to_hex_str(
            ser_uint256(syncing_p2p.snapshot_header.chain_work))
        res = self.nodes[0].calcsnapshothash(inputs, outputs, stake_modifier,
                                             chain_work)
        snapshot_hash = uint256_from_hex(res['hash'])
        assert_equal(snapshot_hash, syncing_p2p.snapshot_header.snapshot_hash)

        self.log.info('Snapshot was validated successfully')

        # test snapshot serving
        wait_until(lambda: serving_p2p.snapshot_requested, timeout=10)
        snapshot = Snapshot(
            snapshot_hash=serving_p2p.snapshot_header.snapshot_hash,
            utxo_subset_index=0,
            utxo_subsets=syncing_p2p.snapshot_data,
        )
        serving_p2p.send_message(msg_snapshot(snapshot))
        wait_until(lambda: syncing_node.getblockcount() == 11, timeout=10)
        assert_equal(serving_node.gettxoutsetinfo(),
                     syncing_node.gettxoutsetinfo())

        self.log.info('Snapshot was sent successfully')

        # clean up test
        serving_node.disconnect_p2ps()
        syncing_node.disconnect_p2ps()
        self.stop_node(serving_node.index)
        self.stop_node(syncing_node.index)
        self.log.info('test_p2p_schema passed')
Example 23
0
    def run_test(self):
        def wait_for_deleted_snapshot(node, height):
            def check():
                block_hash = node.getblockhash(height)
                res = node.getblocksnapshot(block_hash)
                keys = sorted(res.keys())
                expected_keys = [
                    'block_hash',
                    'snapshot_deleted',
                    'snapshot_hash',
                    'valid',
                ]
                if keys != expected_keys:
                    return False
                return all([
                    res['valid'] == False,
                    res['snapshot_deleted'] == True,
                    res['block_hash'] == block_hash,
                    len(res['snapshot_hash']) == 64,
                ])

            wait_until(check)

        def wait_for_valid_finalized(node, height):
            def check():
                block_hash = node.getblockhash(height)
                res = node.getblocksnapshot(block_hash)
                keys = sorted(res.keys())
                expected_keys = [
                    'block_hash',
                    'block_height',
                    'chain_work',
                    'snapshot_finalized',
                    'snapshot_hash',
                    'stake_modifier',
                    'total_outputs',
                    'total_utxo_subsets',
                    'valid',
                ]
                if keys != expected_keys:
                    return False
                return all([
                    res['valid'] is True,
                    res['block_hash'] == block_hash,
                    res['block_height'] == height,
                    res['snapshot_finalized'] is True,
                    len(res['snapshot_hash']) == 64,
                    len(res['stake_modifier']) == 64,
                    len(res['chain_work']) == 64,
                ])

            wait_until(check)

        def wait_for_valid_non_finalized(node, height):
            def check():
                block_hash = node.getblockhash(height)
                res = node.getblocksnapshot(block_hash)
                keys = sorted(res.keys())
                expected_keys = [
                    'block_hash',
                    'block_height',
                    'chain_work',
                    'snapshot_finalized',
                    'snapshot_hash',
                    'stake_modifier',
                    'total_outputs',
                    'total_utxo_subsets',
                    'valid',
                ]
                if keys != expected_keys:
                    return False
                return all([
                    res['valid'] is True,
                    res['block_hash'] == block_hash,
                    res['block_height'] == height,
                    res['snapshot_finalized'] is False,
                    len(res['snapshot_hash']) == 64,
                    len(res['stake_modifier']) == 64,
                    len(res['chain_work']) == 64,
                ])

            wait_until(check)

        # generate two forks that are available for node0
        # 0 ... 7 ... 10 ... 29 node2
        #       \
        #         ... 8 node1
        node0 = self.nodes[0]  # test node that switches between forks
        node1 = self.nodes[1]  # shorter chain
        node2 = self.nodes[2]  # longer chain

        self.setup_stake_coins(node0, node1, node2)

        generate_block(node0, count=7)
        connect_nodes(node1, node0.index)
        connect_nodes(node2, node0.index)
        sync_blocks([node0, node1])
        sync_blocks([node0, node2])

        disconnect_nodes(node1, node0.index)
        disconnect_nodes(node2, node0.index)

        # generated shorter fork
        forked_block_hash = generate_block(node1)[0]
        connect_nodes(node0, node1.index)
        sync_blocks([node0, node1])
        disconnect_nodes(node0, node1.index)
        assert_equal(node0.getblockcount(), 8)
        assert_equal(node0.getblockhash(node0.getblockcount()),
                     forked_block_hash)

        # generate longer fork
        generate_block(node2, count=22)
        connect_nodes(node0, node2.index)
        sync_blocks([node0, node2])
        disconnect_nodes(node0, node2.index)

        # make sure the node generated snapshots up to expected height
        wait_until(
            lambda: 'valid' in node0.getblocksnapshot(node0.getblockhash(28)),
            timeout=10)

        wait_for_deleted_snapshot(node0, height=4)  # actually deleted
        wait_for_deleted_snapshot(node0, height=5)  # wasn't created
        wait_for_valid_finalized(node0, height=9)
        wait_for_valid_non_finalized(node0, height=29)

        res = node0.getblocksnapshot(forked_block_hash)
        assert_equal(res['error'], "can't retrieve snapshot hash of the fork")
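
wait_until() comes from the functional-test framework's util module; a simplified equivalent, to show what the check() helpers above rely on (the real helper additionally supports a lock and an attempt limit):

def wait_until(predicate, *, timeout=60):
    # Poll the predicate until it returns something truthy or we time out.
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(0.05)
    raise AssertionError('wait_until() timed out')
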
Example 24
0
    def test_fast_sync(self):
        """
        This test covers the following scenarios:
        1. node can discover the peer that has snapshot
        2. sync using snapshot
        3. after the sync, the node can accept/propose new blocks
        4. the node can switch to the fork which is created right after the snapshot
        """
        def restart_node(node):
            self.stop_node(node.index)
            self.start_node(node.index)

        def has_finalized_snapshot(node, height):
            res = node.getblocksnapshot(node.getblockhash(height))
            if 'valid' not in res:
                return False
            if 'snapshot_finalized' not in res:
                return False
            return True

        def has_snapshot(node, height):
            res = node.getblocksnapshot(node.getblockhash(height))
            if 'valid' not in res:
                return False
            return res['valid']

        blank_node = self.nodes[0]
        full_node = self.nodes[1]
        isd_node = self.nodes[2]
        rework_node = self.nodes[3]

        self.setup_stake_coins(full_node, rework_node)

        # generate 2 epochs to create the snapshot which will be finalized
        # one block after
        #               s0      s1
        # G------------(h=4)...(h=9)-(h=10) full_node
        # | isd_node
        # | rework_node
        # | blank_node
        generate_block(full_node, count=5 + 5)
        assert_equal(full_node.getblockcount(), 10)
        wait_until(lambda: has_snapshot(full_node, 4), timeout=3)
        wait_until(lambda: has_snapshot(full_node, 9), timeout=3)

        # generate the longest fork that will be used later
        #               s0      s1
        # G------------(h=4)...(h=9)-(h=10) full_node
        # | isd_node                   \
        # | blank_node                  -------------------(h=15) rework_node
        connect_nodes(rework_node, full_node.index)
        sync_blocks([rework_node, full_node])
        disconnect_nodes(rework_node, full_node.index)
        generate_block(rework_node, count=5)
        assert_equal(rework_node.getblockcount(), 15)

        # generating 1 more block creates a new epoch and instantly finalizes the previous one,
        # making the first snapshot part of a finalized epoch
        #               s0      s1
        # G------------(h=4)...(h=9)-(h=10)-(h=11) full_node, blank_node
        # | isd_node                   \
        #                               -------------------(h=15) rework_node
        generate_block(full_node)
        assert_equal(full_node.getblockcount(), 11)
        wait_until(lambda: has_finalized_snapshot(full_node, height=4),
                   timeout=5)
        wait_until(lambda: has_finalized_snapshot(full_node, height=9),
                   timeout=5)
        assert_equal(len(full_node.listsnapshots()), 2)
        connect_nodes(blank_node, full_node.index)
        sync_blocks([blank_node, full_node])
        assert_equal(len(blank_node.listsnapshots()), 0)

        # sync isd_node with blank_node and full_node using ISD
        #               s0      s1
        # G------------(h=4)...(h=9)-(h=10)-(h=11) full_node, blank_node, isd_node
        #                              \
        #                               -------------------(h=15) rework_node
        connect_nodes(isd_node, blank_node.index)
        connect_nodes(isd_node, full_node.index)
        sync_blocks([full_node, isd_node])
        wait_until(lambda: has_snapshot(isd_node, height=9), timeout=5)
        assert_equal(len(isd_node.listsnapshots()), 1)
        chain = isd_node.getblockchaininfo()
        assert_equal(chain['headers'], 11)
        assert_equal(chain['blocks'], 11)
        assert_equal(chain['initialblockdownload'], False)
        assert_equal(chain['initialsnapshotdownload'], False)
        assert_equal(chain['pruned'], True)
        assert_equal(chain['pruneheight'], 10)
        assert_equal(full_node.gettxoutsetinfo(), isd_node.gettxoutsetinfo())

        # test that isd_node can be restarted
        restart_node(isd_node)
        wait_until(lambda: isd_node.getblockcount() == 11, timeout=5)
        chain = isd_node.getblockchaininfo()
        assert_equal(chain['headers'], 11)
        assert_equal(chain['blocks'], 11)
        assert_equal(chain['initialblockdownload'], False)
        assert_equal(chain['initialsnapshotdownload'], False)

        # test that isd_node can create blocks
        #               s0      s1
        # G------------(h=4)...(h=9)-(h=10)-(h=11) full_node, blank_node
        #                              \     \
        #                               \     -(h=12) isd_node
        #                                -------------------(h=15) rework_node

        # Import funds for the node in pruning mode
        isd_node.importmasterkey(regtest_mnemonics[5]['mnemonics'], '', False)
        isd_node.initial_stake = regtest_mnemonics[5]['balance']

        genesis = isd_node.getblock(isd_node.getblockhash(0))
        funding_txid = genesis['tx'][0]
        genesis_tx_hex = isd_node.getrawtransaction(funding_txid)
        fund_proof = isd_node.gettxoutproof([funding_txid])
        isd_node.importprunedfunds(genesis_tx_hex, fund_proof)

        generate_block(isd_node)
        assert_equal(isd_node.getblockcount(), 12)

        # test that reorg one epoch after finalization is possible
        #               s0      s1
        # G------------(h=4)...(h=9)-(h=10)-(h=11) full_node, blank_node
        #                              \
        #                               ---------------(h=15) rework_node, isd_node
        connect_nodes(isd_node, rework_node.index)
        sync_blocks([isd_node, rework_node])
        assert_equal(isd_node.getblockcount(), 15)
        self.log.info('Test fast sync passed')
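
generate_block() is a small wrapper that is not included in this excerpt; a minimal sketch, assuming it behaves like the node's generate RPC and returns the list of new block hashes, which is how the examples index its result:

def generate_block(node, count=1):
    # Assumed equivalent for these examples; the real helper may propose
    # blocks through the staking machinery instead.
    return node.generate(count)
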
Example 25
0
    def run_test(self):
        proposer = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]

        self.setup_stake_coins(*self.nodes)
        assert_equal(finalizer1.getbalance(), Decimal('10000'))

        # Leave IBD
        generate_block(proposer)
        sync_blocks([proposer, finalizer1, finalizer2], timeout=10)

        finalizer1_address = finalizer1.getnewaddress('', 'legacy')

        # create deposits
        # F
        # e0 - e1
        #      d1
        #      d2
        d1 = finalizer1.deposit(finalizer1_address, 1500)
        d2 = finalizer2.deposit(finalizer2.getnewaddress('', 'legacy'), 1500)
        self.wait_for_transaction(d1, timeout=10)
        self.wait_for_transaction(d2, timeout=10)
        generate_block(proposer)
        sync_blocks([proposer, finalizer1, finalizer2], timeout=10)
        disconnect_nodes(finalizer1, proposer.index)
        disconnect_nodes(finalizer2, proposer.index)
        assert_equal(proposer.getblockcount(), 2)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 0,
                'currentEpoch': 1,
                'lastJustifiedEpoch': 0,
                'lastFinalizedEpoch': 0,
                'validators': 0
            })
        self.log.info('deposits are created')

        # Generate enough blocks to activate deposits
        # F    F    F    F
        # e0 - e1 - e2 - e3 - e4[16]
        #      d1
        #      d2
        generate_block(proposer, count=3 + 5 + 5)
        assert_equal(proposer.getblockcount(), 15)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 1,
                'currentEpoch': 3,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 0
            })

        generate_block(proposer)
        assert_equal(proposer.getblockcount(), 16)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 2,
                'currentEpoch': 4,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 2
            })
        self.log.info('finalizers are created')

        # Logout finalizer1
        # F    F    F    F
        # e0 - e1 - e2 - e3 - e4[16, 17, 18, 19, 20]
        #      d1                        l1
        #      d2
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)

        # TODO UNIT-E: a logout tx can't be created if its vote is not in a block;
        # we should also check that the input of the logout tx is in the mempool
        generate_block(proposer)

        connect_nodes(finalizer1, proposer.index)
        sync_blocks([finalizer1, proposer], timeout=10)
        l1 = finalizer1.logout()
        wait_until(lambda: l1 in proposer.getrawmempool(), timeout=10)
        disconnect_nodes(finalizer1, proposer.index)

        generate_block(proposer, count=3)
        assert_equal(proposer.getblockcount(), 20)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 2,
                'currentEpoch': 4,
                'lastJustifiedEpoch': 3,
                'lastFinalizedEpoch': 3,
                'validators': 2
            })
        self.log.info('finalizer1 logged out in dynasty=2')

        # During LOGOUT_DYNASTY_DELAY both finalizers can vote.
        # Since finalization happens at every epoch,
        # the number of dynasties equals the number of epochs.
        for _ in range(LOGOUT_DYNASTY_DELAY):
            generate_block(proposer)
            self.wait_for_vote_and_disconnect(finalizer=finalizer1,
                                              node=proposer)
            self.wait_for_vote_and_disconnect(finalizer=finalizer2,
                                              node=proposer)
            generate_block(proposer, count=4)
            assert_raises_rpc_error(
                -25, "Logout delay hasn't passed yet. Can't withdraw.",
                finalizer1.withdraw, finalizer1_address)

        assert_equal(proposer.getblockcount(), 35)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 5,
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 6,
                'validators': 2
            })

        self.log.info('finalizer1 voted during logout delay successfully')

        # During WITHDRAW_DELAY finalizer1 can't vote and can't withdraw
        generate_block(proposer)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 6,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 6,
                'validators': 1
            })
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        generate_block(proposer)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 6,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 7,
                'validators': 1
            })

        # finalizer1 can't vote so we keep it connected
        connect_nodes(finalizer1, proposer.index)
        time.sleep(2)  # ensure no votes from finalizer1
        assert_equal(len(proposer.getrawmempool()), 0)

        generate_block(proposer, count=3)
        assert_equal(proposer.getblockcount(), 40)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 6,
                'currentEpoch': 8,
                'lastJustifiedEpoch': 7,
                'lastFinalizedEpoch': 7,
                'validators': 1
            })
        assert_equal(finalizer1.getvalidatorinfo()['validator_status'],
                     'WAITING_FOR_WITHDRAW_DELAY')
        assert_raises_rpc_error(
            -25, "Withdraw delay hasn't passed yet. Can't withdraw.",
            finalizer1.withdraw, finalizer1_address)

        # WITHDRAW_EPOCH_DELAY - 1 because the first withdraw-delay epoch was already handled above
        for _ in range(WITHDRAW_EPOCH_DELAY - 1):
            generate_block(proposer)
            self.wait_for_vote_and_disconnect(finalizer=finalizer2,
                                              node=proposer)
            generate_block(proposer, count=4)

        assert_equal(proposer.getblockcount(), 95)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 17,
                'currentEpoch': 19,
                'lastJustifiedEpoch': 18,
                'lastFinalizedEpoch': 18,
                'validators': 1
            })

        # last block at which finalizer1 can't withdraw
        # TODO UNIT-E: allow creating a withdraw tx on a checkpoint,
        # as it will only be added to a block in the next epoch anyway.
        # There is a known issue https://github.com/dtr-org/unit-e/issues/643
        # that the finalizer can't vote after the checkpoint is processed; it looks like
        # the finalizer can't create any finalizer commits at this point (and only at this point).
        assert_equal(finalizer1.getvalidatorinfo()['validator_status'],
                     'WAITING_FOR_WITHDRAW_DELAY')
        assert_raises_rpc_error(
            -25, "Withdraw delay hasn't passed yet. Can't withdraw.",
            finalizer1.withdraw, finalizer1_address)

        self.log.info(
            'finalizer1 could not withdraw during WITHDRAW_DELAY period')

        # test that deposit can be withdrawn
        # e0 - e1 - ... - e4 - ... - e20[95, 96, 97]
        #      d1         l1                     w1
        #      d2
        generate_block(proposer)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 96)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 18,
                'currentEpoch': 20,
                'lastJustifiedEpoch': 18,
                'lastFinalizedEpoch': 18,
                'validators': 1
            })
        sync_blocks([proposer, finalizer1], timeout=10)
        assert_equal(finalizer1.getvalidatorinfo()['validator_status'],
                     'WAITING_TO_WITHDRAW')
        assert_equal(finalizer1.getbalance(), Decimal('9999.99993840'))
        w1 = finalizer1.withdraw(finalizer1_address)
        wait_until(lambda: w1 in proposer.getrawmempool(), timeout=10)
        generate_block(proposer)
        sync_blocks([proposer, finalizer1])
        assert_equal(finalizer1.getvalidatorinfo()['validator_status'],
                     'NOT_VALIDATING')
        assert_equal(finalizer1.getbalance(), Decimal('9999.99992140'))

        self.log.info('finalizer1 was able to withdraw deposit at dynasty=18')

        # test that withdraw commit can be spent
        # e0 - e1 - ... - e4 - ... - e20[95, 96, 97, 98]
        #      d1         l1                     w1  spent_w1
        #      d2
        spent_w1_raw = finalizer1.createrawtransaction(
            [{
                'txid': w1,
                'vout': 0
            }], {finalizer1_address: Decimal('1499.999')})
        spent_w1_signed = finalizer1.signrawtransactionwithwallet(spent_w1_raw)
        spent_w1 = finalizer1.sendrawtransaction(spent_w1_signed['hex'])
        self.wait_for_transaction(spent_w1, nodes=[proposer])

        # mine block
        block_hash = generate_block(proposer)[0]
        assert spent_w1 in proposer.getblock(block_hash)['tx']

        self.log.info('finalizer1 was able to spend withdraw commit')

        # Test that after withdraw the node can deposit again
        sync_blocks([proposer, finalizer1], timeout=10)
        assert_equal(proposer.getblockcount(), 98)
        assert_equal(finalizer1.getvalidatorinfo()['validator_status'],
                     'NOT_VALIDATING')
        deposit = finalizer1.deposit(finalizer1.getnewaddress('', 'legacy'),
                                     1500)
        assert_equal(finalizer1.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_CONFIRMATION')

        self.wait_for_transaction(deposit,
                                  timeout=10,
                                  nodes=[proposer, finalizer1])
        proposer.generate(1)
        sync_blocks([proposer, finalizer1], timeout=10)
        assert_equal(proposer.getblockcount(), 99)

        assert_equal(finalizer1.getvalidatorinfo()['validator_status'],
                     'WAITING_DEPOSIT_FINALIZATION')

        self.log.info('finalizer1 deposits again')

        # Test that finalizer is voting after depositing again
        disconnect_nodes(finalizer1, proposer.index)

        proposer.generate(2)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 101)

        proposer.generate(5)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=proposer)
        assert_equal(proposer.getblockcount(), 106)
        assert_finalizationstate(
            proposer, {
                'currentDynasty': 20,
                'currentEpoch': 22,
                'lastJustifiedEpoch': 20,
                'lastFinalizedEpoch': 20,
                'validators': 2
            })

        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=proposer)
        self.log.info('finalizer1 votes again')
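
wait_for_transaction() is not defined in this excerpt; a rough sketch, assuming it waits until the given nodes (all nodes by default) have seen the transaction either in their mempool or in a block:

def wait_for_transaction(self, txid, timeout=60, nodes=None):
    # A node counts as having "seen" the tx once getrawtransaction succeeds.
    def seen_by(node):
        try:
            node.getrawtransaction(txid)
            return True
        except Exception:
            return False
    wait_until(lambda: all(seen_by(n) for n in (nodes or self.nodes)),
               timeout=timeout)
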
Example 26
0
    def test_heaviest_justified_epoch(self):
        """
        Test that heaviest justified epoch wins
        """
        fork1 = self.nodes[4]
        fork2 = self.nodes[5]
        fork3 = self.nodes[6]
        finalizer = self.nodes[7]

        self.setup_stake_coins(fork1, fork2, fork3, finalizer)

        connect_nodes(fork1, fork2.index)
        connect_nodes(fork1, fork3.index)
        connect_nodes(fork1, finalizer.index)

        # leave IBD
        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer], timeout=10)

        # add deposit
        payto = finalizer.getnewaddress('', 'legacy')
        txid = finalizer.deposit(payto, 1500)
        wait_until(lambda: self.have_tx_in_mempool([fork1, fork2], txid), timeout=10)
        generate_block(fork1)
        sync_blocks([fork1, fork2, finalizer], timeout=10)
        disconnect_nodes(fork1, finalizer.index)

        # leave instant justification
        # F    F    F
        # e0 - e1 - e2 - e3 - e4[16]
        generate_block(fork1, count=3 + 5 + 5 + 1)
        assert_equal(fork1.getblockcount(), 16)
        assert_finalizationstate(fork1, {'currentDynasty': 2,
                                         'currentEpoch': 4,
                                         'lastJustifiedEpoch': 2,
                                         'lastFinalizedEpoch': 2,
                                         'validators': 1})

        # finalize epoch=3
        # F
        # e3 - e4 fork1, fork2, fork3
        self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork1)
        generate_block(fork1, count=4)
        assert_equal(fork1.getblockcount(), 20)
        assert_finalizationstate(fork1, {'currentDynasty': 2,
                                         'currentEpoch': 4,
                                         'lastJustifiedEpoch': 3,
                                         'lastFinalizedEpoch': 3})

        # create two forks at epoch=4 that use the same votes to justify epoch=3
        #             fork3
        # F     F     |
        # e3 - e4[.., 20] - e5[21, 22] fork1
        #                       \
        #                        - 22, 23] fork2
        sync_blocks([fork1, fork3], timeout=10)
        disconnect_nodes(fork1, fork3.index)
        generate_block(fork1)
        sync_blocks([fork1, fork2], timeout=10)

        self.wait_for_vote_and_disconnect(finalizer=finalizer, node=fork1)
        for fork in [fork1, fork2]:
            wait_until(lambda: len(fork.getrawmempool()) == 1, timeout=10)
            assert_equal(fork.getblockcount(), 21)
            assert_finalizationstate(fork, {'currentDynasty': 3,
                                            'currentEpoch': 5,
                                            'lastJustifiedEpoch': 3,
                                            'lastFinalizedEpoch': 3})

        disconnect_nodes(fork1, fork2.index)
        vote = fork1.getrawtransaction(fork1.getrawmempool()[0])

        for fork in [fork1, fork2]:
            generate_block(fork)
            assert_equal(fork.getblockcount(), 22)
            assert_finalizationstate(fork, {'currentDynasty': 3,
                                            'currentEpoch': 5,
                                            'lastJustifiedEpoch': 4,
                                            'lastFinalizedEpoch': 4})

        b23 = generate_block(fork2)[0]

        # test that fork1 switches to the heaviest fork
        #             fork3
        # F     F     |
        # e3 - e4[.., 20] - e5[21, 22]
        #                       \      v
        #                        - 22, 23] fork2, fork1
        connect_nodes(fork1, fork2.index)
        fork1.waitforblock(b23)

        assert_equal(fork1.getblockcount(), 23)
        assert_equal(fork1.getblockhash(23), b23)
        assert_finalizationstate(fork1, {'currentDynasty': 3,
                                         'currentEpoch': 5,
                                         'lastJustifiedEpoch': 4,
                                         'lastFinalizedEpoch': 4})

        disconnect_nodes(fork1, fork2.index)

        # test that fork1 switches to the heaviest fork
        #                                      v
        #                 - e5[21, 22, 23, 24, 25] fork3, fork1
        # F     F       /
        # e3 - e4[.., 20] - e5[21, 22]
        #                       \      v
        #                        - 22, 23] fork2
        assert_equal(fork3.getblockcount(), 20)
        generate_block(fork3, count=4)
        fork3.sendrawtransaction(vote)
        wait_until(lambda: len(fork3.getrawmempool()) == 1, timeout=10)
        b25 = generate_block(fork3)[0]
        assert_equal(fork3.getblockcount(), 25)

        connect_nodes(fork1, fork3.index)
        fork1.waitforblock(b25)

        assert_equal(fork1.getblockcount(), 25)
        assert_equal(fork1.getblockhash(25), b25)
        assert_finalizationstate(fork1, {'currentDynasty': 3,
                                         'currentEpoch': 5,
                                         'lastJustifiedEpoch': 4,
                                         'lastFinalizedEpoch': 4})

        self.stop_node(fork1.index)
        self.stop_node(fork2.index)
        self.stop_node(fork3.index)
        self.stop_node(finalizer.index)
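
have_tx_in_mempool() is not shown in this excerpt; a minimal sketch, assuming it simply reports whether every listed node currently has the transaction in its mempool:

def have_tx_in_mempool(self, nodes, txid):
    # True only when all of the given nodes have the tx in their mempool.
    return all(txid in node.getrawmempool() for node in nodes)
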
Example 27
0
    def run_test(self):
        self.setup_stake_coins(*self.nodes)
        assert all(n.getbalance() == 10000 for n in self.nodes)

        # create topology where arrows denote non-persistent connection
        # finalizer1 → node0 ← finalizer2
        #                ↑
        #            finalizer3
        node0 = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]
        finalizer3 = self.nodes[3]

        connect_nodes(finalizer1, node0.index)
        connect_nodes(finalizer2, node0.index)
        connect_nodes(finalizer3, node0.index)

        # leave IBD
        generate_block(node0)
        sync_blocks(self.nodes)

        # leave instant finalization
        address1 = self.nodes[1].getnewaddress("", "legacy")
        address2 = self.nodes[2].getnewaddress("", "legacy")
        address3 = self.nodes[3].getnewaddress("", "legacy")

        deptx1 = self.nodes[1].deposit(address1, 1500)
        deptx2 = self.nodes[2].deposit(address2, 2000)
        deptx3 = self.nodes[3].deposit(address3, 1500)

        self.wait_for_transaction(deptx1, timeout=10)
        self.wait_for_transaction(deptx2, timeout=10)
        self.wait_for_transaction(deptx3, timeout=10)

        disconnect_nodes(finalizer1, node0.index)
        disconnect_nodes(finalizer2, node0.index)
        disconnect_nodes(finalizer3, node0.index)
        assert_equal(len(node0.getpeerinfo()), 0)

        # move tip to the height when finalizers are activated
        # complete epoch + 2 epochs + 1 block of new epoch
        generate_block(node0, count=4 + 5 + 5 + 1)
        assert_equal(node0.getblockcount(), 16)
        assert_finalizationstate(
            node0, {
                'currentDynasty': 2,
                'currentEpoch': 4,
                'lastJustifiedEpoch': 2,
                'lastFinalizedEpoch': 2,
                'validators': 3
            })

        # test that finalizers vote after processing 1st block of new epoch
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer3, node=node0)
        assert_equal(len(node0.getrawmempool()), 3)

        generate_block(node0, count=4)
        assert_equal(node0.getblockcount(), 20)
        assert_finalizationstate(
            node0, {
                'currentDynasty': 2,
                'currentEpoch': 4,
                'lastJustifiedEpoch': 3,
                'lastFinalizedEpoch': 3,
                'validators': 3
            })
        self.log.info('Finalizers voted after first block of new epoch')

        # test that finalizers can vote on a configured epoch block number
        self.restart_node(
            finalizer1.index,
            ['-validating=1', '-finalizervotefromepochblocknumber=1'])
        self.restart_node(
            finalizer2.index,
            ['-validating=1', '-finalizervotefromepochblocknumber=2'])
        self.restart_node(
            finalizer3.index,
            ['-validating=1', '-finalizervotefromepochblocknumber=3'])

        generate_block(node0)
        assert_equal(node0.getblockcount(), 21)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node0)
        connect_nodes(finalizer2, node0.index)
        connect_nodes(finalizer3, node0.index)
        sync_blocks([finalizer2, finalizer3, node0], timeout=10)
        assert_equal(len(node0.getrawmempool()),
                     1)  # no votes from finalizer2 and finalizer3
        disconnect_nodes(finalizer2, node0.index)
        disconnect_nodes(finalizer3, node0.index)

        generate_block(node0)
        assert_equal(node0.getblockcount(), 22)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node0)
        connect_nodes(finalizer3, node0.index)
        sync_blocks([finalizer3, node0], timeout=10)
        assert_equal(len(node0.getrawmempool()), 1)  # no votes from finalizer3
        disconnect_nodes(finalizer3, node0.index)

        generate_block(node0)
        assert_equal(node0.getblockcount(), 23)
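        # block 23 is the 3rd block of the epoch: finalizer3 finally votes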
        self.wait_for_vote_and_disconnect(finalizer=finalizer3, node=node0)
        generate_block(node0, count=2)
        assert_equal(node0.getblockcount(), 25)
        assert_finalizationstate(
            node0, {
                'currentDynasty': 3,
                'currentEpoch': 5,
                'lastJustifiedEpoch': 4,
                'lastFinalizedEpoch': 4,
                'validators': 3
            })
        self.log.info('Finalizers voted on a configured block number')

        # test that finalizers can vote after configured epoch block number
        generate_block(node0, count=4)
        assert_equal(node0.getblockcount(), 29)
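        # block 29 is the 4th block of epoch 6, past every configured vote-from threshold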
        prev_tx = self.wait_for_vote_and_disconnect(finalizer=finalizer1,
                                                    node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer3, node=node0)
        generate_block(node0)
        assert_equal(node0.getblockcount(), 30)
        assert_finalizationstate(
            node0, {
                'currentDynasty': 4,
                'currentEpoch': 6,
                'lastJustifiedEpoch': 5,
                'lastFinalizedEpoch': 5,
                'validators': 3
            })
        self.log.info('Finalizers voted after configured block number')

        generate_block(node0, count=4)
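        # turn finalizer1's last raw vote into its txid so it can be passed to
        # make_vote_tx as the previous vote transaction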
        prev_tx = finalizer1.decoderawtransaction(prev_tx)['txid']

        # check that make_vote_tx works as expected
        tx = make_vote_tx(finalizer1, address1, node0.getblockhash(30), 5, 6,
                          prev_tx)
        node0.sendrawtransaction(tx)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node0)
        self.wait_for_vote_and_disconnect(finalizer=finalizer3, node=node0)
        generate_block(node0)
        assert_equal(node0.getblockcount(), 35)
        assert_finalizationstate(
            node0, {
                'currentDynasty': 5,
                'currentEpoch': 7,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 6,
                'validators': 3
            })
        self.log.info('make_vote_tx works together with real finalizers')

        # test that the node rejects outdated and invalid votes.
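        # all of these must fail with bad-vote-invalid: votes whose target epoch is
        # already finalized (targets 2 and 3), a target epoch that does not exist
        # yet (9), and a target (6) that is not newer than its source (7)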
        tx = make_vote_tx(finalizer1, address1, node0.getblockhash(30), 1, 2,
                          prev_tx)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                node0.sendrawtransaction, tx)
        tx = make_vote_tx(finalizer1, address1, node0.getblockhash(30), 2, 3,
                          prev_tx)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                node0.sendrawtransaction, tx)
        tx = make_vote_tx(finalizer1, address1, node0.getblockhash(30), 7, 9,
                          prev_tx)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                node0.sendrawtransaction, tx)
        tx = make_vote_tx(finalizer1, address1, node0.getblockhash(30), 7, 6,
                          prev_tx)
        assert_raises_rpc_error(-26, 'bad-vote-invalid',
                                node0.sendrawtransaction, tx)
        self.log.info('Tested outdated and invalid votes')
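
    # Note: the snippets above call self.wait_for_vote_and_disconnect(), which is not
    # defined in any of these examples. The method below is only a hedged sketch of what
    # such a helper could look like, inferred from how it is used here (connect the
    # finalizer, wait until its vote reaches the node's mempool, disconnect again, and
    # return the raw vote transaction); the real framework's name, signature and details
    # may differ.
    def wait_for_vote_and_disconnect(self, finalizer, node, timeout=10):
        mempool_before = set(node.getrawmempool())
        connect_nodes(finalizer, node.index)
        # the finalizer has to see the latest checkpoint before it casts its vote
        sync_blocks([finalizer, node], timeout=timeout)
        # wait until the new vote transaction shows up in the node's mempool
        wait_until(lambda: set(node.getrawmempool()) - mempool_before, timeout=timeout)
        vote_txid = (set(node.getrawmempool()) - mempool_before).pop()
        disconnect_nodes(finalizer, node.index)
        # callers above treat the return value as a raw transaction
        return node.getrawtransaction(vote_txid)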
Example no. 28
    def test_getfinalizationstate(self):
        def create_deposit(finalizer, node):
            connect_nodes(finalizer, node.index)
            payto = finalizer.getnewaddress('', 'legacy')
            txid = finalizer.deposit(payto, 1500)
            wait_until(lambda: txid in node.getrawmempool())
            disconnect_nodes(finalizer, node.index)

        node = self.nodes[0]
        finalizer1 = self.nodes[1]
        finalizer2 = self.nodes[2]

        self.setup_stake_coins(node, finalizer1, finalizer2)

        # initial setup
        # F
        # e0
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 0)
        assert_equal(state['lastJustifiedEpoch'], 0)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)

        # start epoch=1
        # F
        # e0 - e1[1]
        connect_nodes(node, finalizer1.index)
        connect_nodes(node, finalizer2.index)
        generate_block(node)
        sync_blocks([node, finalizer1, finalizer2])
        disconnect_nodes(node, finalizer1.index)
        disconnect_nodes(node, finalizer2.index)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 1)
        assert_equal(state['lastJustifiedEpoch'], 0)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)

        self.log.info('initial finalization state is correct')

        # add finalizer1
        create_deposit(finalizer1, node)

        # test instant justification 1
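        # while no finalizer is active yet, checkpoints are justified and finalized
        # automatically ("instant justification/finalization")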
        # F
        # e0 - e1
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 5)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 1)
        assert_equal(state['lastJustifiedEpoch'], 0)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)
        self.log.info('deposit is created but the validator set is still empty')

        # test instant justification 2
        # F    J
        # e0 - e1 - e2
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 10)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 2)
        assert_equal(state['lastJustifiedEpoch'], 1)
        assert_equal(state['lastFinalizedEpoch'], 0)
        assert_equal(state['validators'], 0)
        self.log.info('instant finalization 1 is correct')

        # test instant justification 3
        # F    F    J
        # e0 - e1 - e2 - e3
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 15)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 0)
        assert_equal(state['currentDynastyStartsAtEpoch'], 1)
        assert_equal(state['currentEpoch'], 3)
        assert_equal(state['lastJustifiedEpoch'], 2)
        assert_equal(state['lastFinalizedEpoch'], 1)
        assert_equal(state['validators'], 0)
        self.log.info('instant finalization 2 is correct')

        # test instant justification 4
        # F    F    F    J
        # e0 - e1 - e2 - e3 - e4
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 20)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 1)
        assert_equal(state['currentDynastyStartsAtEpoch'], 4)
        assert_equal(state['currentEpoch'], 4)
        assert_equal(state['lastJustifiedEpoch'], 3)
        assert_equal(state['lastFinalizedEpoch'], 2)
        assert_equal(state['validators'], 0)
        self.log.info('instant finalization 3 is correct')

        # test instant justification 5 (must be last one)
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 25)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 2)
        assert_equal(state['currentDynastyStartsAtEpoch'], 5)
        assert_equal(state['currentEpoch'], 5)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 0)

        # no justification
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6
        generate_block(node)
        assert_equal(node.getblockcount(), 26)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 6)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)

        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 30)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 6)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)

        # no justification
        # F    F    F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7[31]
        generate_block(node)
        assert_equal(node.getblockcount(), 31)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 7)
        assert_equal(state['lastJustifiedEpoch'], 4)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)
        self.log.info('finalization state without justification is correct')

        # create first justification
        # F    F    F    F    J         J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7[31, 32]
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node)

        assert_equal(node.getblockcount(), 32)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 7)
        assert_equal(state['lastJustifiedEpoch'], 6)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)
        self.log.info('finalization state after justification is correct')

        # skip 1 justification
        # F    F    F    F    J         J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9[41]
        generate_block(node, count=9)
        assert_equal(node.getblockcount(), 41)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 9)
        assert_equal(state['lastJustifiedEpoch'], 6)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)
        self.log.info('finalization state without justification is correct')

        # justify e8 (e7 was skipped, so nothing new gets finalized yet)
        # F    F    F    F    J         J         J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9[41, 42]
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node)
        assert_equal(node.getblockcount(), 42)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 9)
        assert_equal(state['lastJustifiedEpoch'], 8)
        assert_equal(state['lastFinalizedEpoch'], 3)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J         J         F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10[46, 47]
        generate_block(node, count=4)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node)
        assert_equal(node.getblockcount(), 47)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 10)
        assert_equal(state['lastJustifiedEpoch'], 9)
        assert_equal(state['lastFinalizedEpoch'], 8)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J         J         F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10
        generate_block(node, count=3)
        assert_equal(node.getblockcount(), 50)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 3)
        assert_equal(state['currentDynastyStartsAtEpoch'], 6)
        assert_equal(state['currentEpoch'], 10)
        assert_equal(state['lastJustifiedEpoch'], 9)
        assert_equal(state['lastFinalizedEpoch'], 8)
        assert_equal(state['validators'], 1)

        self.log.info('finalization state after finalization is correct')

        # F    F    F    F    J         J         F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11[51]
        generate_block(node)
        assert_equal(node.getblockcount(), 51)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 4)
        assert_equal(state['currentDynastyStartsAtEpoch'], 11)
        assert_equal(state['currentEpoch'], 11)
        assert_equal(state['lastJustifiedEpoch'], 9)
        assert_equal(state['lastFinalizedEpoch'], 8)
        assert_equal(state['validators'], 1)
        self.log.info('dynasty after finalization is updated correctly')

        # add finalizer2 deposit at dynasty=5. will vote at dynasty=8
        create_deposit(finalizer2, node)

        # F    F    F    F    J         J         F    F    J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 55)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 4)
        assert_equal(state['currentDynastyStartsAtEpoch'], 11)
        assert_equal(state['currentEpoch'], 11)
        assert_equal(state['lastJustifiedEpoch'], 10)
        assert_equal(state['lastFinalizedEpoch'], 9)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J         J         F    F    F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12
        generate_block(node)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 60)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 5)
        assert_equal(state['currentDynastyStartsAtEpoch'], 12)
        assert_equal(state['currentEpoch'], 12)
        assert_equal(state['lastJustifiedEpoch'], 11)
        assert_equal(state['lastFinalizedEpoch'], 10)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J         J         F    F    F     F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12 - e13
        generate_block(node)
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 65)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 6)
        assert_equal(state['currentDynastyStartsAtEpoch'], 13)
        assert_equal(state['currentEpoch'], 13)
        assert_equal(state['lastJustifiedEpoch'], 12)
        assert_equal(state['lastFinalizedEpoch'], 11)
        assert_equal(state['validators'], 1)

        # F    F    F    F    J         J         F    F    F     F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12 - e13 - e14[66]
        generate_block(node)
        assert_equal(node.getblockcount(), 66)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 7)
        assert_equal(state['currentDynastyStartsAtEpoch'], 14)
        assert_equal(state['currentEpoch'], 14)
        assert_equal(state['lastJustifiedEpoch'], 12)
        assert_equal(state['lastFinalizedEpoch'], 11)
        assert_equal(state['validators'], 2)
        self.log.info('new deposit was activated correctly')

        # F    F    F    F    J         J         F    F    F     F     F     J
        # e0 - e1 - e2 - e3 - e4 - e5 - e6 - e7 - e8 - e9 - e10 - e11 - e12 - e13 - e14
        self.wait_for_vote_and_disconnect(finalizer=finalizer1, node=node)
        self.wait_for_vote_and_disconnect(finalizer=finalizer2, node=node)
        generate_block(node, count=4)
        assert_equal(node.getblockcount(), 70)
        state = node.getfinalizationstate()
        assert_equal(state['currentDynasty'], 7)
        assert_equal(state['currentDynastyStartsAtEpoch'], 14)
        assert_equal(state['currentEpoch'], 14)
        assert_equal(state['lastJustifiedEpoch'], 13)
        assert_equal(state['lastFinalizedEpoch'], 12)
        assert_equal(state['validators'], 2)
        self.log.info('new finalizer votes')
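
# Note: assert_finalizationstate() is used throughout these examples but never defined in
# them. The function below is only a hedged sketch of what it presumably does (check the
# caller's expected fields against the getfinalizationstate RPC); the real helper may differ.
def assert_finalizationstate(node, expected):
    state = node.getfinalizationstate()
    for key, value in expected.items():
        # only the keys listed by the caller are checked
        assert_equal(state[key], value)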
Example no. 29
    def run_test(self):
        p0, p1, p2, v0 = self.nodes

        self.setup_stake_coins(p0, p1, p2, v0)

        # Leave IBD
        self.generate_sync(p0)

        self.log.info("Setup deposit")
        setup_deposit(self, p0, [v0])
        sync_blocks([p0, p1, p2, v0])

        self.log.info("Setup test prerequisites")
        # get up to block 49, just one block before the next checkpoint
        # (this test uses 10-block epochs)
        generate_block(p0, count=18)

        assert_equal(p0.getblockcount(), 49)
        sync_blocks([p0, p1, p2, v0])

        assert_finalizationstate(p0, {
            'currentEpoch': 5,
            'lastJustifiedEpoch': 4,
            'lastFinalizedEpoch': 3
        })

        # disconnect p0
        # v0: p1, p2
        # p0:
        # p1: v0
        # p2: v0
        disconnect_nodes(p0, v0.index)
        disconnect_nodes(p0, p1.index)

        # disconnect p2
        # v0: p1
        # p0:
        # p1: v0
        # p2:
        disconnect_nodes(p2, v0.index)

        # disconnect p1
        # v0:
        # p0:
        # p1:
        # p2:
        disconnect_nodes(p1, v0.index)

        # generate long chain in p0 but don't justify it
        #  F     J
        # 30 .. 40 .. 89    -- p0
        generate_block(p0, count=40)

        assert_equal(p0.getblockcount(), 89)
        assert_finalizationstate(p0, {
            'currentEpoch': 9,
            'lastJustifiedEpoch': 4,
            'lastFinalizedEpoch': 3
        })

        # generate a short chain in p1 and justify it;
        # during the 6th and 7th epochs, sync with the validator so it votes
        #  F     J
        # 30 .. 40 .. 49 .. .. .. .. .. .. 89    -- p0
        #               \
        #                50 .. 60 .. 69          -- p1
        #                 F     J
        # get to the 6th epoch
        generate_block(p1, count=2)
        self.wait_for_vote_and_disconnect(finalizer=v0, node=p1)
        # get to the 7th epoch
        generate_block(p1, count=10)
        self.wait_for_vote_and_disconnect(finalizer=v0, node=p1)
        # generate the rest of the blocks
        generate_block(p1, count=8)
        connect_nodes(p1, v0.index)
        sync_blocks([p1, v0])

        assert_equal(p1.getblockcount(), 69)
        assert_finalizationstate(p1, {
            'currentEpoch': 7,
            'lastJustifiedEpoch': 6,
            'lastFinalizedEpoch': 5
        })

        # connect p2 with p0 and p1; p2 must switch to the longest justified p1
        # v0: p1
        # p0: p2
        # p1: v0, p2
        # p2: p0, p1
        self.log.info("Test fresh node sync")
        connect_nodes(p2, p0.index)
        connect_nodes(p2, p1.index)

        sync_blocks([p1, p2])
        assert_equal(p1.getblockcount(), 69)
        assert_equal(p2.getblockcount(), 69)

        assert_finalizationstate(p1, {
            'currentEpoch': 7,
            'lastJustifiedEpoch': 6,
            'lastFinalizedEpoch': 5
        })
        assert_finalizationstate(p2, {
            'currentEpoch': 7,
            'lastJustifiedEpoch': 6,
            'lastFinalizedEpoch': 5
        })

        # connect p0 with p1, p0 must disconnect its longest but not justified fork and choose p1
        # v0: p1
        # p0: p1, p2
        # p1: v0, p0, p2
        # p2: p0, p1
        self.log.info("Test longest node reverts to justified")
        connect_nodes(p0, p1.index)
        sync_blocks([p0, p1])

        # check that p0 accepted the chain that is shorter in blocks but has the
        # more recent justified checkpoint
        assert_equal(p0.getblockcount(), 69)
        assert_equal(p1.getblockcount(), 69)
        assert_equal(v0.getblockcount(), 69)

        # generate more blocks to make sure they're processed
        self.log.info("Test all nodes continue to work as usual")
        generate_block(p0, count=30)
        sync_blocks([p0, p1, p2, v0])
        assert_equal(p0.getblockcount(), 99)

        generate_block(p1, count=30)
        sync_blocks([p0, p1, p2, v0])
        assert_equal(p1.getblockcount(), 129)

        generate_block(p2, count=30)
        sync_blocks([p0, p1, p2, v0])
        assert_equal(p2.getblockcount(), 159)

        # disconnect all nodes
        # v0:
        # p0:
        # p1:
        # p2:
        self.log.info("Test nodes sync after reconnection")
        disconnect_nodes(v0, p1.index)
        disconnect_nodes(p0, p1.index)
        disconnect_nodes(p0, p2.index)
        disconnect_nodes(p1, p2.index)

        generate_block(p0, count=10)
        generate_block(p1, count=20)
        generate_block(p2, count=30)

        assert_equal(p0.getblockcount(), 169)
        assert_equal(p1.getblockcount(), 179)
        assert_equal(p2.getblockcount(), 189)

        # connect validator back to p1
        # v0: p1
        # p0: p1
        # p1: v0, p0, p2
        # p2: p1
        connect_nodes(p1, v0.index)
        sync_blocks([p1, v0])
        connect_nodes(p1, p0.index)
        connect_nodes(p1, p2.index)
        sync_blocks([p0, p1, p2, v0])
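
        # Hypothetical extra check (not part of the original test): after the final sync
        # all nodes share one chain, so p0's tip and its block at height 69 can be compared
        # directly against p1 to confirm that p0 really stayed on the justified fork.
        assert_equal(p0.getbestblockhash(), p1.getbestblockhash())
        assert_equal(p0.getblockhash(69), p1.getblockhash(69))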
Example no. 30
    def run_test(self):
        def verify_snapshot_result(res):
            if 'snapshot_hash' not in res:
                return False
            if 'valid' not in res:
                return False
            return res['valid'] is True

        def has_valid_snapshot_for_height(node, height):
            res = node.getblocksnapshot(node.getblockhash(height))
            return verify_snapshot_result(res)

        validator = self.nodes[0]
        node = self.nodes[1]

        validator.importmasterkey(regtest_mnemonics[0]['mnemonics'])
        node.importmasterkey(regtest_mnemonics[1]['mnemonics'])

        generate_block(node)  # IBD

        # test 1. node generates snapshots with the expected interval
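        # a snapshot is expected every 5 blocks, so after 24 blocks in total there
        # should be snapshots at heights 4, 9, 14, 19 and 24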
        generate_block(node, count=23)
        wait_until(lambda: len(node.listsnapshots()) == 5)
        assert has_valid_snapshot_for_height(node, 4)
        assert has_valid_snapshot_for_height(node, 9)
        assert has_valid_snapshot_for_height(node, 14)
        assert has_valid_snapshot_for_height(node, 19)
        assert has_valid_snapshot_for_height(node, 24)

        # test 2. node keeps up to 5 snapshots
        generate_block(node, count=5)
        assert_equal(node.getblockcount(), 29)
        wait_until(lambda: has_valid_snapshot_for_height(node, 29), timeout=10)
        assert_equal(len(node.listsnapshots()), 5)
        assert has_valid_snapshot_for_height(node, 4) is False
        assert has_valid_snapshot_for_height(node, 9)
        assert has_valid_snapshot_for_height(node, 14)
        assert has_valid_snapshot_for_height(node, 19)
        assert has_valid_snapshot_for_height(node, 24)

        # disable instant justification
        payto = validator.getnewaddress("", "legacy")
        txid = validator.deposit(payto, 1500)
        self.wait_for_transaction(txid, 10)
        self.stop_node(validator.index)
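        # with the only validator offline, no further epochs get finalized, so the
        # snapshots created from now on remain unfinalized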

        generate_block(node, count=12)
        assert_equal(node.getblockcount(), 41)
        assert_finalizationstate(
            node, {
                'currentDynasty': 6,
                'currentEpoch': 9,
                'lastJustifiedEpoch': 6,
                'lastFinalizedEpoch': 6,
                'validators': 1
            })

        wait_until(lambda: has_valid_snapshot_for_height(node, 39), timeout=10)
        assert_equal(len(node.listsnapshots()), 5)
        assert has_valid_snapshot_for_height(node, 9) is False
        assert has_valid_snapshot_for_height(node, 14) is False
        assert node.getblocksnapshot(
            node.getblockhash(19))['snapshot_finalized']
        assert node.getblocksnapshot(
            node.getblockhash(24))['snapshot_finalized']
        assert node.getblocksnapshot(
            node.getblockhash(29))['snapshot_finalized']
        assert node.getblocksnapshot(
            node.getblockhash(34))['snapshot_finalized'] is False
        assert node.getblocksnapshot(
            node.getblockhash(39))['snapshot_finalized'] is False

        # test 3. node keeps at least 2 finalized snapshots
        generate_block(node, count=9)
        wait_until(lambda: has_valid_snapshot_for_height(node, 49), timeout=10)
        assert_equal(len(node.listsnapshots()), 5)
        assert has_valid_snapshot_for_height(node, 19) is False
        assert node.getblocksnapshot(
            node.getblockhash(24))['snapshot_finalized']
        assert node.getblocksnapshot(
            node.getblockhash(29))['snapshot_finalized']
        assert has_valid_snapshot_for_height(node, 34) is False
        assert node.getblocksnapshot(
            node.getblockhash(39))['snapshot_finalized'] is False
        assert node.getblocksnapshot(
            node.getblockhash(44))['snapshot_finalized'] is False
        assert node.getblocksnapshot(
            node.getblockhash(49))['snapshot_finalized'] is False

        generate_block(node, count=5)
        wait_until(lambda: has_valid_snapshot_for_height(node, 54), timeout=10)
        assert_equal(len(node.listsnapshots()), 5)
        assert node.getblocksnapshot(
            node.getblockhash(24))['snapshot_finalized']
        assert node.getblocksnapshot(
            node.getblockhash(29))['snapshot_finalized']
        assert has_valid_snapshot_for_height(node, 39) is False
        assert node.getblocksnapshot(
            node.getblockhash(44))['snapshot_finalized'] is False
        assert node.getblocksnapshot(
            node.getblockhash(49))['snapshot_finalized'] is False

        generate_block(node, count=5)
        wait_until(lambda: has_valid_snapshot_for_height(node, 59), timeout=10)
        assert_equal(len(node.listsnapshots()), 5)
        assert node.getblocksnapshot(
            node.getblockhash(24))['snapshot_finalized']
        assert node.getblocksnapshot(
            node.getblockhash(29))['snapshot_finalized']
        assert has_valid_snapshot_for_height(node, 44) is False
        assert node.getblocksnapshot(
            node.getblockhash(49))['snapshot_finalized'] is False
        assert node.getblocksnapshot(
            node.getblockhash(54))['snapshot_finalized'] is False
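
        # Hypothetical debugging helper (not part of the original test): it reports, for a
        # list of heights, whether a valid snapshot exists and whether it is finalized,
        # using only the getblockhash/getblocksnapshot RPCs already exercised above.
        def snapshot_status(node, heights):
            status = {}
            for h in heights:
                res = node.getblocksnapshot(node.getblockhash(h))
                valid = res.get('valid') is True
                # 'snapshot_finalized' is only meaningful for valid snapshots
                status[h] = (valid, res.get('snapshot_finalized') if valid else None)
            return status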