Example #1
    def run_test(self):
        client = RpcClient(self.nodes[0])
        genesis_address = "0x" + encode_hex(
            priv_to_addr(default_config['GENESIS_PRI_KEY']))
        genesis_balance = default_config["TOTAL_COIN"]
        client.generate_empty_blocks(ERA_EPOCH_COUNT * 10)
        print(client.epoch_number("latest_checkpoint"))
        assert client.epoch_number("latest_checkpoint") > 0
        # Just assert we can still get the balance
        assert_equal(client.get_balance(genesis_address, client.EPOCH_NUM(1)),
                     genesis_balance)
Example #2
    def run_test(self):
        num_blocks = 200
        checkpoint_epoch = 100

        # Generate checkpoint on node[0]
        client = RpcClient(self.nodes[0])
        genesis_nonce = client.get_nonce(client.GENESIS_ADDR)
        for _ in range(num_blocks):
            tx = client.new_tx(nonce=genesis_nonce)
            tx_hash = client.send_tx(tx)
            assert tx_hash == tx.hash_hex()
            genesis_nonce += 1
            client.generate_block(100)

        # Start node[1] as a full node to sync the checkpoint.
        # The phase changes from CatchUpSyncBlockHeader to CatchUpCheckpoint
        # only when there is at least one connected peer.
        self.start_node(1, ["--full"], phase_to_wait=None)
        connect_nodes(self.nodes, 1, 0)

        # FIXME: full node issue that hangs at phase CatchUpRecoverBlockFromDbPhase
        self.nodes[1].wait_for_phase(["NormalSyncPhase"], wait_time=30)

        sync_blocks(self.nodes, sync_count=False)

        client = RpcClient(self.nodes[1])

        # At epoch 1, the block header exists but the body is not synchronized
        try:
            print(client.block_by_epoch(client.EPOCH_NUM(1)))
        except ReceivedErrorResponseError as e:
            assert 'Internal error' == e.response.message

        # There is no state from epoch 1 up to checkpoint_epoch.
        # Note: the state of the genesis epoch always exists.
        assert client.epoch_number() >= checkpoint_epoch
        for i in range(1, checkpoint_epoch):
            try:
                client.get_balance(client.GENESIS_ADDR, client.EPOCH_NUM(i))
                raise AssertionError(
                    "should not have state for epoch {}".format(i))
            except ReceivedErrorResponseError as e:
                assert "State for epoch" in e.response.message
                assert "does not exist" in e.response.message

        # State should exist at the checkpoint
        client.get_balance(client.GENESIS_ADDR,
                           client.EPOCH_NUM(checkpoint_epoch))

        # There should be states after the checkpoint
        for i in range(checkpoint_epoch + 1, client.epoch_number() - 3):
            client.get_balance(client.GENESIS_ADDR, client.EPOCH_NUM(i))
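The try/except pattern above recurs in several of these examples and can be factored into a small helper. A hedged sketch, where assert_no_state is a hypothetical name; only ReceivedErrorResponseError and the error wording come from the example:

# Hedged sketch: encapsulate the "state must be missing" check.
# assert_no_state is hypothetical, not part of the Conflux test framework.
def assert_no_state(client, address, epoch):
    try:
        client.get_balance(address, epoch)
    except ReceivedErrorResponseError as e:
        assert "State for epoch" in e.response.message
        assert "does not exist" in e.response.message
    else:
        raise AssertionError("should not have state for epoch {}".format(epoch))

Using else keeps the AssertionError outside the try body, so the failure cannot be confused with the expected RPC error.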
Example #3
    def run_test(self):
        num_blocks = 200
        checkpoint_epoch = 100

        # Generate checkpoint on node[0]
        client = RpcClient(self.nodes[0])
        self.genesis_nonce = client.get_nonce(client.GENESIS_ADDR)
        for _ in range(num_blocks):
            txs = self._generate_txs(0, random.randint(5, 10))
            client.generate_block_with_fake_txs(txs)

        # Start node[1] as a full node to sync the checkpoint.
        # The phase changes from CatchUpSyncBlockHeader to CatchUpCheckpoint
        # only when there is at least one connected peer.
        self.start_node(1, ["--full"], phase_to_wait=None)
        connect_nodes(self.nodes, 1, 0)

        # FIXME: full node issue that hangs at phase CatchUpRecoverBlockFromDbPhase
        self.nodes[1].wait_for_phase(["NormalSyncPhase"], wait_time=30)

        sync_blocks(self.nodes, sync_count=False)

        client = RpcClient(self.nodes[1])

        # At epoch 1, the block header exists but the body is not synchronized
        try:
            print(client.block_by_epoch(client.EPOCH_NUM(1)))
        except ReceivedErrorResponseError as e:
            assert 'Internal error' == e.response.message

        # There is no state from epoch 1 up to checkpoint_epoch.
        # Note: the state of the genesis epoch always exists.
        assert client.epoch_number() >= checkpoint_epoch
        # FIXME: we subtract REWARD_EPOCH_COUNT here as a workaround;
        # FIXME: once the state boundary is implemented in consensus,
        # FIXME: this workaround should be removed.
        for i in range(1, checkpoint_epoch - 11):
            try:
                client.get_balance(client.GENESIS_ADDR, client.EPOCH_NUM(i))
                raise AssertionError(
                    "should not have state for epoch {}".format(i))
            except ReceivedErrorResponseError as e:
                assert "State for epoch" in e.response.message
                assert "does not exist" in e.response.message

        # State should exist at the checkpoint
        client.get_balance(client.GENESIS_ADDR,
                           client.EPOCH_NUM(checkpoint_epoch))

        # There should be states after the checkpoint
        for i in range(checkpoint_epoch, client.epoch_number() - 3):
            client.get_balance(client.GENESIS_ADDR, client.EPOCH_NUM(i))
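Examples #3 and #5 call a self._generate_txs helper that is not shown on this page. A hedged sketch of a plausible implementation, inferred from the get_nonce/new_tx usage in Example #2; the body is an assumption:

# Hedged sketch: plausible _generate_txs, inferred from Example #2.
# Assumes self.genesis_nonce was seeded via client.get_nonce() and that
# RpcClient.new_tx accepts a nonce keyword, as Example #2 shows.
def _generate_txs(self, node_index, count):
    client = RpcClient(self.nodes[node_index])
    txs = []
    for _ in range(count):
        txs.append(client.new_tx(nonce=self.genesis_nonce))
        self.genesis_nonce += 1
    return txs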
Example #4
    def run_test(self):
        time.sleep(7)
        client = RpcClient(self.nodes[0])
        genesis = self.nodes[0].best_block_hash()
        self.log.info(genesis)
        genesis_epoch = client.epoch_number(client.EPOCH_LATEST_CONFIRMED)
        assert_equal(genesis_epoch, 0)

        # Generate blocks at the default 0.5 s interval.
        for i in range(0, 160):
            self.nodes[0].generate_empty_blocks(1)
            time.sleep(0.5)
            last_mined = client.epoch_number(client.EPOCH_LATEST_MINED)
            confirmed = client.epoch_number(client.EPOCH_LATEST_CONFIRMED)
            self.log.info("Mined epoch: " + str(last_mined) +
                          " Confirmed epoch: " + str(confirmed))
            # This is a very loose bound given the default parameters for Conflux.
            # If we change consensus/confirmation-related parameters, this needs
            # to be changed as well.
            assert (last_mined <= 70 or last_mined - confirmed > 70)
            assert (last_mined - confirmed < 100)
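The two assertions in the loop bound the gap between the latest mined and latest confirmed epochs. A hedged sketch that names the bound explicitly; check_confirmation_gap is a hypothetical helper, while the 70/100 values come from the example:

# Hedged sketch: the loose confirmation bound above, made explicit.
def check_confirmation_gap(last_mined, confirmed, warmup=70, upper=100):
    # During the first ~warmup epochs the meter has no stable reading;
    # past that point the gap should sit strictly between the bounds.
    if last_mined > warmup:
        assert last_mined - confirmed > warmup
    assert last_mined - confirmed < upper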
Example #5
    def run_test(self):
        num_blocks = 200
        snapshot_epoch = 150

        # Generate checkpoint on node[0]
        archive_node_client = RpcClient(self.nodes[0])
        self.genesis_nonce = archive_node_client.get_nonce(
            archive_node_client.GENESIS_ADDR)
        blocks_in_era = []
        for i in range(num_blocks):
            txs = self._generate_txs(0, random.randint(50, 100))
            block_hash = archive_node_client.generate_block_with_fake_txs(txs)
            if i >= snapshot_epoch:
                blocks_in_era.append(block_hash)
        sync_blocks(self.nodes[:-1])
        self.log.info("All archive nodes synced")

        # Start node[full_node_index] as a full node to sync the checkpoint.
        # The phase changes from CatchUpSyncBlockHeader to CatchUpCheckpoint
        # only when there is at least one connected peer.
        full_node_index = self.num_nodes - 1
        self.start_node(full_node_index, ["--full"], phase_to_wait=None)
        for i in range(self.num_nodes - 1):
            connect_nodes(self.nodes, full_node_index, i)

        self.log.info("Wait for full node to sync, index=%d", full_node_index)
        self.nodes[full_node_index].wait_for_phase(["NormalSyncPhase"],
                                                   wait_time=240)

        sync_blocks(self.nodes, sync_count=False)

        full_node_client = RpcClient(self.nodes[full_node_index])

        # At epoch 1, the block header exists but the body is not synchronized
        try:
            print(
                full_node_client.block_by_epoch(full_node_client.EPOCH_NUM(1)))
        except ReceivedErrorResponseError as e:
            assert 'Internal error' == e.response.message

        # There is no state from epoch 1 up to snapshot_epoch.
        # Note: the state of the genesis epoch always exists.
        assert full_node_client.epoch_number() >= snapshot_epoch
        wait_until(
            lambda: full_node_client.epoch_number() == archive_node_client.
            epoch_number() and full_node_client.epoch_number("latest_state") ==
            archive_node_client.epoch_number("latest_state"))
        # The state at snapshot_epoch is kept for execution but
        # is not exposed to RPC clients.
        for i in range(1, snapshot_epoch + 1):
            try:
                full_node_client.get_balance(full_node_client.GENESIS_ADDR,
                                             full_node_client.EPOCH_NUM(i))
                raise AssertionError(
                    "should not have state for epoch {}".format(i))
            except ReceivedErrorResponseError as e:
                assert "State for epoch" in e.response.message
                assert "does not exist" in e.response.message

        # Wait for execution to complete.
        time.sleep(1)

        # There should be states after the snapshot epoch
        for i in range(snapshot_epoch + 1,
                       full_node_client.epoch_number() - 3):
            full_balance = full_node_client.get_balance(
                full_node_client.GENESIS_ADDR, full_node_client.EPOCH_NUM(i))
            archive_balance = archive_node_client.get_balance(
                archive_node_client.GENESIS_ADDR,
                archive_node_client.EPOCH_NUM(i))
            assert_equal(full_balance, archive_balance)

        # Blocks within the execution delay (5 epochs) plus the reward delay
        # (12 epochs) do not yet have state_valid
        available_blocks = blocks_in_era[:-17]
        assert_blocks_valid(self.nodes[:-1], available_blocks)
        assert_blocks_valid(self.nodes[-1:], available_blocks)
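Example #5 ends with assert_blocks_valid, which is not shown on this page. A hedged sketch of what such a check could look like; the per-block query and the "stateValid" field name are assumptions, only the helper's name and call shape come from the example:

# Hedged sketch: a plausible assert_blocks_valid. block_by_hash and the
# "stateValid" field are assumptions, not confirmed by the example.
def assert_blocks_valid(nodes, block_hashes):
    for node in nodes:
        client = RpcClient(node)
        for block_hash in block_hashes:
            block = client.block_by_hash(block_hash)
            assert block["stateValid"] is True, block_hash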
Example #6
    def run_test(self):
        # The PoS contract is enabled; stake and register in the first hard-fork phase.
        client = RpcClient(self.nodes[self.num_nodes - 1])
        client.generate_empty_blocks(300)
        sync_blocks(self.nodes)
        for node in self.nodes[:-1]:
            client = RpcClient(node)
            pos_identifier, _ = client.wait_for_pos_register()
            sync_blocks(self.nodes)
        client = RpcClient(self.nodes[self.num_nodes - 1])

        # Generate blocks until we are past PoS initialization and before PoS start.
        best_epoch = client.epoch_number()
        client.generate_empty_blocks(600 - best_epoch)
        sync_blocks(self.nodes)

        voting_power_map = {}
        pub_keys_map = {}
        logs = client.get_logs(filter=Filter(
            from_epoch="earliest",
            to_epoch="latest_state",
            address=["0x0888000000000000000000000000000000000005"]))
        for log in logs:
            pos_identifier = log["topics"][1]
            if log["topics"][0] == REGISTER_TOPIC:
                bls_pub_key, vrf_pub_key = eth_abi.decode_abi(["bytes", "bytes"], decode_hex(log["data"]))
                pub_keys_map[pos_identifier] = (encode_hex_0x(bls_pub_key), encode_hex_0x(vrf_pub_key))
            elif log["topics"][0] == INCREASE_STAKE_TOPIC:
                assert pos_identifier in pub_keys_map
                voting_power_map[pos_identifier] = parse_as_int(log["data"])
        with open(os.path.join(self.options.tmpdir, "public_keys"), "w") as f:
            for pos_identifier in pub_keys_map.keys():
                bls_key, vrf_key = pub_keys_map[pos_identifier]
                voting_power = voting_power_map[pos_identifier]
                f.write(",".join([bls_key[2:], vrf_key[2:],
                                  str(voting_power)]) + "\n")
        initialize_tg_config(self.options.tmpdir, len(self.nodes),
                             len(self.nodes), DEFAULT_PY_TEST_CHAIN_ID,
                             pkfile="public_keys")

        # Generate blocks until PoS starts.
        self.nodes[0].generate_empty_blocks(500)
        sync_blocks(self.nodes)
        pos_identifier, _ = client.wait_for_pos_register()
        client.generate_empty_blocks(400)
        sync_blocks(self.nodes)
        time.sleep(2)

        latest_pos_ref = self.latest_pos_ref()
        for i in range(55):
            print(i)
            if i == 10:
                self.stop_node(5, clean=True)
                self.start_node(5, phase_to_wait=None)
                self.nodes[5].wait_for_recovery(["NormalSyncPhase"], 30)
            if i == 12:
                self.maybe_restart_node(5, 1, 0)
            if i == 15:
                assert_equal(int(client.pos_get_account(pos_identifier)["status"]["availableVotes"], 0), 2000)
                client.pos_retire_self()
            if i == 30:
                self.maybe_restart_node(5, 1, 1)
            # Retire node 3 after 5 min.
            # Generate enough PoW blocks for PoS to progress.
            self.nodes[0].generate_empty_blocks(60)
            # Leave some time for PoS to reach consensus
            time.sleep(3)
            self.nodes[0].generate_empty_blocks(1)
            new_pos_ref = self.latest_pos_ref()
            if i >= 10:
                assert_ne(latest_pos_ref, new_pos_ref)

        client.wait_for_unstake(client.node.pow_sk)
        assert client.get_balance(eth_utils.encode_hex(priv_to_addr(client.node.pow_sk))) > 10000 * 10**18
        assert_equal(int(client.pos_get_account(pos_identifier)["status"]["availableVotes"], 0), 0)
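The restart loop above compares successive values of self.latest_pos_ref(), which is not shown. A hedged sketch, assuming the PoS reference can be read from the best block; the posReference field name is an assumption:

# Hedged sketch: plausible latest_pos_ref. Assumes the RPC block object
# carries a posReference field; the field name is not confirmed here.
def latest_pos_ref(self):
    client = RpcClient(self.nodes[0])
    best_block = client.best_block_hash()
    return client.block_by_hash(best_block)["posReference"]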
Example #7
    def _test_new_block(self):
        self.log.info("Test New Block")
        client = RpcClient(self.nodes[0])
        best_block = client.best_block_hash()
        best_epoch = client.epoch_number()
        new_block = create_block(decode_hex(best_block), best_epoch + 1)
        self.send_msg(NewBlock(block=new_block))
        wait_until(
            lambda: self.nodes[0].best_block_hash() == new_block.hash_hex())

        # Wrong payload
        self.nodes[0].p2p.send_protocol_packet(
            rlp.encode([0]) + int_to_bytes(NEW_BLOCK))
        time.sleep(1)
        assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex())
        assert_equal(self.nodes[0].getblockcount(), 3)
        self.reconnect(self.nodes[0])

        # Wrong-length parent hash
        invalid_block = create_block(parent_hash=b'', height=2)
        self.send_msg(NewBlock(block=invalid_block))
        time.sleep(1)
        assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex())
        assert_equal(self.nodes[0].getblockcount(), 3)
        self.reconnect(self.nodes[0])

        # Wrong-length author
        invalid_block = create_block(author=b'', height=2)
        self.send_msg(NewBlock(block=invalid_block))
        time.sleep(1)
        assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex())
        assert_equal(self.nodes[0].getblockcount(), 3)
        self.reconnect(self.nodes[0])

        # Wrong-length root
        invalid_block = create_block(deferred_state_root=b'',
                                     height=2,
                                     deferred_receipts_root=b'')
        self.send_msg(NewBlock(block=invalid_block))
        time.sleep(1)
        assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex())
        assert_equal(self.nodes[0].getblockcount(), 3)
        self.reconnect(self.nodes[0])

        # Nonexistent parent
        invalid_block = create_block(parent_hash=b'\x00' * 32, height=2)
        self.send_msg(NewBlock(block=invalid_block))
        time.sleep(1)
        assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex())
        assert_equal(self.nodes[0].getblockcount(), 3)
        self.reconnect(self.nodes[0])

        # Invalid height
        invalid_block = create_block(new_block.hash, 1)
        self.send_msg(NewBlock(block=invalid_block))
        time.sleep(1)
        assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex())
        assert_equal(self.nodes[0].getblockcount(), 3)
        self.reconnect(self.nodes[0])

        sync_blocks(self.nodes)

        # TODO: generate some random blocks that have wrong ref edges
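A possible starting point for that TODO, following the send/assert pattern of the invalid-block cases above; referee_hashes is an assumed create_block parameter name:

    def _test_wrong_referee(self):
        # Hedged sketch for the TODO above: a block whose referee list
        # points at a nonexistent block. referee_hashes is an assumed
        # create_block parameter name.
        client = RpcClient(self.nodes[0])
        best_block = client.best_block_hash()
        best_epoch = client.epoch_number()
        invalid_block = create_block(decode_hex(best_block), best_epoch + 1,
                                     referee_hashes=[b'\x00' * 32])
        self.send_msg(NewBlock(block=invalid_block))
        time.sleep(1)
        # The invalid block must not change the node's best block.
        assert_equal(self.nodes[0].best_block_hash(), best_block)
        self.reconnect(self.nodes[0])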