Example 1
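# Note: names such as ConfluxTestFramework, RpcClient, WebSocketsClient, pubsub_url,
# block_on, Request, ReceivedErrorResponseError and FULLNODE are imported or defined
# elsewhere in the original Conflux test module; this snippet shows only the test class.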
class Issue2159Test(ConfluxTestFramework):
    def set_test_params(self):
        self.num_nodes = 1

        self.conf_parameters = {
            # make `cfx_getEpochReceipts` available through ws
            "public_rpc_apis": "\"cfx,debug\"",

            # limit max response payload size
            "jsonrpc_ws_max_payload_bytes": 1024,
        }

    def setup_network(self):
        self.add_nodes(self.num_nodes)
        self.start_node(FULLNODE, ["--archive"])

        # set up RPC over HTTP
        node = self.nodes[FULLNODE]
        self.rpc = RpcClient(node)

        # set up RPC over WS
        url = pubsub_url(node.index, node.rpchost, node.pubsubport)
        self.ws = WebSocketsClient(block_on(websockets.connect(url)))

        # wait for phase changes to complete
        self.nodes[FULLNODE].wait_for_phase(["NormalSyncPhase"])

    def run_test(self):
        # generate block with many transactions
        parent_hash = self.rpc.block_by_epoch("latest_mined")['hash']
        start_nonce = self.rpc.get_nonce(self.rpc.GENESIS_ADDR)
        txs = [self.rpc.new_tx(nonce=start_nonce + ii) for ii in range(0, 100)]
        hash = self.rpc.generate_custom_block(parent_hash=parent_hash,
                                              referee=[],
                                              txs=txs)
        epoch = self.rpc.block_by_hash(hash)["epochNumber"]

        # make sure block is executed
        self.rpc.generate_empty_blocks(5)

        # getting epoch receipts should result in error
        try:
            resp = block_on(
                self.ws.send(Request("cfx_getEpochReceipts", epoch)))
            assert False, "cfx_getEpochReceipts request should have failed"
        except ReceivedErrorResponseError as e:
            self.log.info(e.response)
            assert e.response.data.startswith("\"Oversized payload")
        except Exception as e:
            assert False, f"unexpected error: {e}"

        # this should succeed
        # resp = self.rpc.node.cfx_getEpochReceipts(epoch)

        self.log.info("Pass")
Example 2
    def run_test(self):
        new_node = RpcClient(self.nodes[0])
        new_node2 = RpcClient(self.nodes[1])
        old_node = RpcClient(self.nodes[len(self.nodes) - 1])

        # build a common chain up to just below the fork height and sync all nodes
        old_node.generate_empty_blocks(FORK_HEIGHT - 1)
        sync_blocks(self.nodes)

        # the old node and the new node then extend the chain independently,
        # producing two competing forks
        old_fork = old_node.generate_empty_blocks(FORK_HEIGHT)
        new_fork = new_node.generate_empty_blocks(2 * FORK_HEIGHT)

        # expected counts include the genesis block:
        #   old node:  1 + (FORK_HEIGHT - 1) + FORK_HEIGHT + 2 * FORK_HEIGHT (accepts both forks)
        #   new nodes: 1 + (FORK_HEIGHT - 1) + 2 * FORK_HEIGHT (the old fork is not accepted)
        wait_until(lambda: old_node.get_block_count() == 4 * FORK_HEIGHT)
        wait_until(lambda: new_node.get_block_count() == 3 * FORK_HEIGHT)
        wait_until(lambda: new_node2.get_block_count() == 3 * FORK_HEIGHT)

        # blocks on the new fork are accepted with zero blame
        for h in new_fork:
            b = new_node.block_by_hash(h)
            assert_equal(int(b["blame"], 0), 0)

        # blocks on the old fork are unknown to the new nodes
        for h in old_fork:
            assert_equal(new_node.block_by_hash(h), None)
            assert_equal(new_node2.block_by_hash(h), None)
Example 3
    def run_test(self):
        n_generate_batch = 1000
        n_initial_chain = 50000

        # build a long initial chain on node 0 in batches
        self.log.info(
            f"Prepare the initial chain of node 0 with {n_initial_chain} blocks"
        )
        n_batches = n_initial_chain // n_generate_batch
        for _ in range(n_batches):
            batch_generate(self.nodes[0], n_generate_batch, self.log)

        # on every node, extend the chain and then attach a "star" of blocks
        # that all share the same parent
        n_fork_height = 1000
        n_star_count = 15000
        for i in range(self.num_nodes):
            self.log.info(
                f"Prepare node {i} with a chain of the length {n_fork_height} and then a star of {n_star_count} blocks."
            )
            client = RpcClient(self.nodes[i])
            fork_point = client.generate_empty_blocks(n_fork_height)[-1]
            for _ in range(n_star_count):
                client.generate_block_with_parent(fork_point)

        # connect the nodes in a line so each one has to merge the others' graphs
        for i in range(self.num_nodes - 1):
            connect_nodes(self.nodes, i, i + 1)
        self.log.info(
            "Nodes connected, normal mining start at the interval of 0.5")
        block_gen_thread = BlockGenThread(self.nodes,
                                          self.log,
                                          interval_base=0.5)
        block_gen_thread.start()

        # sample block counts once per second for 100 seconds and report the
        # average block processing rate of node 0
        start_time = time.time()
        original_cnt = self.nodes[0].getblockcount()
        for _ in range(100):
            time.sleep(1)
            cnt = self.nodes[0].getblockcount()
            try:
                cnt1 = self.nodes[1].getblockcount()
            except Exception:
                self.log.info(
                    "Unable to get Node1 block count. Maybe it is busy.")
                cnt1 = -1
            try:
                cnt2 = self.nodes[2].getblockcount()
            except Exception:
                self.log.info(
                    "Unable to get Node2 block count. Maybe it is busy.")
                cnt2 = -1
            elapsed = time.time() - start_time
            avg_block_processing = (cnt - original_cnt) / elapsed
            self.log.info(
                f"Nodes block count {cnt};{cnt1};{cnt2}, elapsed {elapsed}, {avg_block_processing} blocks/s"
            )
        self.log.info(
            f"Merge bench average block processing speed: {avg_block_processing} blocks/s"
        )
Example 4
    def run_test(self):
        attacker = RpcClient(self.nodes[0])
        victim = RpcClient(self.nodes[1])
        n_generate_batch = 1000
        n_attack_blocks = 15000

        # the attacker prepares a large cluster of blocks that all share the
        # same parent, forking off the main chain
        self.log.info(f"Attacker start to prepare {n_attack_blocks} blocks")
        fork_point = attacker.generate_empty_blocks(1000)[-1]
        for _ in range(n_attack_blocks):
            attacker.generate_block_with_parent(fork_point)

        # the honest node keeps extending its own chain in batches
        self.log.info("Honest node generate")
        for _ in range(20000 // n_generate_batch):
            batch_generate(victim, n_generate_batch, self.log)

        # connect the two nodes; the victim must keep mining while it imports
        # and processes the attacker's blocks
        connect_nodes(self.nodes, 0, 1)
        self.log.info("Nodes connected")
        for _ in range(1000):
            batch_generate(victim, n_generate_batch, self.log)
Example 5
    def run_test(self):
        # Pos contract enabled, stake and register in the first hard-fork phase.
        client = RpcClient(self.nodes[self.num_nodes - 1])
        client.generate_empty_blocks(300)
        sync_blocks(self.nodes)
        for node in self.nodes[:-1]:
            client = RpcClient(node)
            pos_identifier, _ = client.wait_for_pos_register()
            sync_blocks(self.nodes)
        client = RpcClient(self.nodes[self.num_nodes - 1])

        # generate blocks until we are after pos initialization and before pos start.
        best_epoch = client.epoch_number()
        client.generate_empty_blocks(600 - best_epoch)
        sync_blocks(self.nodes)

        voting_power_map = {}
        pub_keys_map = {}
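        # scan the PoS register contract logs to collect each account's
        # BLS/VRF public keys (register events) and voting power (stake events)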
        logs = client.get_logs(filter=Filter(from_epoch="earliest", to_epoch="latest_state", address=["0x0888000000000000000000000000000000000005"]))
        for log in logs:
            pos_identifier = log["topics"][1]
            if log["topics"][0] == REGISTER_TOPIC:
                bls_pub_key, vrf_pub_key = eth_abi.decode_abi(["bytes", "bytes"], decode_hex(log["data"]))
                pub_keys_map[pos_identifier] = (encode_hex_0x(bls_pub_key), encode_hex_0x(vrf_pub_key))
            elif log["topics"][0] == INCREASE_STAKE_TOPIC:
                assert pos_identifier in pub_keys_map
                voting_power_map[pos_identifier] = parse_as_int(log["data"])
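        # dump the collected keys and voting power so initialize_tg_config can
        # set up the initial PoS committee from the "public_keys" file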
        with open(os.path.join(self.options.tmpdir, "public_keys"), "w") as f:
            for pos_identifier in pub_keys_map.keys():
                f.write(",".join([pub_keys_map[pos_identifier][0][2:], pub_keys_map[pos_identifier][1][2:], str(voting_power_map[pos_identifier])]) + "\n")
        initialize_tg_config(self.options.tmpdir, len(self.nodes), len(self.nodes), DEFAULT_PY_TEST_CHAIN_ID, pkfile="public_keys")

        # generate blocks until pos start
        self.nodes[0].generate_empty_blocks(500)
        sync_blocks(self.nodes)
        pos_identifier, _ = client.wait_for_pos_register()
        client.generate_empty_blocks(400)
        sync_blocks(self.nodes)
        time.sleep(2)

        latest_pos_ref = self.latest_pos_ref()
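        # run 55 rounds of PoW block generation; restart node 5 along the way,
        # retire this node's PoS votes, and check that the PoS reference has
        # advanced past the one recorded above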
        for i in range(55):
            print(i)
            if i == 10:
                self.stop_node(5, clean=True)
                self.start_node(5, phase_to_wait=None)
                self.nodes[5].wait_for_recovery(["NormalSyncPhase"], 30)
            if i == 12:
                self.maybe_restart_node(5, 1, 0)
            if i == 15:
                assert_equal(int(client.pos_get_account(pos_identifier)["status"]["availableVotes"], 0), 2000)
                client.pos_retire_self()
            if i == 30:
                self.maybe_restart_node(5, 1, 1)
            # Retire node 3 after 5 min.
            # Generate enough PoW blocks for PoS to progress
            self.nodes[0].generate_empty_blocks(60)
            # Leave some time for PoS to reach consensus
            time.sleep(3)
            self.nodes[0].generate_empty_blocks(1)
            new_pos_ref = self.latest_pos_ref()
            if i >= 10:
                assert_ne(latest_pos_ref, new_pos_ref)

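        # after retiring, the stake should eventually be unlocked: the balance
        # is returned to the account and no votes remain available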
        client.wait_for_unstake(client.node.pow_sk)
        assert client.get_balance(eth_utils.encode_hex(priv_to_addr(client.node.pow_sk))) > 10000 * 10**18
        assert_equal(int(client.pos_get_account(pos_identifier)["status"]["availableVotes"], 0), 0)