Example #1
def test_newly_joined_node_should_not_gossip_blocks(two_node_network):
    """
    Feature file: node_added_to_established_network.feature
    Scenario: New node should not gossip old blocks back to network
    """
    network = two_node_network

    def propose(node):
        return node.deploy_and_propose(
            session_contract=HELLO_NAME,
            payment_contract=HELLO_NAME,
            from_address=node.genesis_account.public_key_hex,
            public_key=node.genesis_account.public_key_path,
            private_key=node.genesis_account.private_key_path,
        )

    block_hashes = [propose(node) for node in network.docker_nodes]
    wait_for_block_hashes_propagated_to_all_nodes(network.docker_nodes, block_hashes)

    # Add a new node; it should sync with the existing ones.
    network.add_new_node_to_network()
    wait_for_block_hashes_propagated_to_all_nodes(network.docker_nodes, block_hashes)

    node0, node1, node2 = network.docker_nodes

    # Verify that the new node didn't do any gossiping.
    wait_for_gossip_metrics_and_assert_blocks_gossiped(node2, node2.timeout, 0)

    # Verify that the original nodes didn't get their NewBlocks method called more times than expected.
    for node in network.docker_nodes[:2]:
        # node0 tells node1 about its block, then node1 tries to reflect it back: at most 2 calls.
        # node2 should not have called the old nodes during its sync.
        assert get_new_blocks_requests_total(node) <= 2
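
The helpers wait_for_gossip_metrics_and_assert_blocks_gossiped and get_new_blocks_requests_total belong to the test harness and are not shown above. A minimal sketch of how such a metrics check could work, assuming (hypothetically) that each node exposes a Prometheus-style plain-text metrics dump through a node.get_metrics() accessor:

from time import sleep, time


def counter_total(metrics_text, counter_name):
    # Sum every sample of a Prometheus-style counter in a plain-text metrics dump.
    return sum(
        float(line.split()[-1])
        for line in metrics_text.splitlines()
        if line.startswith(counter_name)
    )


def wait_for_counter_value(node, counter_name, expected, timeout):
    # Poll the (hypothetical) node.get_metrics() dump until the counter reaches
    # the expected value or the timeout expires.
    deadline = time() + timeout
    value = counter_total(node.get_metrics(), counter_name)
    while value != expected and time() < deadline:
        sleep(1)
        value = counter_total(node.get_metrics(), counter_name)
    assert value == expected, f"{counter_name}: expected {expected}, got {value}"
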
Example #2
def test_neglected_invalid_block(three_node_network):
    """
    Feature file: neglected_invalid_justification.feature
    Scenario: 3 Nodes doing simultaneous deploys and proposes do not have neglected invalid blocks
    """
    bootstrap, node1, node2 = three_node_network.docker_nodes

    for cycle_count in range(4):
        logging.info(f"DEPLOY_PROPOSE CYCLE COUNT: {cycle_count + 1}")
        start_time = time() + 1

        boot_deploy = DeployTimedTread(bootstrap,
                                       {"session_contract": CONTRACT_1},
                                       start_time)
        node1_deploy = DeployTimedTread(node1,
                                        {"session_contract": CONTRACT_2},
                                        start_time)
        node2_deploy = DeployTimedTread(node2,
                                        {"session_contract": CONTRACT_2},
                                        start_time)

        # Simultaneous Deploy
        node1_deploy.start()
        boot_deploy.start()
        node2_deploy.start()

        boot_deploy.join()
        node1_deploy.join()
        node2_deploy.join()

        start_time = time() + 1

        boot_propose = ProposeTimedThread(bootstrap, {}, start_time)
        node1_propose = ProposeTimedThread(node1, {}, start_time)
        node2_propose = ProposeTimedThread(node2, {}, start_time)

        # Simultaneous Propose
        node1_propose.start()
        boot_propose.start()
        node2_propose.start()

        boot_propose.join()
        node1_propose.join()
        node2_propose.join()

    # Make sure the final propose round actually produced blocks and that they propagate to every node.
    block_hashes = [
        h for h in [
            boot_propose.block_hash,
            node1_propose.block_hash,
            node2_propose.block_hash,
        ] if h
    ]
    wait_for_block_hashes_propagated_to_all_nodes(
        three_node_network.docker_nodes, block_hashes)

    assert " for NeglectedInvalidBlock." not in bootstrap.logs()
    assert " for NeglectedInvalidBlock." not in node1.logs()
    assert " for NeglectedInvalidBlock." not in node2.logs()
def test_multiple_deploys_at_once(
    three_node_network,
    contract_paths: List[List[str]],
    expected_deploy_counts_in_blocks,
):
    """
    Feature file: multiple_simultaneous_deploy.feature
    Scenario: Multiple simultaneous deploy after single deploy
    """
    nodes = three_node_network.docker_nodes
    # Wait for the genesis block to reach each node.

    deploy_threads = [
        DeployThread("node" + str(i + 1),
                     node,
                     contract_paths,
                     max_attempts=5,
                     retry_seconds=3) for i, node in enumerate(nodes)
    ]

    for t in deploy_threads:
        t.start()

    for t in deploy_threads:
        t.join()

    # See COMMENT_EXPECTED_BLOCKS
    block_hashes = reduce(add, [t.block_hashes for t in deploy_threads])
    wait_for_block_hashes_propagated_to_all_nodes(nodes, block_hashes)

    for node in nodes:
        n_blocks = len(expected_deploy_counts_in_blocks)
        blocks = parse_show_blocks(
            node.client.show_blocks(n_blocks * 100))
        deploy_counts = [b.summary.header.deploy_count for b in blocks][:n_blocks]
        assert deploy_counts == expected_deploy_counts_in_blocks, \
            "Unexpected deploy counts in blocks"
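
DeployThread is likewise a harness class. Its max_attempts and retry_seconds arguments suggest a simple retry loop around flaky deploys; a sketch under the same hypothetical node.client API as above:

import threading
from time import sleep
from typing import List


class DeployThreadSketch(threading.Thread):
    # Deploy each batch of contracts, retrying transient failures, then propose
    # once per batch and remember the resulting block hashes.
    def __init__(self, name, node, batches: List[List[str]], max_attempts, retry_seconds):
        super().__init__(name=name)
        self.node = node
        self.batches = batches  # mirrors the List[List[str]] contract_paths fixture
        self.max_attempts = max_attempts
        self.retry_seconds = retry_seconds
        self.block_hashes = []

    def run(self):
        for batch in self.batches:
            for contract in batch:
                self.deploy_with_retry(contract)
            self.block_hashes.append(self.node.client.propose())

    def deploy_with_retry(self, contract):
        for attempt in range(self.max_attempts):
            try:
                return self.node.client.deploy(session_contract=contract)
            except Exception:
                if attempt == self.max_attempts - 1:
                    raise
                sleep(self.retry_seconds)
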
def test_storage_after_multiple_node_deploy_propose_and_shutdown(
        two_node_network):
    """
    Feature file: storage.feature
    Scenario: Stop nodes and restart with correct dag and blockstorage
    """
    tnn = two_node_network
    node0, node1 = tnn.docker_nodes
    block_hashes = [
        node.d_client.deploy_and_propose(
            from_address=GENESIS_ACCOUNT.public_key_hex,
            public_key=GENESIS_ACCOUNT.public_key_path,
            private_key=GENESIS_ACCOUNT.private_key_path,
            session_contract=Contract.HELLONAME,
        ) for node in (node0, node1)
    ]

    wait_for_block_hashes_propagated_to_all_nodes(tnn.docker_nodes,
                                                  block_hashes)

    dag0 = node0.d_client.vdag(10)
    dag1 = node1.d_client.vdag(10)
    blocks0 = node0.d_client.show_blocks(10)
    blocks1 = node1.d_client.show_blocks(10)

    for node_num in range(2):
        tnn.stop_cl_node(node_num)
    for node_num in range(2):
        tnn.start_cl_node(node_num)

    wait_for_block_hashes_propagated_to_all_nodes(tnn.docker_nodes,
                                                  block_hashes)

    assert dag0 == node0.d_client.vdag(10)
    assert dag1 == node1.d_client.vdag(10)
    assert blocks0 == node0.d_client.show_blocks(10)
    assert blocks1 == node1.d_client.show_blocks(10)
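
wait_for_block_hashes_propagated_to_all_nodes, used by every test above, is also part of the harness. Conceptually it just polls each node until every expected hash is visible; a sketch, assuming the d_client.show_blocks output is text that contains the block hashes:

from time import sleep, time


def wait_for_block_hashes_sketch(nodes, block_hashes, timeout=180, poll_seconds=2):
    # Poll every node until each expected block hash shows up in its block
    # listing, or fail with a timeout.
    deadline = time() + timeout
    while time() < deadline:
        if all(
            all(h in node.d_client.show_blocks(100) for h in block_hashes)
            for node in nodes
        ):
            return
        sleep(poll_seconds)
    raise TimeoutError(f"blocks {block_hashes} did not propagate to all nodes")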