Example #1
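All three variants of this example drive a view change with `ensure_view_change_by_primary_restart` and then demote the next primary with `demote_primary_node`. As a rough orientation, a minimal sketch of the restart-and-wait flow is shown below; it is illustrative only (the name `restart_master_primary_sketch` is made up, and the real plenum helper performs additional consistency checks), and it reuses only helpers that already appear in these examples.

def restart_master_primary_sketch(looper, nodes, tconf, tdir, allPluginsPath):
    # illustrative sketch, not the plenum implementation
    old_view_no = checkViewNoForNodes(nodes)
    primary = get_master_primary_node(nodes)
    # stop the current master primary and drop it from the looper
    disconnect_node_and_ensure_disconnected(looper, nodes, primary)
    looper.removeProdable(primary)
    rest = [n for n in nodes if n != primary]
    # bring the same node back up; the remaining nodes detect the primary loss
    # and initiate a view change in the meantime
    restarted = start_stopped_node(primary, looper, tconf, tdir, allPluginsPath)
    nodes = rest + [restarted]
    waitForViewChange(looper, nodes, expectedViewNo=old_view_no + 1)
    ensure_all_nodes_have_same_data(looper, nodes=nodes)
    return nodes
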
def test_restart_primaries_then_demote(
        looper, txnPoolNodeSet,
        tconf, tdir, allPluginsPath,
        sdk_pool_handle, sdk_wallet_steward,
        poolTxnStewardNames,
        poolTxnData,
        tdirWithClientPoolTxns):
    """
    """
    logger.info("1. Restart Node1")
    pool_of_nodes = ensure_view_change_by_primary_restart(looper,
                                                          txnPoolNodeSet,
                                                          tconf,
                                                          tdir,
                                                          allPluginsPath,
                                                          customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    logger.info("2. Restart Node2")
    pool_of_nodes = ensure_view_change_by_primary_restart(looper,
                                                          pool_of_nodes,
                                                          tconf,
                                                          tdir,
                                                          allPluginsPath,
                                                          customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    logger.info("3. Demote Node3")
    # demote the node
    pool_of_nodes = demote_primary_node(looper,
                                        txnPoolNodeSet,
                                        pool_of_nodes,
                                        poolTxnStewardNames,
                                        poolTxnData,
                                        tdirWithClientPoolTxns)

    # make sure view changed
    waitForViewChange(looper, pool_of_nodes, expectedViewNo=3)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 10)
    ensure_all_nodes_have_same_data(looper, nodes=pool_of_nodes)
def test_restart_primaries_then_demote(
        looper, txnPoolNodeSet,
        tconf, tdir, allPluginsPath,
        sdk_pool_handle,
        sdk_wallet_stewards):
    """
    """
    sdk_wallet_steward = sdk_wallet_stewards[0]
    logger.info("1. Restart Node1")
    pool_of_nodes = ensure_view_change_by_primary_restart(looper,
                                                          txnPoolNodeSet,
                                                          tconf,
                                                          tdir,
                                                          allPluginsPath,
                                                          customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    logger.info("2. Restart Node2")
    pool_of_nodes = ensure_view_change_by_primary_restart(looper,
                                                          pool_of_nodes,
                                                          tconf,
                                                          tdir,
                                                          allPluginsPath,
                                                          customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    logger.info("3. Demote Node3")
    # demote the node
    pool_of_nodes = demote_primary_node(looper,
                                        txnPoolNodeSet,
                                        pool_of_nodes,
                                        sdk_pool_handle,
                                        sdk_wallet_stewards)

    # make sure view changed
    waitForViewChange(looper, pool_of_nodes, expectedViewNo=3)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 10)
    ensure_all_nodes_have_same_data(looper, nodes=pool_of_nodes)
def test_restart_primaries_then_demote(looper, txnPoolNodeSet, tconf, tdir,
                                       allPluginsPath, sdk_pool_handle,
                                       sdk_wallet_stewards):
    """
    """
    sdk_wallet_steward = sdk_wallet_stewards[0]
    logger.info("1. Restart Node1")
    pool_of_nodes = ensure_view_change_by_primary_restart(
        looper,
        txnPoolNodeSet,
        tconf,
        tdir,
        allPluginsPath,
        customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT,
        exclude_from_check=['check_last_ordered_3pc_backup'])

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    logger.info("2. Restart Node2")
    pool_of_nodes = ensure_view_change_by_primary_restart(
        looper,
        pool_of_nodes,
        tconf,
        tdir,
        allPluginsPath,
        customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT,
        exclude_from_check=['check_last_ordered_3pc_backup'])

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    logger.info("3. Demote Node3")
    # demote the node
    pool_of_nodes = demote_primary_node(looper, txnPoolNodeSet, pool_of_nodes,
                                        sdk_pool_handle, sdk_wallet_stewards)

    # make sure view changed
    waitForViewChange(looper, pool_of_nodes, expectedViewNo=3)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, pool_of_nodes, sdk_pool_handle,
                              sdk_wallet_steward, 10)
    ensure_all_nodes_have_same_data(looper, nodes=pool_of_nodes)
Example #4
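Both variants of this example rely on the same quorum arithmetic: in a 4-node pool, once Delta is stopped and the primary is then disconnected, only 2 nodes remain connected, which is below the view-change quorum, so no view change can complete until Delta is started again. A quick sanity check of that arithmetic (plain BFT bounds, not a plenum API):

# f is the number of faulty nodes a pool of n can tolerate; a view change
# needs at least n - f participating nodes.
n = 4
f = (n - 1) // 3                    # -> 1
view_change_quorum = n - f          # -> 3
connected_after_two_stops = n - 2   # Delta stopped, then the primary disconnected
assert connected_after_two_stops < view_change_quorum  # 2 < 3, so no view change
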
def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeSet, looper,
                                                                    sdk_pool_handle,
                                                                    sdk_wallet_client,
                                                                    tdir, tconf, allPluginsPath):
    assert len(txnPoolNodeSet) == 4

    pr_node = get_master_primary_node(txnPoolNodeSet)
    assert pr_node.name == "Alpha"

    # 1. Initiate a view change by restarting the primary (Alpha)
    nodes = ensure_view_change_by_primary_restart(looper,
                                                  txnPoolNodeSet,
                                                  tconf,
                                                  tdir,
                                                  allPluginsPath,
                                                  customTimeout=2 * tconf.NEW_VIEW_TIMEOUT,
                                                  exclude_from_check=['check_last_ordered_3pc_backup'])

    # Now primary should be Beta
    pr_node = get_master_primary_node(nodes)
    assert pr_node.name == "Beta"

    # 2. Stop non-primary node Delta; no view changes are expected
    non_primary_to_stop = [n for n in nodes if n.name == "Delta"][0]
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, non_primary_to_stop)
    looper.removeProdable(non_primary_to_stop)

    remaining_nodes = list(set(nodes) - {non_primary_to_stop})
    # The primary is going to be stopped; remember the InstanceChange message counts
    # to make sure that no view change happens while the number of connected nodes
    # is below quorum.
    ic_cnt = {}
    for n in remaining_nodes:
        ic_cnt[n.name] = n.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)

    # 3. Disconnect primary
    disconnect_node_and_ensure_disconnected(
        looper, remaining_nodes, pr_node)
    looper.removeProdable(pr_node)

    # Wait for more than the ToleratePrimaryDisconnection timeout and check that no IC messages were sent.
    looper.runFor(tconf.ToleratePrimaryDisconnection + 5)
    remaining_nodes = list(set(remaining_nodes) - {pr_node})
    for n in remaining_nodes:
        assert ic_cnt[n.name] == n.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)

    view_no = checkViewNoForNodes(remaining_nodes)

    # 4. Start Delta (non-primary); the primary (Beta) is now disconnected, but there is
    # a quorum to choose a new one.
    restartedNode = start_stopped_node(non_primary_to_stop, looper, tconf,
                                       tdir, allPluginsPath,
                                       delay_instance_change_msgs=False)

    remaining_nodes = remaining_nodes + [restartedNode]

    # 5. Check that the view change eventually happens because
    # Delta may re-send an InstanceChange for view=2 after it finishes catch-up
    waitForViewChange(looper, remaining_nodes, expectedViewNo=(view_no + 1),
                      customTimeout=3 * tconf.NEW_VIEW_TIMEOUT)

    # 6. ensure pool is working properly
    sdk_send_random_and_check(looper, remaining_nodes, sdk_pool_handle,
                              sdk_wallet_client, 3)
    ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes)
def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeSet, looper,
                                                                    sdk_pool_handle,
                                                                    sdk_wallet_client,
                                                                    tdir, tconf, allPluginsPath):
    assert len(txnPoolNodeSet) == 4

    pr_node = get_master_primary_node(txnPoolNodeSet)
    assert pr_node.name == "Alpha"

    # 1. Initiate a view change by restarting the primary (Alpha)
    nodes = ensure_view_change_by_primary_restart(looper,
                                                  txnPoolNodeSet,
                                                  tconf,
                                                  tdir,
                                                  allPluginsPath,
                                                  customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)

    # Now primary should be Beta
    pr_node = get_master_primary_node(nodes)
    assert pr_node.name == "Beta"

    # 2. Stop non-primary node Delta; no view changes are expected
    non_primary_to_stop = [n for n in nodes if n.name == "Delta"][0]
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, non_primary_to_stop)
    looper.removeProdable(non_primary_to_stop)

    remaining_nodes = list(set(nodes) - {non_primary_to_stop})
    # The primary is going to be stopped; remember the InstanceChange message counts
    # to make sure that no view change happens while the number of connected nodes
    # is below quorum.
    ic_cnt = {}
    for n in remaining_nodes:
        ic_cnt[n.name] = n.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)

    # 3. Disconnect primary
    disconnect_node_and_ensure_disconnected(
        looper, remaining_nodes, pr_node)
    looper.removeProdable(pr_node)

    # Wait for more than the ToleratePrimaryDisconnection timeout and check that no IC messages were sent.
    looper.runFor(tconf.ToleratePrimaryDisconnection + 5)
    remaining_nodes = list(set(remaining_nodes) - {pr_node})
    for n in remaining_nodes:
        assert ic_cnt[n.name] == n.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)

    view_no = checkViewNoForNodes(remaining_nodes)

    # 4. Start Delta (non-primary); the primary (Beta) is now disconnected, but there is
    # a quorum to choose a new one.
    restartedNode = start_stopped_node(non_primary_to_stop, looper, tconf,
                                       tdir, allPluginsPath,
                                       delay_instance_change_msgs=False)
    remaining_nodes = remaining_nodes + [restartedNode]

    # 5. Check that view change happened.
    waitForViewChange(looper, remaining_nodes, expectedViewNo=(view_no + 1),
                      customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, remaining_nodes, sdk_pool_handle,
                              sdk_wallet_client, 3)
    ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes)
Example #6
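Both listings of this test below boil down to "snapshot the primary's domain ledger, restart it, then check every node's ledger against the snapshot". A minimal sketch of that idea follows; the helper names are made up for illustration, and it uses only the `root_hash` and `getAllTxn()` accessors that the full test also reads.

def snapshot_ledger(ledger):
    # the root hash plus the full txn list is enough to detect any divergence
    return {'root_hash': ledger.root_hash,
            'txns': list(ledger.getAllTxn())}

def assert_ledgers_equal(before, after):
    assert before['root_hash'] == after['root_hash']
    assert before['txns'] == after['txns']
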
def test_that_domain_ledger_the_same_after_restart_for_all_nodes(
        looper, txnPoolNodeSet, tdir, tconf, allPluginsPath,
        limitTestRunningTime):
    """
    Test steps:
    1. Collect domainLedger data for primary node, such as:
       tree hashes,
       root hash,
       tree root hash,
       leaves store (content of binary file),
       nodes store (content of binary file),
       ledger txns
    2. Restart the primary node and ensure that the view change is done
    3. Compare the previously collected data with the data from all other nodes after the restart.
       We don't send any txns during the restart, therefore the domainLedgers must be the same
    4. Repeat steps 1-3 for all nodes in the pool

    """
    def prepare_for_compare(domain_ledger):
        dict_for_compare = {}
        dict_for_compare['hashes'] = domain_ledger.tree.hashes
        dict_for_compare['root_hash'] = domain_ledger.root_hash
        dict_for_compare['tree_root_hash'] = domain_ledger.tree.root_hash
        dict_for_compare[
            'tree_root_hash_hex'] = domain_ledger.tree.root_hash_hex
        if tconf.hashStore['type'] == HS_FILE:
            """
            save the current position of the cursor in the stream, move to the
            beginning, read the content and move the cursor back
            """
            c_pos = domain_ledger.tree.hashStore.leavesFile.db_file.tell()
            domain_ledger.tree.hashStore.leavesFile.db_file.seek(0, 0)
            dict_for_compare[
                'leaves_store'] = domain_ledger.tree.hashStore.leavesFile.db_file.read(
                )
            domain_ledger.tree.hashStore.leavesFile.db_file.seek(c_pos)

            c_pos = domain_ledger.tree.hashStore.nodesFile.db_file.tell()
            domain_ledger.tree.hashStore.nodesFile.db_file.seek(0, 0)
            dict_for_compare[
                'nodes_store'] = domain_ledger.tree.hashStore.nodesFile.db_file.read(
                )
            domain_ledger.tree.hashStore.nodesFile.db_file.seek(c_pos)
        elif tconf.hashStore['type'] == HS_LEVELDB or tconf.hashStore[
                'type'] == HS_ROCKSDB:
            dict_for_compare['leaves_store'] = domain_ledger.tree.hashStore.\
                readLeafs(1, domain_ledger.tree.hashStore.leafCount)
            dict_for_compare['nodes_store'] = domain_ledger.tree.hashStore. \
                readNodes(1, domain_ledger.tree.hashStore.nodeCount)

        dict_for_compare['txns'] = [(tno, txn)
                                    for tno, txn in domain_ledger.getAllTxn()]

        return dict_for_compare

    def compare(before, after):
        for k, v in before.items():
            if k in after:
                if v != after[k]:
                    logger.debug(
                        "compare_domain_ledgers: before[{}]!=after[{}]".format(
                            k, k))
                    logger.debug(
                        "compare_domain_ledgers: before value: {}".format(v))
                    logger.debug(
                        "compare_domain_ledgers: after value: {}".format(
                            after[k]))
                    for k, v in before.items():
                        logger.debug(
                            "compare_domain_ledgers: before : {}: {}".format(
                                k, v))
                        logger.debug(
                            "compare_domain_ledgers: after_dict: {}: {}".
                            format(k, after.get(k)))
                    assert False

    pool_of_nodes = txnPoolNodeSet
    for __ in range(4):
        p_node = [node for node in pool_of_nodes if node.has_master_primary][0]
        before_vc_dict = prepare_for_compare(p_node.domainLedger)
        pool_of_nodes = ensure_view_change_by_primary_restart(
            looper,
            pool_of_nodes,
            tconf,
            tdir,
            allPluginsPath,
            customTimeout=tconf.VIEW_CHANGE_TIMEOUT)
        for node in pool_of_nodes:
            logger.debug("compare_domain_ledgers: "
                         "primary node before view_change: {}, "
                         "compared node: {}".format(p_node, node))
            after_vc_dict = prepare_for_compare(node.domainLedger)
            compare(before_vc_dict, after_vc_dict)
def test_6th_node_join_after_view_change_by_master_restart(
        looper, txnPoolNodeSet, tdir, tconf, allPluginsPath, sdk_pool_handle,
        sdk_wallet_steward, client_tdir, limitTestRunningTime):
    """
    Test steps:
    1. start a pool of 4 nodes
    2. force 4 view changes by restarting the primary node
    3. now the primary node must be Alpha; then add a new node named Epsilon
    4. ensure that Epsilon was added and catch-up is done
    5. send some txns
    6. force 4 view changes; now the primary node is the newly added Epsilon
    7. add a 6th node and ensure that the new node has caught up
    """
    pool_of_nodes = txnPoolNodeSet
    for __ in range(4):
        pool_of_nodes = ensure_view_change_by_primary_restart(
            looper,
            pool_of_nodes,
            tconf,
            tdir,
            allPluginsPath,
            customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
        timeout = waits.expectedPoolCatchupTime(nodeCount=len(pool_of_nodes))
        for node in pool_of_nodes:
            looper.run(eventually(catchuped, node, timeout=2 * timeout))
    ensure_all_nodes_have_same_data(looper,
                                    pool_of_nodes,
                                    custom_timeout=timeout)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    new_epsilon_node = add_new_node(looper,
                                    pool_of_nodes,
                                    sdk_pool_handle,
                                    sdk_wallet_steward,
                                    tdir,
                                    client_tdir,
                                    tconf,
                                    allPluginsPath,
                                    name='Epsilon')
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
    """
    check that the pool and domain ledgers are in the synced state after the new node was added
    """
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(pool_of_nodes))
    for node in pool_of_nodes:
        looper.run(
            eventually(check_ledger_state,
                       node,
                       DOMAIN_LEDGER_ID,
                       LedgerState.synced,
                       retryWait=.5,
                       timeout=timeout))
        looper.run(
            eventually(check_ledger_state,
                       node,
                       POOL_LEDGER_ID,
                       LedgerState.synced,
                       retryWait=.5,
                       timeout=timeout))
    for __ in range(4):
        pool_of_nodes = ensure_view_change_by_primary_restart(
            looper,
            pool_of_nodes,
            tconf,
            tdir,
            allPluginsPath,
            customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)

        timeout = waits.expectedPoolCatchupTime(nodeCount=len(pool_of_nodes))
        for node in pool_of_nodes:
            looper.run(eventually(catchuped, node, timeout=2 * timeout))
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2)
    new_psi_node = add_new_node(looper,
                                pool_of_nodes,
                                sdk_pool_handle,
                                sdk_wallet_steward,
                                tdir,
                                client_tdir,
                                tconf,
                                allPluginsPath,
                                name='Psi')
    looper.run(
        eventually(check_ledger_state,
                   new_psi_node,
                   DOMAIN_LEDGER_ID,
                   LedgerState.synced,
                   retryWait=.5,
                   timeout=5))
    looper.run(
        eventually(check_ledger_state,
                   new_psi_node,
                   POOL_LEDGER_ID,
                   LedgerState.synced,
                   retryWait=.5,
                   timeout=5))
def test_that_domain_ledger_the_same_after_restart_for_all_nodes(
        looper, txnPoolNodeSet, tdir, tconf,
        allPluginsPath, limitTestRunningTime):
    """
    Test steps:
    1. Collect domainLedger data for primary node, such as:
       tree hashes,
       root hash,
       tree root hash,
       leaves store (content of binary file),
       nodes store (content of binary file),
       ledger txns
    2. Restart the primary node and ensure that the view change is done
    3. Compare the previously collected data with the data from all other nodes after the restart.
       We don't send any txns during the restart, therefore the domainLedgers must be the same
    4. Repeat steps 1-3 for all nodes in the pool

    """

    def prepare_for_compare(domain_ledger):
        dict_for_compare = {}
        dict_for_compare['hashes'] = domain_ledger.tree.hashes
        dict_for_compare['root_hash'] = domain_ledger.root_hash
        dict_for_compare['tree_root_hash'] = domain_ledger.tree.root_hash
        dict_for_compare['tree_root_hash_hex'] = domain_ledger.tree.root_hash_hex
        if tconf.hashStore['type'] == HS_FILE:
            """
            save the current position of the cursor in the stream, move to the
            beginning, read the content and move the cursor back
            """
            c_pos = domain_ledger.tree.hashStore.leavesFile.db_file.tell()
            domain_ledger.tree.hashStore.leavesFile.db_file.seek(0, 0)
            dict_for_compare['leaves_store'] = domain_ledger.tree.hashStore.leavesFile.db_file.read()
            domain_ledger.tree.hashStore.leavesFile.db_file.seek(c_pos)

            c_pos = domain_ledger.tree.hashStore.nodesFile.db_file.tell()
            domain_ledger.tree.hashStore.nodesFile.db_file.seek(0, 0)
            dict_for_compare['nodes_store'] = domain_ledger.tree.hashStore.nodesFile.db_file.read()
            domain_ledger.tree.hashStore.nodesFile.db_file.seek(c_pos)
        elif tconf.hashStore['type'] == HS_LEVELDB or tconf.hashStore['type'] == HS_ROCKSDB:
            dict_for_compare['leaves_store'] = domain_ledger.tree.hashStore.\
                readLeafs(1, domain_ledger.tree.hashStore.leafCount)
            dict_for_compare['nodes_store'] = domain_ledger.tree.hashStore. \
                readNodes(1, domain_ledger.tree.hashStore.nodeCount)

        dict_for_compare['txns'] = [(tno, txn) for tno, txn in domain_ledger.getAllTxn()]

        return dict_for_compare

    def compare(before, after):
        for k, v in before.items():
            if k in after:
                if v != after[k]:
                    logger.debug("compare_domain_ledgers: before[{}]!=after[{}]".format(k, k))
                    logger.debug("compare_domain_ledgers: before value: {}".format(v))
                    logger.debug("compare_domain_ledgers: after value: {}".format(after[k]))
                    for k, v in before.items():
                        logger.debug("compare_domain_ledgers: before : {}: {}".format(k, v))
                        logger.debug("compare_domain_ledgers: after_dict: {}: {}".format(k, after.get(k)))
                    assert False

    pool_of_nodes = txnPoolNodeSet
    for __ in range(4):
        p_node = [node for node in pool_of_nodes if node.has_master_primary][0]
        before_vc_dict = prepare_for_compare(p_node.domainLedger)
        pool_of_nodes = ensure_view_change_by_primary_restart(looper,
                                                              pool_of_nodes,
                                                              tconf,
                                                              tdir,
                                                              allPluginsPath,
                                                              customTimeout=tconf.VIEW_CHANGE_TIMEOUT)
        for node in pool_of_nodes:
            logger.debug("compare_domain_ledgers: "
                         "primary node before view_change: {}, "
                         "compared node: {}".format(p_node, node))
            after_vc_dict = prepare_for_compare(node.domainLedger)
            compare(before_vc_dict, after_vc_dict)