def test_demote_promote_restart_after_promotion_7_nodes(txnPoolNodeSet, looper, sdk_pool_handle,
                                                        sdk_wallet_steward, tdir, tconf,
                                                        allPluginsPath):
    """Demote the last node of a 7-node pool, promote it back, then restart
    it and check the whole pool is functional again."""
    demoted_node = txnPoolNodeSet[-1]
    rest_nodes = [n for n in txnPoolNodeSet if n != demoted_node]
    starting_view_no = checkViewNoForNodes(txnPoolNodeSet)
    # Demotion triggers a view change on the remaining nodes
    demote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node)
    waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1)
    ensureElectionsDone(looper, rest_nodes)
    ensure_all_nodes_have_same_data(looper, rest_nodes)
    sdk_send_random_and_check(looper, rest_nodes, sdk_pool_handle, sdk_wallet_steward, 5)
    starting_view_no = checkViewNoForNodes(rest_nodes)
    # Promotion triggers another view change
    promote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node)
    waitForViewChange(looper, rest_nodes, expectedViewNo=starting_view_no + 1)
    ensureElectionsDone(looper, rest_nodes, instances_list=[0, 1, 2])
    ensure_all_nodes_have_same_data(looper, rest_nodes)
    # Restart the re-promoted node and verify the full pool can order again
    restart_node(looper, txnPoolNodeSet, demoted_node, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward, sdk_pool_handle)
def test_demote_backup_primary(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards,
                               tdir, tconf, allPluginsPath):
    """Demote the primary of backup instance 1 in a 6-node pool, restart the
    last node, and check the pool keeps ordering with consistent data.

    Fix: removed a redundant second `steward_for_demote_node = None`
    assignment (it was already initialized on the previous line).
    """
    assert len(txnPoolNodeSet) == 6
    node_to_restart = txnPoolNodeSet[-1]
    # Find the node that is primary of backup instance 1, plus its steward
    node_to_demote = steward_for_demote_node = demote_node_index = None
    for i, n in enumerate(txnPoolNodeSet):
        if n.name == txnPoolNodeSet[0].primaries[1]:
            node_to_demote = n
            steward_for_demote_node = sdk_wallet_stewards[i]
            demote_node_index = i
            break
    assert node_to_demote
    demote_node(looper, steward_for_demote_node, sdk_pool_handle, node_to_demote)
    del txnPoolNodeSet[demote_node_index]
    # Restart the last remaining node
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_restart)
    looper.removeProdable(name=node_to_restart.name)
    node_to_restart = start_stopped_node(node_to_restart, looper, tconf, tdir,
                                         allPluginsPath)
    txnPoolNodeSet[-1] = node_to_restart
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def test_order_after_demote_and_restart(looper, txnPoolNodeSet, sdk_pool_handle,
                                        sdk_wallet_client, tdir, tconf, allPluginsPath,
                                        sdk_wallet_stewards):
    """Stop one node, demote another while it is down, then restart the
    stopped node and check it orders again and holds the same current BLS
    keys as the primary."""
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 3, 3)
    primary_node = txnPoolNodeSet[0]
    node_to_stop = txnPoolNodeSet[1]
    node_to_demote = txnPoolNodeSet[2]
    txnPoolNodeSet.remove(node_to_demote)
    node_to_stop.cleanupOnStopping = True
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2)
    # Demote while node_to_stop is down, so it learns of it only on restart
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote)
    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name, config_helper=config_helper,
                              config=tconf, pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 1, 1)

    def get_current_bls_keys(node):
        # Current BLS keys as seen by the node's master replica key register
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

    assert get_current_bls_keys(restarted_node) == get_current_bls_keys(primary_node)
def test_twice_demoted_node_dont_write_txns(txnPoolNodeSet, looper, sdk_wallet_stewards,
                                            sdk_pool_handle):
    """Demote the same node twice and check it stops writing domain txns."""
    request_count = 5
    demoted_node = txnPoolNodeSet[2]
    alive_pool = list(txnPoolNodeSet)
    alive_pool.remove(demoted_node)
    # Demoting an already-demoted node must not cause errors
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    # Find the demoted node's NYM in the pool ledger
    demoted_nym = None
    for _, txn in txnPoolNodeSet[0].poolManager.ledger.getAllTxn():
        txn_data = get_payload_data(txn)
        if txn_data[DATA][ALIAS] == demoted_node.name:
            demoted_nym = txn_data[TARGET_NYM]
            break
    assert demoted_nym
    # Every node demote `demoted_node`
    assert all(
        node.poolManager.reqHandler.getNodeData(demoted_nym)[SERVICES] == []
        for node in alive_pool)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], request_count)
    # The demoted node's domain ledger must lag by exactly request_count
    looper.run(
        eventually(
            lambda: assertExp(txnPoolNodeSet[0].domainLedger.size - request_count ==
                              demoted_node.domainLedger.size)))
def testDemoteNodeWhichWasNeverActive(looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee,
                                      tdir, tconf, allPluginsPath):
    """
    Add a node without services field and check that the ledger does not
    contain the `services` field and check that it can be demoted and
    the ledger has `services` as empty list
    """
    new_steward_wallet, new_node = sdk_node_theta_added(
        looper, nodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_trustee,
        allPluginsPath, node_config_helper_class=NodeConfigHelper,
        testNodeClass=TestNode, name="Node-" + randomString(5), services=None)
    # Give the pool time to propagate the node txn
    looper.runFor(tconf.PROPAGATE_REQUEST_DELAY * 1.5)
    for node in nodeSet[:nodeCount]:
        # Latest pool-ledger txn is the node addition: no SERVICES key yet
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == hexToFriendly(new_node.nodestack.verhex)
        assert SERVICES not in txn_data[DATA]
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    for node in nodeSet[:nodeCount]:
        # After demotion SERVICES must be present and empty
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == hexToFriendly(new_node.nodestack.verhex)
        assert SERVICES in txn_data[DATA] and txn_data[DATA][SERVICES] == []
def testStewardSuspendsNode(looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle,
                            sdk_wallet_steward, sdk_node_theta_added,
                            poolTxnStewardData, allPluginsPath):
    """Suspend (demote) a newly added node, verify it is fully disconnected,
    then revoke the suspension and verify it can rejoin the pool."""
    new_steward_wallet, new_node = sdk_node_theta_added
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet,
                               sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(
            sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))
    new_node.stop()
    looper.removeProdable(new_node)
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    promote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    nodeTheta = start_stopped_node(new_node, looper, tconf, tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)
def testSuspendNode(looper, sdk_pool_handle, sdk_wallet_trustee, nodeSet, tdir, tconf,
                    allPluginsPath):
    """
    Suspend a node and then cancel suspension. Suspend while suspended
    to test that there is no error
    """
    start_view_no = nodeSet[0].viewNo
    new_steward_wallet, new_node = sdk_node_theta_added(
        looper, nodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_trustee,
        allPluginsPath, node_config_helper_class=NodeConfigHelper,
        testNodeClass=TestNode, name="Node-" + randomString(5))
    # Adding the node causes a view change
    waitForViewChange(looper=looper, txnPoolNodeSet=nodeSet,
                      expectedViewNo=start_view_no + 1)
    ensureElectionsDone(looper=looper, nodes=nodeSet)
    demote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
    _wait_view_change_finish(looper, nodeSet[:-1], start_view_no + 1)
    # Demoting an already-demoted node must not raise
    demote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
    promote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
    _wait_view_change_finish(looper, nodeSet[:-1], start_view_no + 2)
    # Promoting an already-promoted node must not raise
    promote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
def test_future_primaries_replicas_decrease(looper, txnPoolNodeSet, sdk_pool_handle,
                                            sdk_wallet_stewards, tdir, tconf, allPluginsPath):
    """Demote one node of a 7-node pool and check the future-primaries
    handler tracks exactly one primary fewer after the view change.

    Fix: removed the unused local `last_ordered` (it was assigned and never
    read).
    """
    assert len(txnPoolNodeSet) == 7
    initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)
    starting_view_number = checkViewNoForNodes(txnPoolNodeSet)
    # Decrease replicas count
    demote_node(looper, sdk_wallet_stewards[-1], sdk_pool_handle, txnPoolNodeSet[-2])
    txnPoolNodeSet.remove(txnPoolNodeSet[-2])
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    new_view_no = checkViewNoForNodes(txnPoolNodeSet)
    assert new_view_no == starting_view_number + 1
    # Latest recorded node state must have one primary fewer than before
    state = txnPoolNodeSet[0].write_manager.future_primary_handler.node_states[-1]
    assert len(state.primaries) + 1 == len(initial_primaries)
    assert len(state.primaries) == len(txnPoolNodeSet[0].primaries)
    # Restore the original commit_batch; `old_commit` is presumably saved by
    # a fixture or module-level setup — TODO confirm it is in scope here
    for node in txnPoolNodeSet:
        node.write_manager.future_primary_handler.commit_batch = old_commit
def test_order_after_demote_and_restart(looper, txnPoolNodeSet, sdk_pool_handle,
                                        sdk_wallet_client, tdir, tconf, allPluginsPath,
                                        sdk_wallet_stewards):
    """Stop one node, demote another while it is down, restart the stopped
    node (without re-checking primaries) and verify ordering and that its
    current BLS keys match the primary's."""
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 3, 3)
    primary_node = txnPoolNodeSet[0]
    node_to_stop = txnPoolNodeSet[1]
    node_to_demote = txnPoolNodeSet[2]
    txnPoolNodeSet.remove(node_to_demote)
    node_to_stop.cleanupOnStopping = True
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2)
    # Demote while node_to_stop is down, so it learns of it only on restart
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote)
    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name, config_helper=config_helper,
                              config=tconf, pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    # check_primaries=False: primaries may legitimately differ right after
    # the restarted node catches up
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, check_primaries=False)
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 1, 1)

    def get_current_bls_keys(node):
        # Current BLS keys as seen by the node's master replica key register
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

    assert get_current_bls_keys(restarted_node) == get_current_bls_keys(primary_node)
def test_future_primaries_replicas_decrease(looper, txnPoolNodeSet, sdk_pool_handle,
                                            sdk_wallet_stewards, tdir, tconf, allPluginsPath):
    """Demote one node of a 7-node pool, then apply a synthetic 3PC batch and
    check post_batch_applied reports one primary fewer.

    Fix: removed the unused local `last_ordered` (assigned, never read).
    """
    assert len(txnPoolNodeSet) == 7
    initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)
    starting_view_number = checkViewNoForNodes(txnPoolNodeSet)
    # Decrease replicas count
    demote_node(looper, sdk_wallet_stewards[-1], sdk_pool_handle, txnPoolNodeSet[-2])
    txnPoolNodeSet.remove(txnPoolNodeSet[-2])
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    new_view_no = checkViewNoForNodes(txnPoolNodeSet)
    assert new_view_no == starting_view_number + 1
    node = txnPoolNodeSet[0]
    # Delay commits on this node so the applied batch is not yet committed
    # while we inspect the handler's output
    with delay_rules(node.nodeIbStasher, cDelay()):
        req = sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                        sdk_wallet_stewards[0], 1)[0][0]
        req = Request(**req)
        three_pc_batch = ThreePcBatch(DOMAIN_LEDGER_ID, 0, 0, 1, time.time(),
                                      randomString(), randomString(),
                                      ['a', 'b', 'c'], [req.digest], pp_digest='')
        primaries = node.write_manager.future_primary_handler.post_batch_applied(three_pc_batch)
        assert len(primaries) + 1 == len(initial_primaries)
        assert len(primaries) == len(txnPoolNodeSet[0].primaries)
    # Restore the original commit_batch; `old_commit` is presumably saved by
    # a fixture or module-level setup — TODO confirm it is in scope here
    for node in txnPoolNodeSet:
        node.write_manager.future_primary_handler.commit_batch = old_commit
def test_get_nym_by_name_demoted(txnPoolNodeSet, pool_node_txns, looper,
                                 sdk_wallet_steward, sdk_pool_handle):
    """Demote the first pool node and check get-NYM-by-name still resolves."""
    # The sdk_wallet_steward fixture belongs to the steward of node [0],
    # so it is authorized to demote that node.
    target_node = txnPoolNodeSet[0]
    demote_node(looper, sdk_wallet_steward, sdk_pool_handle, target_node)
    check_get_nym_by_name(txnPoolNodeSet, pool_node_txns)
def demote_primary_node(looper, initial_pool_of_nodes, pool_of_nodes,
                        sdk_pool_handle, sdk_wallet_stewards):
    """Demote the current master primary and return the surviving pool.

    The steward wallet is looked up by the demoted node's position in the
    initial pool.

    Fix: the survivors are now returned in the original node order — the
    previous `list(set(pool_of_nodes) - {demoted_node})` produced an
    arbitrary (hash-dependent) ordering.
    """
    demoted_node = [node for node in pool_of_nodes if node.has_master_primary][0]
    indx = initial_pool_of_nodes.index(demoted_node)
    demote_node(looper, sdk_wallet_stewards[indx], sdk_pool_handle, demoted_node)
    # Deterministic: preserve input order instead of set subtraction
    pool_of_nodes = [node for node in pool_of_nodes if node != demoted_node]
    return pool_of_nodes
def test_steward_suspends_node_and_promote_with_new_ha(
        looper, txnPoolNodeSet, tdir, tconf,
        sdk_pool_handle, sdk_wallet_steward,
        sdk_node_theta_added,
        poolTxnStewardData, allPluginsPath):
    """Suspend a newly added node, then re-promote it with a NEW HA and check
    the other nodes learn the new address and it rejoins the pool."""
    new_steward_wallet, new_node = sdk_node_theta_added
    looper.run(checkNodesConnected(txnPoolNodeSet + [new_node]))
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet,
                               sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(
            sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))
    new_node.stop()
    looper.removeProdable(new_node)
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    node_ha, client_ha = genHa(2)
    node_nym = hexToFriendly(new_node.nodestack.verhex)
    # Promote via node txn update carrying the fresh HA and VALIDATOR service
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_nym, new_node.name,
                         node_ha.host, node_ha.port,
                         client_ha.host, client_ha.port,
                         services=[VALIDATOR])
    new_node.nodestack.ha = node_ha
    new_node.clientstack.ha = client_ha
    nodeTheta = start_stopped_node(new_node, looper, tconf, tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    # All other nodes must have learned the new node HA
    assert all(node.nodestack.remotes[new_node.name].ha == node_ha
               for node in txnPoolNodeSet)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)
def testSuspendNode(looper, sdk_pool_handle, sdk_wallet_trustee, newNodeAdded):
    """Suspend a node, then cancel the suspension.

    Each operation is issued twice to verify that suspending an
    already-suspended node (and promoting an already-promoted one) raises
    no error.
    """
    new_steward_wallet, new_node = newNodeAdded
    for action in (demote_node, demote_node, promote_node, promote_node):
        action(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
def demote_another_one(rest_pool):
    """Demote the last node of *rest_pool* and return the surviving nodes.

    NOTE(review): relies on `looper`, `sdk_wallet_steward` and
    `sdk_pool_handle` being available from the enclosing scope — confirm
    this helper is defined inside a test that provides them.
    """
    demoted_node = rest_pool[-1]
    rest_pool = [n for n in rest_pool if n != demoted_node]
    starting_view_no = checkViewNoForNodes(rest_pool)
    demote_node(looper, sdk_wallet_steward, sdk_pool_handle, demoted_node)
    # Demotion triggers a view change among the remaining nodes
    waitForViewChange(looper, rest_pool, expectedViewNo=starting_view_no + 1)
    ensureElectionsDone(looper, rest_pool, customTimeout=60)
    ensure_all_nodes_have_same_data(looper, rest_pool)
    return rest_pool
def test_demote_node_delay_commit_on_one(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_stewards, tdir, tconf, allPluginsPath):
    """Demote a node while commits are delayed on one other node and check
    the pool still completes the view change and converges on the same data.

    NOTE(review): the original source was collapsed onto one line, so the
    exact extent of the `with` block is inferred — the final data-equality
    check is placed after the delay is lifted so the slow node can catch
    up; confirm against the original layout.
    """
    view_no = txnPoolNodeSet[-1].viewNo
    slow_node = txnPoolNodeSet[-2]
    # Demote Node8 but don't allow Node7 to be aware of it.
    with delay_rules(slow_node.nodeIbStasher, cDelay()):
        demote_node(looper, sdk_wallet_stewards[-1], sdk_pool_handle,
                    txnPoolNodeSet[-1])
        del txnPoolNodeSet[-1]
        waitForViewChange(looper, txnPoolNodeSet, view_no + 1)
        ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def test_promotion_before_view_change(looper, txnPoolNodeSet, tdir, tconf, allPluginsPath,
                                      sdk_wallet_stewards, sdk_pool_handle):
    """Demote a backup primary, then promote it back and check the primaries
    are re-selected correctly across the resulting view changes."""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    assert txnPoolNodeSet[0].master_replica.isPrimary
    assert txnPoolNodeSet[1].replicas[1].isPrimary
    assert txnPoolNodeSet[2].replicas[2].isPrimary
    starting_view_number = checkViewNoForNodes(txnPoolNodeSet)
    node_2 = txnPoolNodeSet[1]
    node_3 = txnPoolNodeSet[2]
    node_5 = txnPoolNodeSet[4]
    # Demote node 2
    steward_2 = sdk_wallet_stewards[1]
    demote_node(looper, steward_2, sdk_pool_handle, node_2)
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_2)
    looper.removeProdable(node_2)
    txnPoolNodeSet.remove(node_2)
    # Checking that view change happened
    # we are expecting 2 view changes here since Beta is selected as a master Primary on view=1
    # (since node reg at the beginning of view 0 is used to select it), but it's not available (demoted),
    # so we do view change to view=2 by timeout
    waitForViewChange(looper, txnPoolNodeSet,
                      expectedViewNo=starting_view_number + 2)
    ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1])
    assert node_3.master_replica.isPrimary
    # Promoting node 3, increasing replica count
    node_2 = start_stopped_node(node_2, looper, tconf, tdir, allPluginsPath)
    promote_node(looper, steward_2, sdk_pool_handle, node_2)
    txnPoolNodeSet.append(node_2)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitForViewChange(looper, txnPoolNodeSet,
                      expectedViewNo=starting_view_number + 3)
    ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1, 2])
    # node 5 is a primary since promoted node is added at the end of the list
    assert node_5.master_replica.isPrimary
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def test_update_with_demoted_node(looper, nodeSet, validUpgrade, sdk_pool_handle,
                                  sdk_wallet_stewards, sdk_wallet_trustee):
    """Demote one node, drop it from the upgrade schedule, and verify the
    upgrade is still scheduled on the remaining nodes."""
    # Demote the fourth node using its own steward's wallet
    demoted = nodeSet[3]
    demote_node(looper, sdk_wallet_stewards[3], sdk_pool_handle, demoted)
    # Remove the demoted node from the upgrade schedule
    upgr = validUpgrade
    del upgr[SCHEDULE][demoted.id]
    # Send the upgrade transaction
    sdk_ensure_upgrade_sent(looper, sdk_pool_handle, sdk_wallet_trustee, upgr)
    # The three surviving nodes should have the upgrade scheduled
    looper.run(eventually(checkUpgradeScheduled,
                          nodeSet[:3],
                          upgr[VERSION],
                          retryWait=1,
                          timeout=waits.expectedUpgradeScheduled()))
def test_steward_suspends_node_and_promote_with_new_ha(
        looper, txnPoolNodeSet, tdir, tconf,
        sdk_pool_handle, sdk_wallet_steward,
        sdk_node_theta_added,
        poolTxnStewardData, allPluginsPath):
    """Suspend a newly added node, then re-promote it under a NEW HA and
    verify the pool picks up the new address and the node rejoins."""
    new_steward_wallet, new_node = sdk_node_theta_added
    looper.run(checkNodesConnected(txnPoolNodeSet + [new_node]))
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, new_steward_wallet,
                               sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(
            sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))
    new_node.stop()
    looper.removeProdable(new_node)
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    node_ha, client_ha = genHa(2)
    node_nym = hexToFriendly(new_node.nodestack.verhex)
    # Promote via a node-update txn carrying the new HA and VALIDATOR service
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_nym, new_node.name,
                         node_ha.host, node_ha.port,
                         client_ha.host, client_ha.port,
                         services=[VALIDATOR])
    new_node.nodestack.ha = node_ha
    new_node.clientstack.ha = client_ha
    nodeTheta = start_stopped_node(new_node, looper, tconf, tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    # All other nodes must now address the node via its fresh HA
    assert all(node.nodestack.remotes[new_node.name].ha == node_ha
               for node in txnPoolNodeSet)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)
def test_node_txn_promote_by_endorser(txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee,
                                      looper, sdk_wallet_handle):
    """Demote a node with default auth rules, change the auth rule to allow
    an Endorser to promote nodes, then promote the node back as a new
    Endorser."""
    validators_before = get_pool_validator_count(txnPoolNodeSet)
    new_end_did, new_end_verkey = looper.loop.run_until_complete(
        did.create_and_store_my_did(sdk_wallet_trustee[0], "{}"))
    # Step 1. Demote node using default auth rule
    demote_node(looper, sdk_wallet_trustee, sdk_pool_handle, txnPoolNodeSet[-1])
    # Check, that node was demoted
    assert validators_before - get_pool_validator_count(txnPoolNodeSet[:-1]) == 1
    # Step 2. Add new Endorser
    sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, 'newEndorser',
                    ENDORSER_STRING, verkey=new_end_verkey, dest=new_end_did)
    new_constraint = AuthConstraint(ENDORSER, 1)
    # Step 3. Change auth rule, to allowing endorser promote node back
    sdk_send_and_check_auth_rule_request(looper, sdk_pool_handle, sdk_wallet_trustee,
                                         auth_action=EDIT_PREFIX,
                                         auth_type=promote_action.txn_type,
                                         field=promote_action.field,
                                         new_value=promote_action.new_value,
                                         old_value=promote_action.old_value,
                                         constraint=new_constraint.as_dict)
    # Step 4. Promote node back, using new Endorser
    promote_node(looper, (sdk_wallet_handle, new_end_did), sdk_pool_handle,
                 txnPoolNodeSet[-1])
    # Check, that all other nodes return previous demoted node back
    assert validators_before == get_pool_validator_count(txnPoolNodeSet[:-1])
def test_promotion_before_view_change(looper, txnPoolNodeSet, tdir, tconf, allPluginsPath,
                                      sdk_wallet_stewards, sdk_pool_handle):
    """Demote a backup primary, then promote it back and check correct
    primary re-selection after the resulting view changes."""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    assert txnPoolNodeSet[0].master_replica.isPrimary
    assert txnPoolNodeSet[1].replicas[1].isPrimary
    assert txnPoolNodeSet[2].replicas[2].isPrimary
    starting_view_number = checkViewNoForNodes(txnPoolNodeSet)
    node_2 = txnPoolNodeSet[1]
    node_3 = txnPoolNodeSet[2]
    node_4 = txnPoolNodeSet[3]
    # Demote node 2
    steward_2 = sdk_wallet_stewards[1]
    demote_node(looper, steward_2, sdk_pool_handle, node_2)
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_2)
    looper.removeProdable(node_2)
    txnPoolNodeSet.remove(node_2)
    # Checking that view change happened
    waitForViewChange(looper, txnPoolNodeSet,
                      expectedViewNo=starting_view_number + 1)
    ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1])
    assert node_3.master_replica.isPrimary
    # Promoting node 3, increasing replica count
    node_2 = start_stopped_node(node_2, looper, tconf, tdir, allPluginsPath)
    promote_node(looper, steward_2, sdk_pool_handle, node_2)
    txnPoolNodeSet.append(node_2)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    # Two more view changes are expected by the time promotion settles
    waitForViewChange(looper, txnPoolNodeSet,
                      expectedViewNo=starting_view_number + 3)
    ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1, 2])
    assert node_4.master_replica.isPrimary
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def testSuspendNode(looper, sdk_pool_handle, sdk_wallet_trustee, newNodeAdded, nodeSet):
    """
    Suspend a node and then cancel suspension. Suspend while suspended
    to test that there is no error
    """
    start_view_no = nodeSet[0].viewNo
    new_steward_wallet, new_node = newNodeAdded
    demote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
    _wait_view_change_finish(looper, nodeSet[:-1], start_view_no + 1)
    # Demoting the already-suspended node must be a no-op (no error)
    demote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
    promote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
    _wait_view_change_finish(looper, nodeSet[:-1], start_view_no + 3)
    # Promoting the already-promoted node must also be a no-op
    promote_node(looper, sdk_wallet_trustee, sdk_pool_handle, new_node)
def test_demote_backup_primary(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards,
                               tdir, tconf, allPluginsPath):
    """Demote the primary of backup instance 1 in a 6-node pool, wait for
    the two resulting view changes, restart another node and check data
    consistency throughout."""
    assert len(txnPoolNodeSet) == 6
    view_no = txnPoolNodeSet[-1].viewNo
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    node_to_restart = txnPoolNodeSet[-1]
    # Find the node that is primary of backup instance 1, plus its steward
    node_to_demote = steward_for_demote_node = demote_node_index = None
    steward_for_demote_node = None
    for i, n in enumerate(txnPoolNodeSet):
        if n.name == txnPoolNodeSet[0].primaries[1]:
            node_to_demote = n
            steward_for_demote_node = sdk_wallet_stewards[i]
            demote_node_index = i
            break
    assert node_to_demote
    demote_node(looper, steward_for_demote_node, sdk_pool_handle, node_to_demote)
    del txnPoolNodeSet[demote_node_index]
    # we are expecting 2 view changes here since Beta is selected as a master Primary on view=1
    # (since node reg at the beginning of view 0 is used to select it), but it's not available (demoted),
    # so we do view change to view=2 by timeout
    waitForViewChange(looper, txnPoolNodeSet, view_no + 2)
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    # Restart the last remaining node and check it rejoins cleanly
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_restart)
    looper.removeProdable(name=node_to_restart.name)
    node_to_restart = start_stopped_node(node_to_restart, looper, tconf, tdir,
                                         allPluginsPath)
    txnPoolNodeSet[-1] = node_to_restart
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20)
def test_update_with_demoted_node(looper, nodeSet, validUpgrade, sdk_pool_handle,
                                  sdk_wallet_stewards, sdk_wallet_trustee):
    """Demote one node, exclude it from the upgrade schedule, and confirm
    the remaining nodes still schedule the upgrade."""
    excluded = nodeSet[3]
    # Demote one node via its steward
    demote_node(looper, sdk_wallet_stewards[3], sdk_pool_handle, excluded)
    # Remove the demoted node from the upgrade schedule
    upgr = validUpgrade
    del upgr[SCHEDULE][excluded.id]
    # Send the upgrade
    sdk_ensure_upgrade_sent(looper, sdk_pool_handle, sdk_wallet_trustee, upgr)
    # Check the upgrade was scheduled on the surviving three nodes
    scheduled_check = eventually(checkUpgradeScheduled,
                                 nodeSet[:3],
                                 upgr[VERSION],
                                 retryWait=1,
                                 timeout=waits.expectedUpgradeScheduled())
    looper.run(scheduled_check)
def test_twice_demoted_node_dont_write_txns(txnPoolNodeSet, looper, sdk_wallet_stewards,
                                            sdk_pool_handle):
    """Demote the same node twice and check it neither writes new domain
    txns nor silently reconnects to the pool."""
    request_count = 5
    demoted_node = txnPoolNodeSet[2]
    alive_pool = list(txnPoolNodeSet)
    alive_pool.remove(demoted_node)

    def get_node_prods_count(node):
        # Number of prod (event-loop) iterations the node has executed
        return node.metrics._accumulators[MetricsName.NODE_PROD_TIME].count

    def is_prods_run(node, old, diff):
        # Assert the node performed at least `diff` more prod runs since `old`
        new = get_node_prods_count(node)
        assert old + diff <= new

    # Demoting an already-demoted node must not cause errors
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    # Find the demoted node's NYM in the pool ledger
    demoted_nym = None
    for _, txn in txnPoolNodeSet[0].poolManager.ledger.getAllTxn():
        txn_data = get_payload_data(txn)
        if txn_data[DATA][ALIAS] == demoted_node.name:
            demoted_nym = txn_data[TARGET_NYM]
            break
    assert demoted_nym
    # Every node demote `demoted_node`
    assert all(
        node.poolManager.reqHandler.getNodeData(demoted_nym)[SERVICES] == []
        for node in alive_pool)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], request_count)
    old = get_node_prods_count(txnPoolNodeSet[0])
    # Let primary node make 2 prod runs so we make sure that
    # node did not appear in network reconnection
    looper.run(eventually(is_prods_run, txnPoolNodeSet[0], old, 2))
    assert txnPoolNodeSet[0].domainLedger.size - request_count == \
        demoted_node.domainLedger.size
def test_catchup_after_replica_removing(looper, sdk_pool_handle, txnPoolNodeSet,
                                        sdk_wallet_stewards, tdir, tconf, allPluginsPath):
    """Demote the primary of backup instance 1 and check the remaining nodes
    keep the same view, stay in sync and can still order transactions.

    Fix: the original passed a bare generator expression to `assertExp`;
    a generator object is always truthy, so the view-number check could
    never fail. It is now wrapped in `all()`.
    """
    view_no = txnPoolNodeSet[-1].viewNo
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    waitNodeDataEquality(looper, *txnPoolNodeSet)
    # Locate the primary of backup instance 1 and the steward that owns it
    index, node_for_demote = [(i, n) for i, n in enumerate(txnPoolNodeSet)
                              if n.replicas[1].isPrimary][0]
    sdk_wallet_steward = sdk_wallet_stewards[index]
    demote_node(looper, sdk_wallet_steward, sdk_pool_handle, node_for_demote)
    txnPoolNodeSet.pop(index)
    # All remaining nodes must still agree on the original view number
    looper.run(
        eventually(
            lambda: assertExp(all(n.viewNo == view_no for n in txnPoolNodeSet))))
    waitNodeDataEquality(looper, *txnPoolNodeSet)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    waitNodeDataEquality(looper, *txnPoolNodeSet)
def test_twice_demoted_node_dont_write_txns(txnPoolNodeSet, looper, sdk_wallet_stewards,
                                            sdk_pool_handle):
    """Demote the same node twice and check it neither writes new domain
    txns nor reappears via network reconnection."""
    request_count = 5
    demoted_node = txnPoolNodeSet[2]
    alive_pool = list(txnPoolNodeSet)
    alive_pool.remove(demoted_node)

    def get_node_prods_count(node):
        # Number of prod (event-loop) iterations the node has executed
        return node.metrics._accumulators[MetricsName.NODE_PROD_TIME].count

    def is_prods_run(node, old, diff):
        # Assert the node performed at least `diff` more prod runs since `old`
        new = get_node_prods_count(node)
        assert old + diff <= new

    # Demoting an already-demoted node must not cause errors
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    # Find the demoted node's NYM in the pool ledger
    demoted_nym = None
    for _, txn in txnPoolNodeSet[0].poolManager.ledger.getAllTxn():
        txn_data = get_payload_data(txn)
        if txn_data[DATA][ALIAS] == demoted_node.name:
            demoted_nym = txn_data[TARGET_NYM]
            break
    assert demoted_nym
    # Every node demote `demoted_node`
    assert all(node.poolManager.reqHandler.getNodeData(demoted_nym)[SERVICES] == []
               for node in alive_pool)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], request_count)
    old = get_node_prods_count(txnPoolNodeSet[0])
    # Let primary node make 2 prod runs so we make sure that
    # node did not appear in network reconnection
    looper.run(eventually(is_prods_run, txnPoolNodeSet[0], old, 2))
    assert txnPoolNodeSet[0].domainLedger.size - request_count == \
        demoted_node.domainLedger.size
def testDemoteNodeWhichWasNeverActive(looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee,
                                      tdir, tconf, allPluginsPath):
    """
    Add a node without services field and check that the ledger does not
    contain the `services` field and check that it can be demoted and
    the ledger has `services` as empty list
    """
    alias = randomString(5)
    new_node_name = "Node-" + alias
    sdk_wallet_steward = sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                                         alias="Steward-" + alias, role='STEWARD')
    new_node = sdk_add_new_node(looper, sdk_pool_handle, sdk_wallet_steward,
                                new_node_name, tdir, tconf, allPluginsPath,
                                services=None)
    # Give the pool time to propagate the node txn
    looper.runFor(tconf.PROPAGATE_REQUEST_DELAY * 1.5)
    for node in nodeSet:
        # Latest pool-ledger txn is the node addition: no SERVICES key yet
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == hexToFriendly(new_node.nodestack.verhex)
        assert SERVICES not in txn_data[DATA]
    demote_node(looper, sdk_wallet_steward, sdk_pool_handle, new_node)
    for node in nodeSet:
        # After demotion SERVICES must be present and empty
        txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
        txn_data = get_payload_data(txn)
        assert txn_data[TARGET_NYM] == hexToFriendly(new_node.nodestack.verhex)
        assert SERVICES in txn_data[DATA] and txn_data[DATA][SERVICES] == []
def test_catchup_after_replica_removing(looper, sdk_pool_handle, txnPoolNodeSet,
                                        sdk_wallet_stewards, tdir, tconf, allPluginsPath):
    """Demote the primary of backup instance 1, wait for the two resulting
    view changes, and check the pool stays in sync and can still order."""
    view_no = txnPoolNodeSet[-1].viewNo
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    waitNodeDataEquality(looper, *txnPoolNodeSet)
    # Locate the primary of backup instance 1 and the steward that owns it
    index, node_for_demote = [(i, n) for i, n in enumerate(txnPoolNodeSet)
                              if n.replicas[1].isPrimary][0]
    sdk_wallet_steward = sdk_wallet_stewards[index]
    demote_node(looper, sdk_wallet_steward, sdk_pool_handle, node_for_demote)
    txnPoolNodeSet.pop(index)
    # we are expecting 2 view changes here since Beta is selected as a master Primary on view=1
    # (since node reg at the beginning of view 0 is used to select it), but it's not available (demoted),
    # so we do view change to view=2 by timeout
    waitForViewChange(looper, txnPoolNodeSet, view_no + 2)
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    waitNodeDataEquality(looper, *txnPoolNodeSet)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)
    waitNodeDataEquality(looper, *txnPoolNodeSet)
def test_promotion_leads_to_correct_primary_selection(looper, txnPoolNodeSet,
                                                      tdir, tconf,
                                                      allPluginsPath,
                                                      sdk_wallet_stewards,
                                                      sdk_pool_handle):
    """
    Demote node 3, restart node 1, then promote node 3 back, checking that
    all nodes agree on the primaries at every step.
    """
    # Pool state at the moment of the last view change is sent to newly
    # connected nodes so they can restore primaries from that node set.
    # Promotion/demotion edits the current primaries without updating that
    # saved state, which previously led to primary inconsistency across nodes.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)

    assert txnPoolNodeSet[0].master_replica.isPrimary
    assert txnPoolNodeSet[1].replicas._replicas[1].isPrimary
    assert txnPoolNodeSet[2].replicas._replicas[2].isPrimary

    base_view_no = checkViewNoForNodes(txnPoolNodeSet)
    first_node = txnPoolNodeSet[0]
    third_node = txnPoolNodeSet[2]
    third_steward = sdk_wallet_stewards[2]

    # Demote node 3 and drop it from the working set.
    demote_node(looper, third_steward, sdk_pool_handle, third_node)
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, third_node)
    looper.removeProdable(third_node)
    txnPoolNodeSet.remove(third_node)

    # The demotion must trigger a view change and leave primaries consistent.
    waitForViewChange(looper, txnPoolNodeSet, base_view_no + 1)
    assert all(node.replicas.primary_name_by_inst_id ==
               first_node.replicas.primary_name_by_inst_id
               for node in txnPoolNodeSet)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 2)
    for node in txnPoolNodeSet:
        assert node.f == 1
        assert node.replicas.num_replicas == 2

    # Restart node 1.
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, first_node)
    looper.removeProdable(first_node)
    txnPoolNodeSet.remove(first_node)
    first_node = start_stopped_node(first_node, looper, tconf, tdir,
                                    allPluginsPath)
    txnPoolNodeSet.append(first_node)

    # Wait until node 1 has started and caught up; primaries still consistent.
    waitForViewChange(looper, txnPoolNodeSet, base_view_no + 1)
    assert all(node.replicas.primary_name_by_inst_id ==
               first_node.replicas.primary_name_by_inst_id
               for node in txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Promote node 3 back, which increases the replica count again.
    third_node = start_stopped_node(third_node, looper, tconf, tdir,
                                    allPluginsPath)
    promote_node(looper, third_steward, sdk_pool_handle, third_node)
    txnPoolNodeSet.append(third_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # Wait for the view change caused by the promotion.
    waitForViewChange(looper, txnPoolNodeSet, base_view_no + 2)
    ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1, 2])

    # Node 3 must be able to participate in ordering again.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def testValidatorSuspensionByTrustee(sdk_wallet_trustee, sdk_pool_handle,
                                     looper, nodeSet):
    """
    Demote the last node via the trustee and check that every other node
    eventually removes it from its node registry.
    """
    suspended_node = nodeSet[-1]
    demote_node(looper, sdk_wallet_trustee, sdk_pool_handle, suspended_node)
    for remaining_node in nodeSet[:-1]:
        looper.run(eventually(checkNodeNotInNodeReg, remaining_node,
                              suspended_node.name))
def test_promotion_leads_to_primary_inconsistency(looper, txnPoolNodeSet, tdir,
                                                  tconf, allPluginsPath,
                                                  sdk_wallet_stewards,
                                                  sdk_pool_handle):
    """
    Reproduce primary inconsistency after demotion + promotion: a promoted
    node restores primaries from a saved pool state that does not include
    itself, so its primaries diverge from the rest of the pool.
    """
    # Pool state at the moment of the last view change is sent to newly
    # connected nodes so they can restore primaries from that node set.
    # Promotion/demotion edits the current primaries without updating that
    # saved state, which leads to primary inconsistency across nodes.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 1)

    assert txnPoolNodeSet[0].master_replica.isPrimary
    assert txnPoolNodeSet[1].replicas._replicas[1].isPrimary
    assert txnPoolNodeSet[2].replicas._replicas[2].isPrimary

    base_view_no = checkViewNoForNodes(txnPoolNodeSet)

    # Demote node 3 and drop it from the working set.
    third_node = txnPoolNodeSet[2]
    third_steward = sdk_wallet_stewards[2]
    demote_node(looper, third_steward, sdk_pool_handle, third_node)
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, third_node)
    looper.removeProdable(third_node)
    txnPoolNodeSet.remove(third_node)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 2)
    for node in txnPoolNodeSet:
        assert node.f == 1
        assert node.replicas.num_replicas == 2

    # Force a view change by stopping the master. At this moment the pool
    # state (without the 3rd node) is saved.
    first_node = txnPoolNodeSet[0]
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, first_node)
    looper.removeProdable(first_node)
    txnPoolNodeSet.remove(first_node)

    # Checking that the view change happened.
    ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1])
    view_number = checkViewNoForNodes(txnPoolNodeSet)
    assert view_number == base_view_no + 1

    first_node = start_stopped_node(first_node, looper, tconf, tdir,
                                    allPluginsPath)
    txnPoolNodeSet.append(first_node)
    # Wait so the restarted node can start and finish the view change.
    looper.runFor(1)

    # Promote node 3 back, which increases the replica count again.
    third_node = start_stopped_node(third_node, looper, tconf, tdir,
                                    allPluginsPath)
    promote_node(looper, third_steward, sdk_pool_handle, third_node)
    txnPoolNodeSet.append(third_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet, instances_list=[0, 1, 2])

    # Node 3 is able to take part in ordering.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], 2)
    view_number = checkViewNoForNodes(txnPoolNodeSet)
    assert view_number == base_view_no + 2
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # But it has different primary, cause it uses nodeReg without itself to
    # calculate primaries.
    assert all(node.replicas.primary_name_by_inst_id ==
               first_node.replicas.primary_name_by_inst_id
               for node in txnPoolNodeSet if node is not third_node)
    # Fails
    assert all(node.replicas.primary_name_by_inst_id ==
               first_node.replicas.primary_name_by_inst_id
               for node in txnPoolNodeSet)
def test_get_nym_by_name_demoted(txnPoolNodeSet, pool_node_txns, looper,
                                 sdk_wallet_stewards, sdk_pool_handle):
    """Demote the first node, then verify nym-by-name lookup against the
    pool node txns."""
    demoted = txnPoolNodeSet[0]
    demote_node(looper, sdk_wallet_stewards[0], sdk_pool_handle, demoted)
    check_get_nym_by_name(txnPoolNodeSet, pool_node_txns)