def sdk_change_bls_key(looper, txnPoolNodeSet, node, sdk_pool_handle,
                       sdk_wallet_steward, add_wrong=False, new_bls=None,
                       new_key_proof=None):
    """Rotate a node's BLS key (and proof) via a NODE txn and verify the pool.

    :param add_wrong: if True, generate a fresh key pair unrelated to the
        node's keys dir, i.e. a key the node does not actually hold
    :param new_bls: optional explicit key to put in the txn (overrides the
        generated one)
    :param new_key_proof: optional explicit proof-of-possession to send
    :return: the freshly generated BLS public key
    """
    if add_wrong:
        # Key generated out-of-band: valid format, but not the node's own key
        _, new_blspk, key_proof = create_default_bls_crypto_factory().generate_bls_keys()
    else:
        new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    # Explicit overrides win over the generated values
    key_in_txn = new_bls or new_blspk
    bls_key_proof = new_key_proof or key_proof
    node_dest = hexToFriendly(node.nodestack.verhex)
    # HA fields are None: only the BLS key/proof are being updated
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None, None, None,
                         bls_key=key_in_txn,
                         services=None,
                         key_proof=bls_key_proof)
    # Wait for every *other* node to apply the pool ledger update
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
    return new_blspk
def test_number_txns_in_catchup_and_vc_queue_valid(looper, txnPoolNodeSet,
                                                   tconf, sdk_pool_handle,
                                                   sdk_wallet_steward, tdir,
                                                   allPluginsPath):
    """Stop the master primary, order txns without it, restart it, and check
    that the validator-info tool reports the caught-up txn count and the
    completed view number on all nodes."""
    num_txns = 5
    master_node = get_master_primary_node(txnPoolNodeSet)
    master_node_index = txnPoolNodeSet.index(master_node)
    other_nodes = txnPoolNodeSet.copy()
    other_nodes.remove(master_node)
    old_view = master_node.viewNo
    expected_view_no = old_view + 1
    # Removing the primary forces a view change among the remaining nodes
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            master_node, stopNode=True)
    looper.removeProdable(master_node)
    looper.run(eventually(checkViewNoForNodes, other_nodes, expected_view_no,
                          retryWait=1, timeout=tconf.NEW_VIEW_TIMEOUT))
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Order txns while the old primary is down so it must catch them up later
    sdk_send_random_and_check(looper, other_nodes, sdk_pool_handle,
                              sdk_wallet_steward, num_txns)
    master_node = start_stopped_node(master_node, looper, tconf, tdir,
                                     allPluginsPath)
    txnPoolNodeSet[master_node_index] = master_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, master_node, *txnPoolNodeSet[-1:],
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    latest_info = master_node._info_tool.info
    # [1] holds the number of txns received during the last catchup
    assert latest_info['Node_info']['Catchup_status']['Number_txns_in_catchup'][1] == num_txns
    assert latest_info['Node_info']['View_change_status']['View_No'] == expected_view_no
    for n in other_nodes:
        assert n._info_tool.info['Node_info']['View_change_status']['Last_complete_view_no'] == expected_view_no
def testAddInactiveNodeThenActivate(looper, txnPoolNodeSet, sdk_wallet_steward,
                                    sdk_pool_handle, tdir, tconf,
                                    allPluginsPath):
    """Add a node without a SERVICES field (inactive), then activate it via a
    node-data update and check the pool still processes requests."""
    new_steward_name = "testClientSteward" + randomString(3)
    new_node_name = "Kappa"
    # adding a new node without SERVICES field
    # it means the node is in the inactive state
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     new_steward_name,
                                     new_node_name,
                                     tdir,
                                     tconf,
                                     allPluginsPath,
                                     services=None)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Sending an update with all-None data restarts/activates the node
    new_node = update_node_data_and_reconnect(looper,
                                              txnPoolNodeSet + [new_node],
                                              new_steward_wallet,
                                              sdk_pool_handle,
                                              new_node,
                                              None, None,
                                              None, None,
                                              tdir, tconf)
    txnPoolNodeSet.append(new_node)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               new_steward_wallet, sdk_pool_handle)
def sdk_node_theta_added(looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle,
                         sdk_wallet_steward, allPluginsPath,
                         testNodeClass=TestNode, name=None):
    """Add a new steward and a new node (alias "Theta" by default) to the pool.

    The started node is appended to ``txnPoolNodeSet`` and the pool handle is
    refreshed so the client sees the new node.

    :return: tuple of (new steward wallet, new node)
    """
    steward_alias = "testClientSteward{}".format(randomString(3))
    node_alias = name if name else "Theta"
    steward_wallet, theta = sdk_add_new_steward_and_node(
        looper,
        sdk_pool_handle,
        sdk_wallet_steward,
        steward_alias,
        node_alias,
        tdir,
        tconf,
        allPluginsPath,
        nodeClass=testNodeClass)
    txnPoolNodeSet.append(theta)
    # Make sure the enlarged pool is fully connected before handing it back
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    return steward_wallet, theta
def testClientConnectToRestartedNodes(looper, txnPoolNodeSet, tdir, tconf,
                                      poolTxnNodeNames, allPluginsPath,
                                      sdk_wallet_new_client, sdk_pool_handle):
    """Stop every node, recreate them from config, and verify a client can
    still connect and send requests to the restarted pool."""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_new_client, 1)
    # Tear the whole pool down
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)
    txnPoolNodeSet = []
    # Recreate each node object, simulating a restart via the start script
    for nm in poolTxnNodeNames:
        config_helper = PNodeConfigHelper(nm, tconf, chroot=tdir)
        node = TestNode(nm,
                        config_helper=config_helper,
                        config=tconf, pluginPaths=allPluginsPath)
        looper.add(node)
        txnPoolNodeSet.append(node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)

    def chk():
        for node in txnPoolNodeSet:
            assert node.isParticipating

    timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_new_client, sdk_pool_handle)
def add_new_node(looper, nodes, sdk_pool_handle, sdk_wallet_steward,
                 tdir, tconf, all_plugins_path, name=None,
                 wait_till_added=True):
    """Add a new node (alias "Psi" by default) through a fresh steward.

    :param wait_till_added: if True, wait for the node to connect and catch
        up, and append it to ``nodes``; if False, just fire the request
    :return: the new node object
    """
    node_name = name or "Psi"
    new_steward_name = "testClientSteward" + randomString(3)
    _, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        new_steward_name, node_name, tdir, tconf,
        allPluginsPath=all_plugins_path,
        wait_till_added=wait_till_added)
    if wait_till_added:
        nodes.append(new_node)
        looper.run(checkNodesConnected(nodes))
        timeout = waits.expectedPoolCatchupTime(nodeCount=len(nodes))
        # Compare against all pre-existing nodes (the new one is nodes[-1])
        waitNodeDataEquality(
            looper, new_node, *nodes[:-1],
            customTimeout=timeout,
            exclude_from_check=['check_last_ordered_3pc_backup'])
        sdk_pool_refresh(looper, sdk_pool_handle)
    return new_node
def testStewardSuspendsNode(looper, txnPoolNodeSet, tdir, tconf,
                            sdk_pool_handle, sdk_wallet_steward,
                            sdk_node_theta_added,
                            poolTxnStewardData, allPluginsPath):
    """Demote a node and verify it disappears from node registries, then
    promote it again and verify it rejoins the pool."""
    new_steward_wallet, new_node = sdk_node_theta_added
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               new_steward_wallet, sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(
            sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))
    new_node.stop()
    looper.removeProdable(new_node)
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    promote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    nodeTheta = start_stopped_node(new_node, looper, tconf,
                                   tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
def add_new_node(looper, nodes, sdk_pool_handle, sdk_wallet_steward, tdir,
                 client_tdir, tconf, all_plugins_path, name=None):
    """Add a new node (alias "Psi" by default) via a fresh steward and wait
    until it is connected and caught up with the rest of ``nodes``.

    :return: the new node object (also appended to ``nodes``)
    """
    node_name = name or "Psi"
    new_steward_name = "testClientSteward" + randomString(3)
    _, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        new_steward_name, node_name, tdir, tconf,
        allPluginsPath=all_plugins_path)
    nodes.append(new_node)
    looper.run(checkNodesConnected(nodes))
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(nodes))
    # New node is nodes[-1], so compare it against every pre-existing node
    waitNodeDataEquality(looper, new_node, *nodes[:-1],
                         customTimeout=timeout)
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_node
def sdk_change_bls_key(looper, txnPoolNodeSet, node, sdk_pool_handle,
                       sdk_wallet_steward, add_wrong=False, new_bls=None,
                       new_key_proof=None):
    """Rotate a node's BLS key (and proof) via a NODE txn and verify the pool.

    :param add_wrong: if True, generate a key pair the node does not hold
    :param new_bls: optional explicit key to put in the txn
    :param new_key_proof: optional explicit proof-of-possession
    :return: the freshly generated BLS public key
    """
    if add_wrong:
        # Key generated out-of-band: valid format, but not the node's own key
        _, new_blspk, key_proof = create_default_bls_crypto_factory(
        ).generate_bls_keys()
    else:
        new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    # Explicit overrides win over the generated values
    key_in_txn = new_bls or new_blspk
    bls_key_proof = new_key_proof or key_proof
    node_dest = hexToFriendly(node.nodestack.verhex)
    # HA fields are None: only the BLS key/proof are being updated
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None, None, None,
                         bls_key=key_in_txn,
                         services=None,
                         key_proof=bls_key_proof)
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
    return new_blspk
def test_number_txns_in_catchup_and_vc_queue_valid(looper, txnPoolNodeSet,
                                                   tconf, sdk_pool_handle,
                                                   sdk_wallet_steward):
    """Disconnect the master primary, order txns, reconnect it, and check the
    validator-info catchup count and view-change queue entries."""
    num_txns = 5
    master_node = get_master_primary_node(txnPoolNodeSet)
    old_view = master_node.viewNo
    expected_view_no = old_view + 1
    # Disconnect (but do not stop) the primary to trigger a view change
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            master_node,
                                            stopNode=False)
    # NOTE(review): txnPoolNodeSet[1:] presumes the master primary is the
    # first node in the set — confirm against the fixture ordering
    looper.run(
        eventually(checkViewNoForNodes, txnPoolNodeSet[1:], expected_view_no,
                   retryWait=1, timeout=tconf.VIEW_CHANGE_TIMEOUT))
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Order txns while the old primary is away so it must catch them up
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, num_txns)
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, master_node)
    waitNodeDataEquality(looper, master_node, *txnPoolNodeSet[-1:])
    latest_info = master_node._info_tool.info
    # [1] holds the number of txns received during the last catchup
    assert latest_info['Node_info']['Catchup_status'][
               'Number_txns_in_catchup'][1] == num_txns
    assert latest_info['Node_info']['View_change_status'][
               'View_No'] == expected_view_no
    node_names = [n.name for n in txnPoolNodeSet[1:]]
    for node_name in node_names:
        # Each peer should have sent a VCDone for the new primary
        assert latest_info['Node_info']['View_change_status']['VCDone_queue'][
                   node_name][0] == master_node.master_primary_name
        assert latest_info['Node_info']['View_change_status']['VCDone_queue'][
            node_name][1]
    assert latest_info['Node_info']['View_change_status'][
               'Last_complete_view_no'] == expected_view_no
def sdk_change_bls_key(looper, txnPoolNodeSet, node, sdk_pool_handle,
                       sdk_wallet_steward, add_wrong=False, new_bls=None):
    """Rotate a node's BLS key via a NODE txn and verify the pool still works.

    :param add_wrong: if True, send a random base58 string instead of the
        node's real key, i.e. a key the node does not actually hold
    :param new_bls: optional explicit key to put in the txn (overrides the
        generated one); ignored when ``add_wrong`` is True
    :return: the freshly generated BLS public key
    """
    new_blspk = init_bls_keys(node.keys_dir, node.name)
    if add_wrong:
        # BUGFIX: base58.b58encode returns bytes; decode so the txn carries a
        # str key, consistent with the other sdk_change_bls_key variant above
        key_in_txn = base58.b58encode(randomString(128).encode()).decode("utf-8")
    else:
        key_in_txn = new_bls or new_blspk
    node_dest = hexToFriendly(node.nodestack.verhex)
    # HA fields are None: only the BLS key is being updated
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None, None, None,
                         bls_key=key_in_txn,
                         services=None)
    # Wait for every *other* node to apply the pool ledger update
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward,
                    alias=randomString(5))
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    return new_blspk
def sdk_change_bls_key(looper, txnPoolNodeSet, node, sdk_pool_handle,
                       sdk_wallet_steward, add_wrong=False, new_bls=None):
    """Rotate a node's BLS key via a NODE txn and verify the pool still works.

    :param add_wrong: if True, send a random base58 string instead of the
        node's real key
    :param new_bls: optional explicit key to put in the txn; ignored when
        ``add_wrong`` is True
    :return: the freshly generated BLS public key
    """
    new_blspk = init_bls_keys(node.keys_dir, node.name)
    # b58encode returns bytes, hence the decode to put a str in the txn
    key_in_txn = new_bls or new_blspk \
        if not add_wrong \
        else base58.b58encode(randomString(128).encode()).decode("utf-8")
    node_dest = hexToFriendly(node.nodestack.verhex)
    # HA fields are None: only the BLS key is being updated
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None, None, None,
                         bls_key=key_in_txn,
                         services=None)
    # Wait for every *other* node to apply the pool ledger update
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
    return new_blspk
def sdk_node_theta_added(looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle,
                         sdk_wallet_trustee, allPluginsPath,
                         node_config_helper_class, testNodeClass, name=None):
    """Add a new steward NYM, send a NODE txn with a BLS key proof, start the
    node, and connect it to the pool.

    :return: tuple of (new steward wallet, new node)
    """
    new_steward_name = "testClientSteward" + randomString(3)
    new_node_name = name or "Theta"
    new_steward_wallet = sdk_add_new_nym(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         alias=new_steward_name,
                                         role=STEWARD_STRING)
    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
        prepare_new_node_data(tconf, tdir, new_node_name,
                              configClass=node_config_helper_class)
    # filling node request
    _, steward_did = new_steward_wallet
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name=new_node_name,
                             clientIp=clientIp,
                             clientPort=clientPort,
                             nodeIp=nodeIp,
                             nodePort=nodePort,
                             bls_key=bls_key,
                             sigseed=sigseed,
                             services=[VALIDATOR],
                             key_proof=key_proof))
    # sending request using 'sdk_' functions
    request_couple = sdk_sign_and_send_prepared_request(
        looper, new_steward_wallet, sdk_pool_handle, node_request)
    # waitng for replies
    sdk_get_and_check_replies(looper, [request_couple])
    new_node = create_and_start_new_node(looper, new_node_name, tdir, sigseed,
                                         (nodeIp, nodePort),
                                         (clientIp, clientPort),
                                         tconf, True, allPluginsPath,
                                         testNodeClass,
                                         configClass=node_config_helper_class)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_steward_wallet, new_node
def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet,
                                         tdir, tconf, sdk_pool_handle,
                                         sdk_wallet_client,
                                         sdk_wallet_steward):
    """Change a node's HA, restart the whole pool, and verify the new HA is
    picked up from the ledger after restart."""
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     'AnotherSteward' + randomString(4),
                                     'AnotherNode' + randomString(4),
                                     tdir,
                                     tconf)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    node_new_ha, client_new_ha = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(new_node, node_new_ha,
                                                   client_new_ha))
    # Making the change HA txn an confirming its succeeded
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         node_new_ha.host, node_new_ha.port,
                         client_new_ha.host, client_new_ha.port)
    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)
    # Starting nodes again by creating `Node` objects since that simulates
    # what happens when starting the node with script
    restartedNodes = []
    for node in txnPoolNodeSet[:-1]:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 config_helper=config_helper,
                                 config=tconf,
                                 ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)
    # Starting the node whose HA was changed
    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    node = TestNode(new_node.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=node_new_ha,
                    cliha=client_new_ha)
    looper.add(node)
    restartedNodes.append(node)
    looper.run(checkNodesConnected(restartedNodes))
    waitNodeDataEquality(looper, node, *restartedNodes[:-1])
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, restartedNodes,
                               sdk_wallet_client, sdk_pool_handle)
def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                 tconf, shouldBePrimary, tdir,
                 sdk_pool_handle, sdk_wallet_stewards):
    """Change the HA of a (primary or non-primary) node, restart it with the
    new HA, and verify elections complete and clients can still connect.

    :param shouldBePrimary: if True, pick the master-primary node; otherwise
        pick the first non-primary
    """
    # prepare new ha for node and client stack
    subjectedNode = None
    node_index = None
    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            node_index = nodeIndex
            break
    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))
    # change HA
    # stewards fixture parallels the node set, so index by the node's position
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    node_dest = hexToFriendly(subjectedNode.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, subjectedNode.name,
                         nodeStackNewHA[0], nodeStackNewHA[1],
                         clientStackNewHA[0], clientStackNewHA[1],
                         services=[VALIDATOR])
    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)
    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Allow extra time since restarting a primary can force re-elections
    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)
    # start client and check the node HA
    anotherClient, _ = genTestClient(tmpdir=tdirWithClientPoolTxns,
                                     usePoolLedger=True)
    looper.add(anotherClient)
    looper.run(eventually(anotherClient.ensureConnectedToNodes))
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 8)
def sdk_node_set_with_node_added_after_some_txns(
        txnPoolNodeSet, sdk_node_created_after_some_txns):
    """Attach the node created by ``sdk_node_created_after_some_txns`` to the
    pool set, wait for connectivity, and pass the fixture tuple through.

    :return: (looper, new_node, sdk_pool_handle, new_steward_wallet_handle)
    """
    looper, new_node, pool_handle, steward_wallet_handle = \
        sdk_node_created_after_some_txns
    txnPoolNodeSet.append(new_node)
    # The enlarged pool must be fully connected before tests proceed
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, pool_handle)
    return looper, new_node, pool_handle, steward_wallet_handle
def testNonStewardCannotAddNode(looper, txnPoolNodeSet, sdk_pool_handle,
                                sdk_wallet_client, tdir, tconf,
                                allPluginsPath):
    """A client without the STEWARD role must be rejected when adding a node."""
    new_node_name = "Epsilon"
    with pytest.raises(RequestRejectedException) as e:
        sdk_add_new_node(looper,
                         sdk_pool_handle,
                         sdk_wallet_client,
                         new_node_name,
                         tdir,
                         tconf,
                         allPluginsPath)
    # Use pytest's public ExceptionInfo API instead of the private _excinfo
    assert 'is not a steward so cannot add a ' in e.value.args[0]
    sdk_pool_refresh(looper, sdk_pool_handle)
def testStewardCannotAddMoreThanOneNode(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_steward,
                                        tdir, tconf, allPluginsPath):
    """A steward that already owns a node must be rejected when adding another."""
    new_node_name = "Epsilon"
    with pytest.raises(RequestRejectedException) as e:
        sdk_add_new_node(looper,
                         sdk_pool_handle,
                         sdk_wallet_steward,
                         new_node_name,
                         tdir,
                         tconf,
                         allPluginsPath)
    # Use pytest's public ExceptionInfo API instead of the private _excinfo
    assert 'already has a node' in e.value.args[0]
    sdk_pool_refresh(looper, sdk_pool_handle)
def changeNodeHa(looper, txnPoolNodeSet, tconf,
                 shouldBePrimary, tdir,
                 sdk_pool_handle, sdk_wallet_stewards,
                 sdk_wallet_client):
    """Change the HA of a (primary or non-primary) node, restart it with the
    new HA, and verify elections complete and requests are ordered.

    :param shouldBePrimary: if True, pick the master-primary node; otherwise
        pick the first non-primary
    """
    # prepare new ha for node and client stack
    subjectedNode = None
    node_index = None
    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            node_index = nodeIndex
            break
    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))
    # change HA
    # stewards fixture parallels the node set, so index by the node's position
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    node_dest = hexToFriendly(subjectedNode.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, subjectedNode.name,
                         nodeStackNewHA[0], nodeStackNewHA[1],
                         clientStackNewHA[0], clientStackNewHA[1],
                         services=[VALIDATOR])
    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)
    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))
    # Allow extra time since restarting a primary can force re-elections
    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 8)
def testNewNodeCatchupWhileIncomingRequests(looper, txnPoolNodeSet,
                                            testNodeClass, tdir, tconf,
                                            sdk_pool_handle,
                                            sdk_wallet_steward,
                                            allPluginsPath):
    """
    A new node joins while transactions are happening, its catchup requests
    include till where it has to catchup, which would be less than the other
    node's ledger size. In the meantime, the new node will stash all requests
    """
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    def chkAfterCall(self, req, frm):
        # Wrap processCatchupReq to assert the catchup upper bound never
        # exceeds the responding node's ledger size
        r = self.processCatchupReq(req, frm)
        typ = getattr(req, f.LEDGER_ID.nm)
        if typ == DOMAIN_LEDGER_ID:
            ledger = self.getLedgerForMsg(req)
            assert req.catchupTill <= ledger.size
        return r

    for node in txnPoolNodeSet:
        # Patch the CatchupReq route on each node and delay catchup replies
        node.nodeMsgRouter.routes[CatchupReq] = \
            types.MethodType(chkAfterCall, node.ledgerManager)
        node.nodeIbStasher.delay(cqDelay(3))
    print('Sending 5 requests')
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_steward, 5)
    looper.runFor(1)
    new_steward_name = randomString()
    new_node_name = "Epsilon"
    new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        new_steward_name, new_node_name, tdir, tconf,
        nodeClass=testNodeClass,
        allPluginsPath=allPluginsPath, autoStart=True)
    sdk_pool_refresh(looper, sdk_pool_handle)
    txnPoolNodeSet.append(new_node)
    looper.runFor(2)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
    # TODO select or create a timeout for this case in 'waits'
    looper.run(
        eventually(checkNodeDataForEquality, new_node, *txnPoolNodeSet[:-1],
                   retryWait=1, timeout=80))
    # The new node must have replayed requests it stashed during catchup
    assert new_node.spylog.count(TestNode.processStashedOrderedReqs) > 0
def sdk_node_theta_added(looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle,
                         sdk_wallet_trustee, allPluginsPath,
                         node_config_helper_class, testNodeClass, name=None):
    """Add a new steward NYM, send a NODE txn (no BLS key proof variant),
    start the node, and connect it to the pool.

    :return: tuple of (new steward wallet, new node)
    """
    new_steward_name = "testClientSteward" + randomString(3)
    new_node_name = name or "Theta"
    new_steward_wallet = sdk_add_new_nym(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         alias=new_steward_name,
                                         role=STEWARD_STRING)
    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort = \
        prepare_new_node_data(tconf, tdir, new_node_name,
                              configClass=node_config_helper_class)
    # filling node request
    _, steward_did = new_steward_wallet
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name=new_node_name,
                             clientIp=clientIp,
                             clientPort=clientPort,
                             nodeIp=nodeIp,
                             nodePort=nodePort,
                             bls_key=bls_key,
                             sigseed=sigseed,
                             services=[VALIDATOR]))
    # sending request using 'sdk_' functions
    request_couple = sdk_sign_and_send_prepared_request(looper,
                                                        new_steward_wallet,
                                                        sdk_pool_handle,
                                                        node_request)
    # waitng for replies
    sdk_get_and_check_replies(looper, [request_couple])
    new_node = create_and_start_new_node(looper, new_node_name, tdir, sigseed,
                                         (nodeIp, nodePort),
                                         (clientIp, clientPort),
                                         tconf, True, allPluginsPath,
                                         testNodeClass,
                                         configClass=node_config_helper_class)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_steward_wallet, new_node
def test_demote_nonexisted(looper, txnPoolNodeSet, sdk_pool_handle, tdir,
                           tconf, sdk_wallet_new_steward):
    """Add a node record that exists only on the ledger (never started) and
    demote it by sending a NODE txn with empty services."""
    dst, name = add_ne_node(looper, sdk_pool_handle, sdk_wallet_new_steward)
    assert dst
    sdk_pool_refresh(looper, sdk_pool_handle)
    # The ledger-only node shows up in nodeReg even though it never ran
    assert len(txnPoolNodeSet[0].nodeReg) == len(txnPoolNodeSet) + 1
    _, st_did = sdk_wallet_new_steward
    # services=[] demotes the node
    node_request = looper.loop.run_until_complete(
        prepare_node_request(st_did,
                             destination=dst,
                             new_node_name=name,
                             services=[]))
    request_couple = sdk_sign_and_send_prepared_request(looper,
                                                        sdk_wallet_new_steward,
                                                        sdk_pool_handle,
                                                        node_request)
    sdk_get_and_check_replies(looper, [request_couple])
def test_steward_suspends_node_and_promote_with_new_ha(
        looper, txnPoolNodeSet,
        tdir, tconf,
        sdk_pool_handle,
        sdk_wallet_steward,
        sdk_node_theta_added,
        poolTxnStewardData,
        allPluginsPath):
    """Demote a node, then promote it with a brand-new HA and verify peers
    pick up the new address and the pool stays functional."""
    new_steward_wallet, new_node = sdk_node_theta_added
    looper.run(checkNodesConnected(txnPoolNodeSet + [new_node]))
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               new_steward_wallet, sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(
            sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))
    new_node.stop()
    looper.removeProdable(new_node)
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    node_ha, client_ha = genHa(2)
    node_nym = hexToFriendly(new_node.nodestack.verhex)
    # Promote with services=[VALIDATOR] while simultaneously changing the HA
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_nym, new_node.name,
                         node_ha.host, node_ha.port,
                         client_ha.host, client_ha.port,
                         services=[VALIDATOR])
    new_node.nodestack.ha = node_ha
    new_node.clientstack.ha = client_ha
    nodeTheta = start_stopped_node(new_node, looper, tconf,
                                   tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    # Every peer must have learned the new HA from the pool ledger
    assert all(node.nodestack.remotes[new_node.name].ha == node_ha
               for node in txnPoolNodeSet)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
def test_node_alias_cannot_be_changed(looper, txnPoolNodeSet,
                                      sdk_pool_handle,
                                      sdk_node_theta_added):
    """
    The node alias cannot be changed.
    """
    new_steward_wallet, new_node = sdk_node_theta_added
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    with pytest.raises(RequestRejectedException) as e:
        # Same dest but a different alias ('foo') must be rejected
        sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                             node_dest, 'foo',
                             None, None,
                             None, None)
    # Use pytest's public ExceptionInfo API instead of the private _excinfo
    assert 'data has conflicts with request data' in e.value.args[0]
    sdk_pool_refresh(looper, sdk_pool_handle)
def testNodeRejectingInvalidTxns(looper, sdk_pool_handle, sdk_wallet_client,
                                 tconf, tdir, txnPoolNodeSet, patched_node,
                                 request, sdk_wallet_steward, testNodeClass,
                                 allPluginsPath, do_post_node_creation):
    """
    A newly joined node is catching up and sends catchup requests to other
    nodes but one of the nodes replies with incorrect transactions. The newly
    joined node detects that and rejects the transactions and thus blacklists
    the node. Ii thus cannot complete the process till the timeout and then
    requests the missing transactions.
    """
    txnCount = getValueFromModule(request, "txnCount", 5)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              txnCount)
    new_steward_name = randomString()
    new_node_name = "Epsilon"
    new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        new_steward_name, new_node_name, tdir, tconf,
        nodeClass=testNodeClass,
        allPluginsPath=allPluginsPath, autoStart=True,
        do_post_node_creation=do_post_node_creation)
    sdk_pool_refresh(looper, sdk_pool_handle)
    # patched_node is the peer rigged to reply with bad catchup txns
    bad_node = patched_node
    do_not_tell_clients_about_newly_joined_node(txnPoolNodeSet)
    logger.debug('Catchup request processor of {} patched'.format(bad_node))
    looper.run(checkNodesConnected(txnPoolNodeSet))
    # catchup #1 -> CatchupTransactionsTimeout -> catchup #2
    catchup_timeout = waits.expectedPoolCatchupTime(len(txnPoolNodeSet) + 1)
    timeout = 2 * catchup_timeout + tconf.CatchupTransactionsTimeout
    # have to skip seqno_db check because the txns are not executed
    # on the new node
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1],
                         customTimeout=timeout)
    assert new_node.isNodeBlacklisted(bad_node.name)
def test_add_node_with_not_unique_alias(looper, tdir, tconf,
                                        sdk_pool_handle,
                                        sdk_wallet_steward,
                                        allPluginsPath):
    """Adding a node whose alias already exists on the ledger is rejected."""
    # "Alpha" is the alias of an existing pool node
    new_node_name = "Alpha"
    new_steward_wallet, steward_did = sdk_add_new_nym(looper,
                                                      sdk_pool_handle,
                                                      sdk_wallet_steward,
                                                      alias="TEST_STEWARD1",
                                                      role='STEWARD')
    with pytest.raises(RequestRejectedException) as e:
        sdk_add_new_node(looper,
                         sdk_pool_handle,
                         (new_steward_wallet, steward_did),
                         new_node_name,
                         tdir,
                         tconf,
                         allPluginsPath)
    # Use pytest's public ExceptionInfo API instead of the private _excinfo
    assert 'existing data has conflicts with request data' in \
           e.value.args[0]
    sdk_pool_refresh(looper, sdk_pool_handle)
def testNodePortCannotBeChangedByAnotherSteward(looper, txnPoolNodeSet,
                                                sdk_wallet_steward,
                                                sdk_pool_handle,
                                                sdk_node_theta_added):
    """A steward who does not own a node must not be able to change its HA."""
    new_steward_wallet, new_node = sdk_node_theta_added
    node_new_ha, client_new_ha = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(new_node, node_new_ha,
                                                   client_new_ha))
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    with pytest.raises(RequestRejectedException) as e:
        # sdk_wallet_steward belongs to a different steward than the node's
        sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                             node_dest, new_node.name,
                             node_new_ha.host, node_new_ha.port,
                             client_new_ha.host, client_new_ha.port)
    # Use pytest's public ExceptionInfo API instead of the private _excinfo
    assert 'is not a steward of node' in e.value.args[0]
    sdk_pool_refresh(looper, sdk_pool_handle)
def sdk_node_created_after_some_txns_not_started(looper, testNodeClass,
                                                 do_post_node_creation,
                                                 sdk_pool_handle,
                                                 sdk_wallet_client,
                                                 sdk_wallet_steward,
                                                 txnPoolNodeSet,
                                                 tdir, tconf,
                                                 allPluginsPath, request):
    """Order some txns, then create (but do NOT start, autoStart=False) a new
    node plus its steward; yields the pieces for the test to start the node."""
    txnCount = getValueFromModule(request, "txnCount", 5)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              txnCount)
    new_steward_name = randomString()
    new_node_name = "Epsilon"
    new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        new_steward_name, new_node_name, tdir, tconf,
        nodeClass=testNodeClass,
        allPluginsPath=allPluginsPath, autoStart=False,
        do_post_node_creation=do_post_node_creation)
    sdk_pool_refresh(looper, sdk_pool_handle)
    yield looper, new_node, sdk_pool_handle, new_steward_wallet_handle
def sdk_node_created_after_some_txns(looper, testNodeClass,
                                     do_post_node_creation,
                                     sdk_pool_handle, sdk_wallet_client,
                                     sdk_wallet_steward,
                                     txnPoolNodeSet, tdir, tconf,
                                     allPluginsPath, request):
    """Order some txns, then create and start (autoStart=True) a new node plus
    its steward; yields (looper, node, pool handle, steward wallet)."""
    txnCount = getValueFromModule(request, "txnCount", 5)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              txnCount)
    new_steward_name = randomString()
    new_node_name = "Epsilon"
    new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        new_steward_name, new_node_name, tdir, tconf,
        nodeClass=testNodeClass,
        allPluginsPath=allPluginsPath, autoStart=True,
        do_post_node_creation=do_post_node_creation)
    sdk_pool_refresh(looper, sdk_pool_handle)
    yield looper, new_node, sdk_pool_handle, new_steward_wallet_handle
def test_steward_suspends_node_and_promote_with_new_ha(
        looper, txnPoolNodeSet,
        tdir, tconf,
        sdk_pool_handle,
        sdk_wallet_steward,
        sdk_node_theta_added,
        poolTxnStewardData,
        allPluginsPath):
    """Demote a node, then promote it with a brand-new HA and verify peers
    pick up the new address and the pool stays functional."""
    new_steward_wallet, new_node = sdk_node_theta_added
    looper.run(checkNodesConnected(txnPoolNodeSet + [new_node]))
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               new_steward_wallet, sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(
            sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))
    new_node.stop()
    looper.removeProdable(new_node)
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    node_ha, client_ha = genHa(2)
    node_nym = hexToFriendly(new_node.nodestack.verhex)
    # Promote with services=[VALIDATOR] while simultaneously changing the HA
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_nym, new_node.name,
                         node_ha.host, node_ha.port,
                         client_ha.host, client_ha.port,
                         services=[VALIDATOR])
    new_node.nodestack.ha = node_ha
    new_node.clientstack.ha = client_ha
    nodeTheta = start_stopped_node(new_node, looper, tconf,
                                   tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    # Every peer must have learned the new HA from the pool ledger
    assert all(node.nodestack.remotes[new_node.name].ha == node_ha
               for node in txnPoolNodeSet)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
def update_bls_keys_no_proof(node_index, sdk_wallet_stewards,
                             sdk_pool_handle, looper, txnPoolNodeSet):
    """Send a NODE txn updating a node's BLS key WITHOUT a key proof
    (key_proof=None), then wait for the rest of the pool to apply it.

    :return: the freshly generated BLS public key
    """
    node = txnPoolNodeSet[node_index]
    # stewards fixture parallels the node set, so index by the node's position
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    node_dest = hexToFriendly(node.nodestack.verhex)
    # key_proof deliberately omitted to exercise the no-proof path
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None, None, None,
                         bls_key=new_blspk,
                         services=None,
                         key_proof=None)
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_blspk
def testNewNodeCatchupWhileIncomingRequests(looper, txnPoolNodeSet,
                                            testNodeClass, tdir,
                                            tconf, sdk_pool_handle,
                                            sdk_wallet_steward,
                                            allPluginsPath):
    """
    A new node joins while transactions are happening, its catchup requests
    include till where it has to catchup, which would be less than the other
    node's ledger size. In the meantime, the new node will stash all requests
    """
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    def chkAfterCall(self, req, frm):
        # Wrapper around the ledger manager's CatchupReq handler: after
        # normal processing, assert that the requested catchupTill for the
        # domain ledger never exceeds the serving node's ledger size.
        r = self.processCatchupReq(req, frm)
        typ = getattr(req, f.LEDGER_ID.nm)
        if typ == DOMAIN_LEDGER_ID:
            ledger = self.getLedgerForMsg(req)
            assert req.catchupTill <= ledger.size
        return r

    for node in txnPoolNodeSet:
        # Re-route CatchupReq through the checking wrapper, bound to each
        # node's ledger manager, and delay catchup replies a bit.
        node.nodeMsgRouter.routes[CatchupReq] = \
            types.MethodType(chkAfterCall, node.ledgerManager)
        node.nodeIbStasher.delay(cqDelay(3))
    print('Sending 5 requests')
    # Fire-and-forget so the new node joins while these are in flight.
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_steward, 5)
    looper.runFor(1)
    new_steward_name = randomString()
    new_node_name = "Epsilon"
    new_steward_wallet_handle, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        new_steward_name, new_node_name, tdir, tconf,
        nodeClass=testNodeClass,
        allPluginsPath=allPluginsPath, autoStart=True)
    sdk_pool_refresh(looper, sdk_pool_handle)
    txnPoolNodeSet.append(new_node)
    looper.runFor(2)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
    # TODO select or create a timeout for this case in 'waits'
    looper.run(eventually(checkNodeDataForEquality, new_node,
                          *txnPoolNodeSet[:-1], retryWait=1, timeout=150))
    # The new node must have stashed (and later processed) ordered requests
    # that arrived during its catchup.
    assert new_node.spylog.count(TestNode.processStashedOrderedReqs) > 0
def testAdd2NewNodes(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward,
                     tdir, tconf, allPluginsPath):
    """
    Add 2 new nodes to trigger replica addition and primary election
    """
    added = sdk_add_2_nodes(looper, txnPoolNodeSet, sdk_pool_handle,
                            sdk_wallet_steward, tdir, tconf, allPluginsPath)
    for added_node in added:
        logger.debug("{} connected to the pool".format(added_node))

    expected_f = getMaxFailures(len(txnPoolNodeSet))

    def _assert_f_and_replica_count():
        # After the pool grows, every node must agree on the new f
        # and run f + 1 replicas.
        for pool_node in txnPoolNodeSet:
            assert pool_node.f == expected_f
            assert len(pool_node.replicas) == (expected_f + 1)

    wait_timeout = waits.expectedClientToPoolConnectionTimeout(
        len(txnPoolNodeSet))
    looper.run(eventually(_assert_f_and_replica_count,
                          retryWait=1, timeout=wait_timeout))
    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
    sdk_pool_refresh(looper, sdk_pool_handle)
def sdk_node_created_after_some_txns(looper, testNodeClass, do_post_node_creation,
                                     sdk_pool_handle, sdk_wallet_client,
                                     sdk_wallet_steward, txnPoolNodeSet,
                                     tdir, tconf, allPluginsPath, request, setup):
    """
    Fixture: send some random txns, then add a new steward and a new node
    "Epsilon" whose creation hook registers the config read/write request
    handlers before delegating to ``do_post_node_creation``.

    Yields ``(looper, new_node, sdk_pool_handle, new_steward_wallet_handle)``.
    """
    def _register_config_handlers(node):
        # Wire the config ledger read/write handlers into the node and
        # whitelist their txn types with the core client authenticator.
        writer = WriteConfHandler(node.db_manager)
        reader = ReadConfHandler(node.db_manager)
        node.write_manager.register_req_handler(writer)
        node.read_manager.register_req_handler(reader)
        authnr = node.clientAuthNr.core_authenticator
        authnr._write_types.add(writer.txn_type)
        authnr._query_types.add(reader.txn_type)
        do_post_node_creation(node)
        return node

    # Number of warm-up txns can be overridden by the requesting module.
    txn_count = getValueFromModule(request, "txnCount", 5)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, txn_count)
    steward_name = randomString()
    node_name = "Epsilon"
    steward_wallet_handle, epsilon_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        steward_name, node_name, tdir, tconf,
        nodeClass=testNodeClass, allPluginsPath=allPluginsPath,
        autoStart=True, do_post_node_creation=_register_config_handlers)
    sdk_pool_refresh(looper, sdk_pool_handle)
    yield looper, epsilon_node, sdk_pool_handle, steward_wallet_handle
def test_number_txns_in_catchup_and_vc_queue_valid(looper, txnPoolNodeSet, tconf,
                                                   sdk_pool_handle,
                                                   sdk_wallet_steward):
    """
    Disconnect the master primary to force a view change, send some txns
    while it is away, reconnect it so it catches up, and then check the
    validator-info output: number of txns caught up, current view number,
    the VCDone_queue entries and Last_complete_view_no.
    """
    num_txns = 5
    master_node = get_master_primary_node(txnPoolNodeSet)
    old_view = master_node.viewNo
    expected_view_no = old_view + 1
    # Dropping the master primary triggers a view change on the remaining
    # nodes; stopNode=False keeps the node process alive for reconnection.
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            master_node, stopNode=False)
    # NOTE(review): txnPoolNodeSet[1:] assumes the master primary is the
    # node at index 0 — confirm this holds for the fixture's initial pool.
    looper.run(eventually(checkViewNoForNodes, txnPoolNodeSet[1:],
                          expected_view_no, retryWait=1,
                          timeout=tconf.VIEW_CHANGE_TIMEOUT))
    sdk_pool_refresh(looper, sdk_pool_handle)
    # These txns happen while the old primary is away; it must catch them
    # up on reconnect.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, num_txns)
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, master_node)
    waitNodeDataEquality(looper, master_node, *txnPoolNodeSet[-1:])
    latest_info = master_node._info_tool.info
    # Catchup stats must reflect exactly the txns missed while offline.
    assert latest_info['Node_info']['Catchup_status']['Number_txns_in_catchup'][1] == num_txns
    assert latest_info['Node_info']['View_change_status']['View_No'] == expected_view_no
    node_names = [n.name for n in txnPoolNodeSet[1:]]
    for node_name in node_names:
        # Each other node's VCDone entry names the new master primary and
        # carries a truthy payload.
        assert latest_info['Node_info']['View_change_status']['VCDone_queue'][node_name][0] == master_node.master_primary_name
        assert latest_info['Node_info']['View_change_status']['VCDone_queue'][node_name][1]
    assert latest_info['Node_info']['View_change_status']['Last_complete_view_no'] == expected_view_no
def testCatchupDelayedNodes(txnPoolNodeSet,
                            sdk_node_set_with_node_added_after_some_txns,
                            sdk_wallet_steward,
                            txnPoolCliNodeReg, tdirWithPoolTxns,
                            tconf, tdir, allPluginsPath):
    """
    Node sends catchup request to other nodes for only those sequence numbers
    that other nodes have. Have pool of connected nodes with some transactions
    made and then two more nodes say X and Y will join where Y node will start
    its catchup process after some time. The node starting late, i.e. Y should
    not receive any catchup requests
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    stewardXName = "testClientStewardX"
    nodeXName = "Zeta"
    stewardYName = "testClientStewardY"
    nodeYName = "Eta"
    # Fix: removed unused leftovers stewardZName / nodeZName ("Theta") —
    # no third node is ever added by this test.
    # X's catchup is delayed much longer than Y's so that Y starts (and
    # finishes) catching up while X is still waiting.
    delayX = 45
    delayY = 2
    stewardX, nodeX = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        stewardXName, nodeXName, tdir, tconf, autoStart=False,
        allPluginsPath=allPluginsPath)
    stewardY, nodeY = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        stewardYName, nodeYName, tdir, tconf, autoStart=False,
        allPluginsPath=allPluginsPath)
    # Delay incoming ConsistencyProof messages to postpone each catchup.
    nodeX.nodeIbStasher.delay(cpDelay(delayX))
    nodeY.nodeIbStasher.delay(cpDelay(delayY))
    looper.add(nodeX)
    looper.add(nodeY)
    txnPoolNodeSet.append(nodeX)
    txnPoolNodeSet.append(nodeY)
    # Allow for both artificial delays on top of the normal catchup time.
    timeout = waits.expectedPoolCatchupTime(
        len(txnPoolNodeSet)) + delayX + delayY
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=timeout))
    logger.debug("Stopping 2 newest nodes, {} and {}".format(
        nodeX.name, nodeY.name))
    nodeX.stop()
    nodeY.stop()
    logger.debug("Sending requests")
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Build up a backlog of 50 txns that X and Y will have to catch up.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 50)
    logger.debug("Starting the 2 stopped nodes, {} and {}".format(
        nodeX.name, nodeY.name))
    nodeX.start(looper.loop)
    nodeY.start(looper.loop)
    # Both late nodes must converge to the data of the original 5 nodes.
    waitNodeDataEquality(looper, nodeX, *txnPoolNodeSet[:5])
    waitNodeDataEquality(looper, nodeY, *txnPoolNodeSet[:5])
def test_sdk_pool_refresh(looper, txnPoolNodeSet, sdk_pool_handle,
                          sdk_wallet_client):
    """
    Refreshing the SDK pool-ledger handle must leave the pool usable:
    a single write request still goes through afterwards.
    """
    # Re-read the pool ledger into the SDK handle.
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Prove the refreshed handle still works end-to-end with one request.
    num_requests = 1
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, num_requests)
def test_new_node_catchup_update_projection(looper, nodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, some_transactions_done): """ A node which receives txns from catchup updates both ledger and projection 4 nodes start up and some txns happen, after txns are done, new node joins and starts catching up, the node should not process requests while catchup is in progress. Make sure the new requests are coming from the new NYMs added while the node was offline or catching up. """ # Create a new node and stop it. new_steward_wallet, new_node = sdk_node_theta_added( looper, nodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, node_config_helper_class=NodeConfigHelper, testNodeClass=TestNode) waitNodeDataEquality(looper, new_node, *nodeSet[:-1]) ta_count = 2 np_count = 2 new_txn_count = 2 * ta_count + np_count # Since ATTRIB txn is done for TA old_ledger_sizes = {} new_ledger_sizes = {} old_projection_sizes = {} new_projection_sizes = {} old_seq_no_map_sizes = {} new_seq_no_map_sizes = {} def get_ledger_size(node): return len(node.domainLedger) def get_projection_size(node): domain_state = node.getState(DOMAIN_LEDGER_ID) return len(domain_state.as_dict) def get_seq_no_map_size(node): return node.seqNoDB.size def fill_counters(ls, ps, ss, nodes): for n in nodes: ls[n.name] = get_ledger_size(n) ps[n.name] = get_projection_size(n) ss[n.name] = get_seq_no_map_size(n) def check_sizes(nodes): for node in nodes: assert new_ledger_sizes[node.name] - \ old_ledger_sizes[node.name] == new_txn_count assert new_projection_sizes[node.name] - \ old_projection_sizes[node.name] == new_txn_count assert new_seq_no_map_sizes[node.name] - \ old_seq_no_map_sizes[node.name] == new_txn_count # Stop a node and note down the sizes of ledger and projection (state) other_nodes = nodeSet[:-1] fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, other_nodes) new_node.cleanupOnStopping = False # new_node.stop() # 
looper.removeProdable(new_node) # ensure_node_disconnected(looper, new_node, other_nodes) disconnect_node_and_ensure_disconnected(looper, nodeSet, new_node.name) looper.removeProdable(name=new_node.name) trust_anchors = [] attributes = [] for i in range(ta_count): trust_anchors.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, role='TRUST_ANCHOR', alias='TA' + str(i))) attributes.append((randomString(6), randomString(10))) sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1], *attributes[-1]) non_privileged = [] for i in range(np_count): non_privileged.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, alias='NP' + str(i))) checkNodeDataForEquality(nodeSet[0], *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, other_nodes) # The size difference should be same as number of new NYM txns check_sizes(other_nodes) new_node = start_stopped_node(new_node, looper, tconf, tdir, allPluginsPath) nodeSet[-1] = new_node fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, [new_node]) looper.run(checkNodesConnected(nodeSet)) sdk_pool_refresh(looper, sdk_pool_handle) waitNodeDataEquality(looper, new_node, *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, [new_node]) check_sizes([new_node]) # Set the old counters to be current ledger and projection size fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, nodeSet) more_nyms_count = 2 for wh in trust_anchors: for i in range(more_nyms_count): non_privileged.append( sdk_add_new_nym(looper, sdk_pool_handle, wh, alias='NP1' + str(i))) # The new node should process transactions done by Nyms added to its # ledger while catchup fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, nodeSet) new_txn_count = more_nyms_count * len(trust_anchors) check_sizes(nodeSet)
def testCatchupDelayedNodes(txnPoolNodeSet,
                            sdk_node_set_with_node_added_after_some_txns,
                            sdk_wallet_steward,
                            txnPoolCliNodeReg, tdirWithPoolTxns,
                            tconf, tdir, allPluginsPath):
    """
    Node sends catchup request to other nodes for only those sequence numbers
    that other nodes have. Have pool of connected nodes with some transactions
    made and then two more nodes say X and Y will join where Y node will start
    its catchup process after some time. The node starting late, i.e. Y should
    not receive any catchup requests
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    stewardXName = "testClientStewardX"
    nodeXName = "Zeta"
    stewardYName = "testClientStewardY"
    nodeYName = "Eta"
    # Fix: removed unused leftovers stewardZName / nodeZName ("Theta") —
    # no third node is ever added by this test.
    # X's catchup is delayed much longer than Y's so that Y starts (and
    # finishes) catching up while X is still waiting.
    delayX = 45
    delayY = 2
    stewardX, nodeX = sdk_add_new_steward_and_node(looper,
                                                   sdk_pool_handle,
                                                   sdk_wallet_steward,
                                                   stewardXName,
                                                   nodeXName,
                                                   tdir,
                                                   tconf,
                                                   autoStart=False,
                                                   allPluginsPath=allPluginsPath)
    stewardY, nodeY = sdk_add_new_steward_and_node(looper,
                                                   sdk_pool_handle,
                                                   sdk_wallet_steward,
                                                   stewardYName,
                                                   nodeYName,
                                                   tdir,
                                                   tconf,
                                                   autoStart=False,
                                                   allPluginsPath=allPluginsPath)
    # Delay incoming ConsistencyProof messages to postpone each catchup.
    nodeX.nodeIbStasher.delay(cpDelay(delayX))
    nodeY.nodeIbStasher.delay(cpDelay(delayY))
    looper.add(nodeX)
    looper.add(nodeY)
    txnPoolNodeSet.append(nodeX)
    txnPoolNodeSet.append(nodeY)
    # Allow for both artificial delays on top of the normal catchup time.
    timeout = waits.expectedPoolCatchupTime(
        len(txnPoolNodeSet)) + delayX + delayY
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=timeout))
    logger.debug("Stopping 2 newest nodes, {} and {}".format(nodeX.name,
                                                             nodeY.name))
    nodeX.stop()
    nodeY.stop()
    logger.debug("Sending requests")
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Build up a backlog of 50 txns that X and Y will have to catch up.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 50)
    logger.debug("Starting the 2 stopped nodes, {} and {}".format(nodeX.name,
                                                                  nodeY.name))
    nodeX.start(looper.loop)
    nodeY.start(looper.loop)
    # Both late nodes must converge to the data of the original 5 nodes.
    waitNodeDataEquality(looper, nodeX, *txnPoolNodeSet[:5])
    waitNodeDataEquality(looper, nodeY, *txnPoolNodeSet[:5])
def test_new_node_catchup_update_projection(looper, nodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, some_transactions_done): """ A node which receives txns from catchup updates both ledger and projection 4 nodes start up and some txns happen, after txns are done, new node joins and starts catching up, the node should not process requests while catchup is in progress. Make sure the new requests are coming from the new NYMs added while the node was offline or catching up. """ # Create a new node and stop it. new_steward_wallet, new_node = sdk_node_theta_added(looper, nodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, node_config_helper_class=NodeConfigHelper, testNodeClass=TestNode) waitNodeDataEquality(looper, new_node, *nodeSet[:-1]) ta_count = 2 np_count = 2 new_txn_count = 2 * ta_count + np_count # Since ATTRIB txn is done for TA old_ledger_sizes = {} new_ledger_sizes = {} old_projection_sizes = {} new_projection_sizes = {} old_seq_no_map_sizes = {} new_seq_no_map_sizes = {} def get_ledger_size(node): return len(node.domainLedger) def get_projection_size(node): domain_state = node.getState(DOMAIN_LEDGER_ID) return len(domain_state.as_dict) def get_seq_no_map_size(node): return node.seqNoDB.size def fill_counters(ls, ps, ss, nodes): for n in nodes: ls[n.name] = get_ledger_size(n) ps[n.name] = get_projection_size(n) ss[n.name] = get_seq_no_map_size(n) def check_sizes(nodes): for node in nodes: assert new_ledger_sizes[node.name] - \ old_ledger_sizes[node.name] == new_txn_count assert new_projection_sizes[node.name] - \ old_projection_sizes[node.name] == new_txn_count assert new_seq_no_map_sizes[node.name] - \ old_seq_no_map_sizes[node.name] == new_txn_count # Stop a node and note down the sizes of ledger and projection (state) other_nodes = nodeSet[:-1] fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, other_nodes) new_node.cleanupOnStopping = False # new_node.stop() # 
looper.removeProdable(new_node) # ensure_node_disconnected(looper, new_node, other_nodes) disconnect_node_and_ensure_disconnected(looper, nodeSet, new_node.name) looper.removeProdable(name=new_node.name) trust_anchors = [] attributes = [] for i in range(ta_count): trust_anchors.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, role='TRUST_ANCHOR', alias='TA' + str(i))) attributes.append((randomString(6), randomString(10))) sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1], *attributes[-1]) non_privileged = [] for i in range(np_count): non_privileged.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, alias='NP' + str(i))) checkNodeDataForEquality(nodeSet[0], *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, other_nodes) # The size difference should be same as number of new NYM txns check_sizes(other_nodes) new_node = start_stopped_node(new_node, looper, tconf, tdir, allPluginsPath) nodeSet[-1] = new_node fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, [new_node]) looper.run(checkNodesConnected(nodeSet)) sdk_pool_refresh(looper, sdk_pool_handle) waitNodeDataEquality(looper, new_node, *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, [new_node]) check_sizes([new_node]) # Set the old counters to be current ledger and projection size fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, nodeSet) more_nyms_count = 2 for wh in trust_anchors: for i in range(more_nyms_count): non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh, alias='NP1' + str(i))) # The new node should process transactions done by Nyms added to its # ledger while catchup fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, nodeSet) new_txn_count = more_nyms_count * len(trust_anchors) check_sizes(nodeSet)