def initLocalKeys(name, keys_dir, sigseed, *, use_bls, override=False):
    """Generate and print the local stack keys for a node, optionally with BLS keys.

    `use_bls` and `override` are keyword-only so call sites must name them.
    Returns a 4-tuple ``(pubkey, verkey, blspk, key_proof)``; the BLS pair
    is ``(None, None)`` when `use_bls` is False.
    """
    pubkey, verkey = nodeStackClass.initLocalKeys(name, keys_dir, sigseed,
                                                  override=override)
    print("Public key is", hexToFriendly(pubkey))
    print("Verification key is", hexToFriendly(verkey))
    if use_bls:
        blspk, key_proof = init_bls_keys(keys_dir, name, sigseed)
    else:
        blspk, key_proof = None, None
    return pubkey, verkey, blspk, key_proof
def sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle,
                       sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=None):
    """Rotate the BLS key of `node` via a NODE txn and wait for pool data equality.

    :param add_wrong: if True, send a random (invalid) base58 key instead
    :param new_bls: explicit key to put in the txn (overrides the generated one)
    :return: the freshly generated BLS public key
    """
    # NOTE(review): this version assumes init_bls_keys returns just the key;
    # other variants in the codebase return (key, proof) — confirm vintage.
    new_blspk = init_bls_keys(node.keys_dir, node.name)
    if add_wrong:
        # base58.b58encode returns bytes (base58>=1.0): decode so the txn
        # field carries a str, matching the sibling helper that does so.
        key_in_txn = base58.b58encode(randomString(128).encode()).decode("utf-8")
    else:
        key_in_txn = new_bls or new_blspk
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None,
                         None, None,
                         bls_key=key_in_txn,
                         services=None)
    # all nodes except the updated one must converge to the same data
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    # smoke-check the pool still processes writes
    sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward,
                    alias=randomString(5))
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    return new_blspk
def testValidatorSuspensionByTrustee(trustee, trusteeWallet, looper, nodeSet):
    """A trustee suspends the last validator; every remaining node and the
    trustee client must drop it from their node registries."""
    suspended = nodeSet[-1]
    suspendNode(looper, trustee, trusteeWallet,
                hexToFriendly(suspended.nodestack.verhex), suspended.name)
    for peer in nodeSet[:-1]:
        looper.run(eventually(checkNodeNotInNodeReg, peer, suspended.name))
    looper.run(eventually(checkNodeNotInNodeReg, trustee, suspended.name))
def sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle,
                       sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=None,
                       new_key_proof=None):
    # Rotate `node`'s BLS key (and proof of possession) via a NODE txn,
    # then wait for the rest of the pool to reach data equality.
    # `add_wrong` generates a fresh key pair that does not match the node's
    # stored keys; `new_bls`/`new_key_proof` override the generated values.
    # Returns the generated BLS public key.
    if add_wrong:
        _, new_blspk, key_proof = create_default_bls_crypto_factory().generate_bls_keys()
    else:
        new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    key_in_txn = new_bls or new_blspk
    bls_key_proof = new_key_proof or key_proof
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None,
                         None, None,
                         bls_key=key_in_txn,
                         services=None,
                         key_proof=bls_key_proof)
    # compare the updated node against everyone else
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward,
                               sdk_pool_handle)
    return new_blspk
def __init__(self, identifier=None, seed=None, alias=None): """ Initialize the signer with an identifier and a seed. :param identifier: some identifier that directly or indirectly references this client :param seed: the seed used to generate a signing key. """ # should be stored securely/privately self.seed = seed if seed else randombytes(32) # generates key pair based on seed self.sk = SigningKey(seed=self.seed) # helper for signing self.naclSigner = NaclSigner(self.sk) # this is the public key used to verify signatures (securely shared # before-hand with recipient) hex_verkey = hexlify(self.naclSigner.verraw) self.verkey = hexToFriendly(hex_verkey) self._identifier = identifier or self.verkey self._alias = alias
def promote_node(looper, steward_wallet, sdk_pool_handle, node):
    """Promote `node` back into the pool by sending a NODE txn whose
    services list contains VALIDATOR (all HA fields left unchanged)."""
    dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet, sdk_pool_handle,
                         dest, node.name,
                         None, None,
                         None, None,
                         services=[VALIDATOR])
def testChangeHaPersistsPostNodesRestart(looper, txnPoolNodeSet,
                                         tdir, tconf,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         sdk_wallet_steward):
    # Add a new steward+node, change the node's HA via a NODE txn, restart
    # the whole pool and verify the new HA survived the restart.
    new_steward_wallet, new_node = \
        sdk_add_new_steward_and_node(looper,
                                     sdk_pool_handle,
                                     sdk_wallet_steward,
                                     'AnotherSteward' + randomString(4),
                                     'AnotherNode' + randomString(4),
                                     tdir,
                                     tconf)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    node_new_ha, client_new_ha = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(new_node, node_new_ha,
                                                   client_new_ha))
    # Making the change-HA txn and confirming it succeeded
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         node_new_ha.host, node_new_ha.port,
                         client_new_ha.host, client_new_ha.port)
    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)
    # Starting nodes again by creating `Node` objects since that simulates
    # what happens when starting the node with script
    restartedNodes = []
    for node in txnPoolNodeSet[:-1]:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 config_helper=config_helper,
                                 config=tconf,
                                 ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)
    # Starting the node whose HA was changed
    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    node = TestNode(new_node.name,
                    config_helper=config_helper,
                    config=tconf,
                    ha=node_new_ha,
                    cliha=client_new_ha)
    looper.add(node)
    restartedNodes.append(node)
    looper.run(checkNodesConnected(restartedNodes))
    waitNodeDataEquality(looper, node, *restartedNodes[:-1])
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, restartedNodes,
                               sdk_wallet_client, sdk_pool_handle)
def changeNodeHa(looper, txnPoolNodeSet, tconf, shouldBePrimary, tdir,
                 sdk_pool_handle, sdk_wallet_stewards, sdk_wallet_client):
    # Pick a node by primary-ness, send a NODE txn with fresh HAs,
    # restart it on the new HAs and verify the pool recovers.
    # prepare new ha for node and client stack
    subjectedNode = None
    node_index = None
    for nodeIndex, n in enumerate(txnPoolNodeSet):
        # select the first node whose master-primary status matches the flag
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            node_index = nodeIndex
            break
    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))
    # change HA; each node is owned by the steward with the same index
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    node_dest = hexToFriendly(subjectedNode.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, subjectedNode.name,
                         nodeStackNewHA[0], nodeStackNewHA[1],
                         clientStackNewHA[0], clientStackNewHA[1],
                         services=[VALIDATOR])
    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)
    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    # nodeIndex still holds the subjected node's position from the loop above
    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))
    # allow extra time for possible re-elections after the restart
    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper, txnPoolNodeSet,
                        retryWait=1, customTimeout=electionTimeout)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 8)
def test_primary_selection_after_primary_demotion_and_pool_restart(looper,
                                                                   txnPoolNodeSet,
                                                                   sdk_pool_handle,
                                                                   sdk_wallet_steward,
                                                                   txnPoolMasterNodes,
                                                                   tdir, tconf):
    """
    Demote primary and restart the pool.
    Pool should select new primary and have viewNo=0 after restart.
    """
    logger.info("1. turn off the node which has primary replica for master instanse")
    master_node = txnPoolMasterNodes[0]
    node_dest = hexToFriendly(master_node.nodestack.verhex)
    # empty services list == demote the node
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle, node_dest, master_node.name,
                         None, None,
                         None, None,
                         services=[])
    restNodes = [node for node in txnPoolNodeSet
                 if node.name != master_node.name]
    ensureElectionsDone(looper, restNodes)
    # ensure pool is working properly
    logger.info("2. restart pool")
    # Stopping existing nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)
    # Starting nodes again by creating `Node` objects since that simulates
    # what happens when starting the node with script
    restartedNodes = []
    for node in txnPoolNodeSet:
        config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
        restartedNode = TestNode(node.name,
                                 config_helper=config_helper,
                                 config=tconf,
                                 ha=node.nodestack.ha,
                                 cliha=node.clientstack.ha)
        looper.add(restartedNode)
        restartedNodes.append(restartedNode)
    restNodes = [node for node in restartedNodes
                 if node.name != master_node.name]
    looper.run(checkNodesConnected(restNodes))
    ensureElectionsDone(looper, restNodes)
    # viewNo must reset to 0 after a full pool restart
    checkViewNoForNodes(restNodes, 0)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)
    # the demoted node must not be selected as a primary
    primariesIdxs = getPrimaryNodesIdxs(restNodes)
    assert restNodes[primariesIdxs[0]].name != master_node.name
def test_primary_selection_after_demoted_primary_node_promotion(
        looper, txnPoolNodeSet, sdk_pool_handle,
        sdk_wallet_steward, txnPoolMasterNodes):
    """
    Demote primary of master instance, wait for view change and promote it back.
    Check primaries for instances.
    """
    assert len(txnPoolNodeSet) == 4
    # Check primaries after test setup.
    primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet)
    assert len(primariesIdxs) == 2
    assert primariesIdxs[0] == 0
    assert primariesIdxs[1] == 1
    master_node = txnPoolMasterNodes[0]
    # Demote primary of master instance (empty services list).
    node_dest = hexToFriendly(master_node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle, node_dest, master_node.name,
                         None, None,
                         None, None,
                         services=[])
    restNodes = [node for node in txnPoolNodeSet
                 if node.name != master_node.name]
    ensureElectionsDone(looper, restNodes)
    # Check that there is only one instance now, check it's primary.
    primariesIdxs = getPrimaryNodesIdxs(restNodes)
    assert len(primariesIdxs) == 1
    assert primariesIdxs[0] == 1
    # Ensure pool is working properly.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)
    # Promote demoted node back.
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle, node_dest, master_node.name,
                         None, None,
                         None, None,
                         services=[VALIDATOR])
    # Ensure pool is working properly.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)
    # Check that there are two instances again, check their primaries.
    primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet)
    assert len(primariesIdxs) == 2
    assert primariesIdxs[0] == 2
    assert primariesIdxs[1] == 3
def _node_txn(self):
    """Build and return the NODE txn describing this helper's node.

    Bug fix: the original read ``self.node.blsley_proof`` — an apparent
    typo of ``blskey_proof`` (cf. ``self.node.blskey`` just above), which
    would raise AttributeError at runtime. TODO(review): confirm the exact
    attribute name on the node object.
    """
    node_nym = hexToFriendly(self.node.verkey)
    return self.node_txn(steward_nym=self.nym,
                         node_name=self.node.name,
                         nym=node_nym,
                         ip=self.node.ha[0],
                         node_port=self.node.ha[1],
                         client_ip=self.node.cliha[0],
                         client_port=self.node.cliha[1],
                         blskey=self.node.blskey,
                         bls_key_proof=self.node.blskey_proof)
def testChangeNodeHaBack(looper, txnPoolNodeSet,
                         sdk_pool_handle, sdk_node_theta_added,
                         tconf, tdir):
    """
    The case:
    The Node HA is updated with some HA (let's name it 'correct' HA).
    Then the Steward makes a mistake and sends the NODE txn with other HA
    ('wrong' HA). The Steward replaces back 'wrong' HA by 'correct' HA sending
    yet another one NODE txn.
    """
    new_steward_wallet, new_node = sdk_node_theta_added
    client_ha = new_node.cliNodeReg['ThetaC']  # use the same client HA
    # do all exercises without the Node
    new_node.stop()
    looper.removeProdable(name=new_node.name)
    # step 1: set 'correct' HA
    correct_node_ha = genHa(1)
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         correct_node_ha.host, correct_node_ha.port,
                         client_ha.host, client_ha.port)
    # step 2: set 'wrong' HA
    wrong_node_ha = genHa(1)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         wrong_node_ha.host, wrong_node_ha.port,
                         client_ha.host, client_ha.port)
    # step 3: set 'correct' HA back
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         correct_node_ha.host, correct_node_ha.port,
                         client_ha.host, client_ha.port)
    # In order to save the time the pool connection is not maintaining
    # during the steps, only the final result is checked.
    config_helper = PNodeConfigHelper(new_node.name, tconf, chroot=tdir)
    restartedNode = TestNode(new_node.name,
                             config_helper=config_helper,
                             config=tconf, ha=correct_node_ha,
                             cliha=client_ha)
    looper.add(restartedNode)
    txnPoolNodeSet[-1] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
    # check Theta HA: every node must see the 'correct' HA in its registry
    for n in txnPoolNodeSet:
        assert n.nodeReg['Theta'] == correct_node_ha
def test_try_change_node_alias(looper, txnPoolNodeSet, sdk_pool_handle,
                               sdk_wallet_stewards):
    """Sending a NODE txn with a different alias for an existing node
    must be rejected by the pool."""
    target = txnPoolNodeSet[1]
    dest = hexToFriendly(target.nodestack.verhex)
    renamed = target.name + '-foo'
    with pytest.raises(RequestRejectedException) as e:
        sdk_send_update_node(looper, sdk_wallet_stewards[1], sdk_pool_handle,
                             dest, renamed,
                             None, None,
                             None, None,
                             services=[])
    assert e.match("Node's alias cannot be changed")
def test_node_alias_cannot_be_changed(looper, txnPoolNodeSet,
                                      sdk_pool_handle,
                                      sdk_node_theta_added):
    """
    The node alias cannot be changed.
    """
    steward_wallet, theta = sdk_node_theta_added
    dest = hexToFriendly(theta.nodestack.verhex)
    with pytest.raises(RequestRejectedException) as e:
        sdk_send_update_node(looper, steward_wallet, sdk_pool_handle,
                             dest, 'foo',
                             None, None,
                             None, None)
    assert 'data has conflicts with request data' in e._excinfo[1].args[0]
    sdk_pool_refresh(looper, sdk_pool_handle)
def testSendNymFailsForCryptonymIdentifierAndMatchedAbbrevVerkey(
        be, do, poolNodesStarted, trusteeCli):
    """A NYM whose dest is a cryptonym and whose verkey is the matching
    abbreviated form must be rejected by the CLI with an error."""
    cryptonym = createCryptonym()
    raw_hex = friendlyToHex(cryptonym)
    params = {
        'dest': cryptonym,
        'verkey': '~' + hexToFriendly(raw_hex[16:]),
        'role': Roles.TRUST_ANCHOR.name,
    }
    be(trusteeCli)
    do('send NYM dest={dest} role={role} verkey={verkey}',
       mapper=params, expect=ERROR, within=2)
def testNodePortCannotBeChangedByAnotherSteward(looper, txnPoolNodeSet,
                                                sdk_wallet_steward,
                                                sdk_pool_handle,
                                                sdk_node_theta_added):
    """A steward who does not own a node must not be able to change its HA."""
    _, theta = sdk_node_theta_added
    fresh_node_ha, fresh_client_ha = genHa(2)
    logger.debug("{} changing HAs to {} {}".format(theta, fresh_node_ha,
                                                   fresh_client_ha))
    dest = hexToFriendly(theta.nodestack.verhex)
    with pytest.raises(RequestRejectedException) as e:
        # sdk_wallet_steward does not own theta, so this must be rejected
        sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                             dest, theta.name,
                             fresh_node_ha.host, fresh_node_ha.port,
                             fresh_client_ha.host, fresh_client_ha.port)
    assert 'is not a steward of node' in e._excinfo[1].args[0]
    sdk_pool_refresh(looper, sdk_pool_handle)
def testSendNymFailsForCryptonymIdentifierAndMatchedAbbrevVerkey(
        looper, sdk_pool_handle, txnPoolNodeSet,
        nym_request, sdk_wallet_trustee):
    """A NYM whose dest is a cryptonym paired with its matching abbreviated
    verkey must be nacked by the pool."""
    cryptonym = createCryptonym()
    raw_hex = friendlyToHex(cryptonym)
    nym_request[OPERATION].update({
        'dest': cryptonym,
        'verkey': '~' + hexToFriendly(raw_hex[16:]),
        'role': TRUST_ANCHOR,
    })
    request_couple = sdk_sign_and_send_prepared_request(
        looper, sdk_wallet_trustee, sdk_pool_handle, json.dumps(nym_request))
    sdk_get_bad_response(looper, [request_couple], RequestNackedException,
                         'Neither a full verkey nor an abbreviated one')
def test_state_proofs_for_get_nym(request_handler):
    # A state proof must verify both for an existing NYM (with data) and for
    # a non-existent NYM (with null data) under the same multi-signature.
    nym = 'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv'
    role = "2"
    verkey = "~7TYfekw4GUagBnBVCqPjiC"
    seq_no = 1
    # Check for existing nym
    data, multi_sig = prep_multi_sig(request_handler, nym, role, verkey,
                                     seq_no)
    assert get_nym_verify_proof(request_handler, nym, data, multi_sig)
    # Shuffle the bytes of nym to produce a NYM that is (almost surely)
    # absent from the state
    h = list(friendlyToHex(nym))
    random.shuffle(h)
    garbled_nym = hexToFriendly(bytes(h))
    data[f.IDENTIFIER.nm] = garbled_nym
    # `garbled_nym` does not exist, proof should verify but data is null
    assert get_nym_verify_proof(request_handler, garbled_nym, None, multi_sig)
def test_steward_suspends_node_and_promote_with_new_ha(
        looper, txnPoolNodeSet,
        tdir, tconf,
        sdk_pool_handle,
        sdk_wallet_steward,
        sdk_node_theta_added,
        poolTxnStewardData,
        allPluginsPath):
    # Demote a freshly added node, verify it drops out of all node
    # registries, then promote it back with a brand-new HA and verify
    # the pool reconnects to the new address.
    new_steward_wallet, new_node = sdk_node_theta_added
    looper.run(checkNodesConnected(txnPoolNodeSet + [new_node]))
    demote_node(looper, new_steward_wallet, sdk_pool_handle, new_node)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, new_node.name))
    # Check that a node does not connect to the suspended
    # node
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               new_steward_wallet, sdk_pool_handle)
    with pytest.raises(RemoteNotFound):
        looper.loop.run_until_complete(
            sendMessageAndCheckDelivery(txnPoolNodeSet[0], new_node))
    new_node.stop()
    looper.removeProdable(new_node)
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    node_ha, client_ha = genHa(2)
    node_nym = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_nym, new_node.name,
                         node_ha.host, node_ha.port,
                         client_ha.host, client_ha.port,
                         services=[VALIDATOR])
    # point the stopped node's stacks at the new addresses before restart
    new_node.nodestack.ha = node_ha
    new_node.clientstack.ha = client_ha
    nodeTheta = start_stopped_node(new_node, looper, tconf,
                                   tdir, allPluginsPath,
                                   delay_instance_change_msgs=False)
    # every running node must already know the promoted node's new HA
    assert all(node.nodestack.remotes[new_node.name].ha == node_ha
               for node in txnPoolNodeSet)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
def update_bls_keys_no_proof(node_index, sdk_wallet_stewards,
                             sdk_pool_handle, looper, txnPoolNodeSet):
    # Rotate a node's BLS key while deliberately omitting the proof of
    # possession (key_proof=None); returns the new BLS public key.
    node = txnPoolNodeSet[node_index]
    # steward at the same index owns the node
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name, None, None, None, None,
                         bls_key=new_blspk, services=None,
                         key_proof=None)
    # the rest of the pool must converge with the updated node
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    return new_blspk
def test_catch_up_after_demoted(
        txnPoolNodeSet,
        sdk_node_set_with_node_added_after_some_txns,
        sdk_wallet_client):
    # A demoted node misses txns; after promotion it must catch up and
    # participate in ordering again.
    logger.info(
        "1. add a new node after sending some txns and check that catch-up "
        "is done (the new node is up to date)")
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4])
    logger.info("2. turn the new node off (demote)")
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    # empty services list == demotion
    sdk_send_update_node(looper, new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None,
                         None, None,
                         services=[])
    logger.info("3. send more requests, "
                "so that the new node's state is outdated")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    checkNodeDataForInequality(new_node, *txnPoolNodeSet[:-1])
    logger.info("4. turn the new node on")
    sdk_send_update_node(looper, new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None,
                         None, None,
                         services=[VALIDATOR])
    logger.info("5. make sure catch-up is done "
                "(the new node is up to date again)")
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
    logger.info("6. send more requests and make sure "
                "that the new node participates in processing them")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 10)
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
def sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle,
                       sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=None,
                       new_key_proof=None,
                       check_functional=True,
                       pool_refresh=True):
    # Rotate `node`'s BLS key (and proof of possession) via a NODE txn.
    # `add_wrong` sends a freshly generated key pair that does not match
    # the node's stored keys; `new_bls`/`new_key_proof` override the
    # generated values. `check_functional`/`pool_refresh` let callers skip
    # the post-update checks. Returns the generated BLS public key.
    if add_wrong:
        _, new_blspk, key_proof = create_default_bls_crypto_factory(
        ).generate_bls_keys()
    else:
        new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    key_in_txn = new_bls or new_blspk
    bls_key_proof = new_key_proof or key_proof
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None,
                         None, None,
                         bls_key=key_in_txn,
                         services=None,
                         key_proof=bls_key_proof,
                         pool_refresh=pool_refresh)
    # the rest of the pool must converge with the updated node
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    if pool_refresh:
        sdk_pool_refresh(looper, sdk_pool_handle)
    if check_functional:
        sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                                   sdk_wallet_steward,
                                   sdk_pool_handle)
    return new_blspk
def update_node_data_and_reconnect(looper, txnPoolNodeSet,
                                   steward_wallet, sdk_pool_handle,
                                   node,
                                   new_node_ip, new_node_port,
                                   new_client_ip, new_client_port,
                                   tdir, tconf):
    """Send a NODE txn updating `node`'s HAs, restart it on the new HAs
    and verify the pool reconnects and stays functional.

    Any of the new_* parameters may be None to keep the current value.
    Returns the restarted TestNode (also swapped into txnPoolNodeSet).
    Raises Exception when `node` is not a member of txnPoolNodeSet.
    """
    node_ha = node.nodestack.ha
    cli_ha = node.clientstack.ha
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, steward_wallet, sdk_pool_handle,
                         node_dest, node.name,
                         new_node_ip, new_node_port,
                         new_client_ip, new_client_port)
    # restart the Node with new HA
    node.stop()
    looper.removeProdable(name=node.name)
    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
    restartedNode = TestNode(node.name,
                             config_helper=config_helper,
                             config=tconf,
                             # None falls back to the previous address/port
                             ha=HA(new_node_ip or node_ha.host,
                                   new_node_port or node_ha.port),
                             cliha=HA(new_client_ip or cli_ha.host,
                                      new_client_port or cli_ha.port))
    looper.add(restartedNode)
    # replace node in txnPoolNodeSet
    idx = next((i for i, n in enumerate(txnPoolNodeSet)
                if n.name == node.name), None)
    if idx is None:
        # fixed message (was: "is not the pool")
        raise Exception('{} is not in the pool'.format(node))
    txnPoolNodeSet[idx] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               steward_wallet, sdk_pool_handle)
    return restartedNode
def test_fail_node_bls_key_validation(looper, sdk_pool_handle,
                                      sdk_node_theta_added):
    """
    Test request for change node bls key with incorrect
    bls key proof of possession.
    """
    new_steward_wallet, new_node = sdk_node_theta_added
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    bls_key, key_proof = init_bls_keys(new_node.keys_dir, new_node.name)
    # change key_proof: uppercasing base58 text corrupts the proof
    key_proof = key_proof.upper()
    with pytest.raises(RequestNackedException) as e:
        sdk_send_update_node(looper, new_steward_wallet,
                             sdk_pool_handle, node_dest, new_node.name,
                             None, None,
                             None, None,
                             bls_key=bls_key,
                             key_proof=key_proof)
    # the nack must mention the mismatched proof and key
    assert "Proof of possession {} " \
           "is incorrect for BLS key {}".format(key_proof, bls_key) \
           in e._excinfo[1].args[0]
def sdk_change_node_keys(looper, node, sdk_wallet_steward,
                         sdk_pool_handle, verkey):
    # Send a NODE txn that sets `verkey` as the node's new verification key,
    # then wipe the node's locally cached role keys/dirs so it re-keys.
    _, steward_did = sdk_wallet_steward
    node_dest = hexToFriendly(node.nodestack.verhex)
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name=node.name,
                             destination=node_dest))
    # prepare_node_request has no verkey parameter, so patch the raw
    # operation JSON before signing
    request_json = json.loads(node_request)
    request_json['operation'][VERKEY] = verkey
    node_request1 = json.dumps(request_json)
    request_couple = sdk_sign_and_send_prepared_request(looper,
                                                        sdk_wallet_steward,
                                                        sdk_pool_handle,
                                                        node_request1)
    sdk_get_and_check_replies(looper, [request_couple])
    # drop cached key material so the stacks pick up the new key
    node.nodestack.clearLocalRoleKeep()
    node.nodestack.clearRemoteRoleKeeps()
    node.nodestack.clearAllDir()
    node.clientstack.clearLocalRoleKeep()
    node.clientstack.clearRemoteRoleKeeps()
    node.clientstack.clearAllDir()
def changeNodeKeys(looper, stewardClient, stewardWallet, node, verkey):
    # Legacy-client variant: send a NODE txn with a new verkey, wait for
    # replies, then wipe the node's cached role keys/dirs.
    nodeNym = hexToFriendly(node.nodestack.verhex)
    op = {
        TXN_TYPE: NODE,
        TARGET_NYM: nodeNym,
        VERKEY: verkey,
        DATA: {
            ALIAS: node.name
        }
    }
    req = stewardWallet.signOp(op)
    stewardClient.submitReqs(req)
    waitForSufficientRepliesForRequests(looper, stewardClient,
                                        requests=[req])
    # drop cached key material so the stacks pick up the new key
    node.nodestack.clearLocalRoleKeep()
    node.nodestack.clearRemoteRoleKeeps()
    node.nodestack.clearAllDir()
    node.clientstack.clearLocalRoleKeep()
    node.clientstack.clearRemoteRoleKeeps()
    node.clientstack.clearAllDir()
def sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle,
                       sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=None,
                       new_key_proof=None):
    # Rotate `node`'s BLS key (and proof of possession) via a NODE txn,
    # then verify the pool converges and still processes a write.
    # `add_wrong` generates a key pair not matching the node's stored keys;
    # `new_bls`/`new_key_proof` override the generated values.
    # Returns the generated BLS public key.
    if add_wrong:
        _, new_blspk, key_proof = create_default_bls_crypto_factory(
        ).generate_bls_keys()
    else:
        new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    key_in_txn = new_bls or new_blspk
    bls_key_proof = new_key_proof or key_proof
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None,
                         None, None,
                         bls_key=key_in_txn,
                         services=None,
                         key_proof=bls_key_proof)
    # the rest of the pool must converge with the updated node
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    # smoke-check the pool still processes writes
    sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward,
                    alias=randomString(5))
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    return new_blspk
def changeNodeKeys(looper, stewardClient, stewardWallet, node, verkey):
    # Oldest variant: CHANGE_KEYS txn with the verkey inside DATA; waits on
    # the client inbox directly, then wipes the node's cached role keys.
    nodeNym = hexToFriendly(node.nodestack.local.signer.verhex)
    op = {
        TXN_TYPE: CHANGE_KEYS,
        TARGET_NYM: nodeNym,
        DATA: {
            VERKEY: verkey,
            ALIAS: node.name
        }
    }
    req = stewardWallet.signOp(op)
    stewardClient.submitReqs(req)
    looper.run(eventually(checkSufficientRepliesRecvd,
                          stewardClient.inBox,
                          req.reqId, 1,
                          retryWait=1, timeout=5))
    # drop cached key material so the stacks pick up the new key
    node.nodestack.clearLocalRoleKeep()
    node.nodestack.clearRemoteRoleKeeps()
    node.nodestack.clearAllDir()
    node.clientstack.clearLocalRoleKeep()
    node.clientstack.clearRemoteRoleKeeps()
    node.clientstack.clearAllDir()
def add_started_node(looper,
                     new_node,
                     node_ha,
                     client_ha,
                     txnPoolNodeSet,
                     sdk_pool_handle,
                     sdk_wallet_steward,
                     bls_key,
                     key_proof):
    '''
    Adds an already created node to the pool, i.e. sends the NODE txn.
    Makes sure that the node is actually added and connected to all other
    nodes.
    '''
    # register a dedicated steward for the new node first
    new_steward_wallet_handle = sdk_add_new_nym(looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward,
                                                "Steward" + new_node.name,
                                                role=STEWARD_STRING)
    node_name = new_node.name
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest, node_name,
                         node_ha[0], node_ha[1],
                         client_ha[0], client_ha[1],
                         services=[VALIDATOR],
                         bls_key=bls_key,
                         key_proof=key_proof)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward, sdk_pool_handle)
    # the new node must catch up to the rest of the pool
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
def sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle,
                       sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=None):
    # Rotate `node`'s BLS key via a NODE txn; with `add_wrong` a random
    # (invalid) base58 string is sent instead. Returns the generated key.
    new_blspk = init_bls_keys(node.keys_dir, node.name)
    # NOTE: the conditional expression binds loosest, so this parses as
    # (new_bls or new_blspk) if not add_wrong else b58encode(...)
    key_in_txn = new_bls or new_blspk \
        if not add_wrong \
        else base58.b58encode(randomString(128).encode()).decode("utf-8")
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, node.name,
                         None, None,
                         None, None,
                         bls_key=key_in_txn,
                         services=None)
    # the rest of the pool must converge with the updated node
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_steward,
                               sdk_pool_handle)
    return new_blspk
def changeNodeHa(looper, stewardClient, stewardWallet, node, nodeHa,
                 clientHa):
    # Legacy-client variant: send a CHANGE_HA txn with new node/client
    # addresses, wait on the client inbox, then wipe local key keeps so the
    # stacks rebind.
    nodeNym = hexToFriendly(node.nodestack.local.signer.verhex)
    (nodeIp, nodePort), (clientIp, clientPort) = nodeHa, clientHa
    op = {
        TXN_TYPE: CHANGE_HA,
        TARGET_NYM: nodeNym,
        DATA: {
            NODE_IP: nodeIp,
            NODE_PORT: nodePort,
            CLIENT_IP: clientIp,
            CLIENT_PORT: clientPort,
            ALIAS: node.name
        }
    }
    req = stewardWallet.signOp(op)
    stewardClient.submitReqs(req)
    looper.run(eventually(checkSufficientRepliesRecvd,
                          stewardClient.inBox,
                          req.reqId, 1,
                          retryWait=1, timeout=5))
    # drop cached transport keys so the new HAs take effect
    node.nodestack.clearLocalKeep()
    node.nodestack.clearRemoteKeeps()
    node.clientstack.clearLocalKeep()
    node.clientstack.clearRemoteKeeps()
# Script body: initialize node keys for both stacks and record the node's
# nym / BLS key / proof of possession in node.config.
print("Node-stack name is", args.name)
print("Client-stack name is", args.name + CLIENT_STACK_SUFFIX)
config = getConfig()
config_helper = NodeConfigHelper(args.name, config)
os.makedirs(config_helper.keys_dir, exist_ok=True)
try:
    _, verkey, blskey, key_proof = initNodeKeysForBothStacks(
        args.name, config_helper.keys_dir, args.seed, override=args.force)
except Exception as ex:
    # key generation failed (e.g. keys exist and --force not given)
    print(ex)
    exit()
# derive the base58 nym from the hex verkey
nym = hexToFriendly(verkey.encode())
cf = configparser.ConfigParser()
cf.read('node.config')
if not cf.has_section(args.name):
    cf.add_section(args.name)
cf.set(args.name, 'dest', nym)
cf.set(args.name, 'bls_key', blskey)
cf.set(args.name, 'bls_pop', key_proof)
with open('node.config', 'w') as fw:
    cf.write(fw)
def get_nym_from_verkey(verkey: bytes):
    """Return the base58 'friendly' NYM derived from a hex-encoded verkey."""
    friendly_nym = hexToFriendly(verkey)
    return friendly_nym
def testStewardSuspendsNode(looper, txnPoolNodeSet,
                            tdirWithPoolTxns, tconf,
                            steward1, stewardWallet,
                            nodeThetaAdded,
                            poolTxnStewardData,
                            allPluginsPath):
    # A steward suspends the node it added; nothing may keep connecting to
    # it, and revoking the suspension must bring it back into the pool.
    newSteward, newStewardWallet, newNode = nodeThetaAdded
    newNodeNym = hexToFriendly(newNode.nodestack.local.signer.verhex)
    suspendNode(looper, newSteward, newStewardWallet, newNodeNym,
                newNode.name)
    # Check suspended node does not exist in any nodeReg or remotes of
    # nodes or clients
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, newNode.name))
    for client in (steward1, newSteward):
        looper.run(eventually(checkNodeNotInNodeReg, client, newNode.name))
    # Check a client can send request and receive replies
    req = sendRandomRequest(newStewardWallet, newSteward)
    checkSufficientRepliesForRequests(looper, newSteward, [req, ],
                                      timeoutPerReq=10)
    # Check that a restarted client or node does not connect to the suspended
    # node
    steward1.stop()
    looper.removeProdable(steward1)
    steward1, stewardWallet = buildPoolClientAndWallet(poolTxnStewardData,
                                                       tdirWithPoolTxns)
    looper.add(steward1)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    looper.run(eventually(checkNodeNotInNodeReg, steward1, newNode.name))
    newNode.stop()
    looper.removeProdable(newNode)
    # TODO: There is a bug that if a primary node is turned off, it sends
    # duplicate Pre-Prepare and gets blacklisted. Here is the gist
    # https://gist.github.com/lovesh/c16989616ebb6856f9fa2905c14dc4b7
    # restart a non-primary node (no explicit HA: presumably reloaded from
    # its keep — TODO confirm)
    oldNodeIdx, oldNode = [(i, n) for i, n in enumerate(txnPoolNodeSet)
                           if not n.hasPrimary][0]
    oldNode.stop()
    looper.removeProdable(oldNode)
    oldNode = TestNode(oldNode.name, basedirpath=tdirWithPoolTxns,
                       config=tconf, pluginPaths=allPluginsPath)
    looper.add(oldNode)
    txnPoolNodeSet[oldNodeIdx] = oldNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(eventually(checkNodeNotInNodeReg, oldNode, newNode.name))
    # Check that a node whose suspension is revoked can reconnect to other
    # nodes and clients can also connect to that node
    cancelNodeSuspension(looper, newSteward, newStewardWallet, newNodeNym,
                         newNode.name)
    nodeTheta = TestNode(newNode.name, basedirpath=tdirWithPoolTxns,
                         config=tconf, pluginPaths=allPluginsPath,
                         ha=newNode.nodestack.ha,
                         cliha=newNode.clientstack.ha)
    looper.add(nodeTheta)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=30))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)
def test_primary_selection_after_demoted_node_promotion(
        looper, txnPoolNodeSet, sdk_node_theta_added,
        sdk_pool_handle, tconf, tdir, allPluginsPath):
    """
    Demote non-primary node
    Promote it again
    Restart one node to get the following difference with others:
    - not restarted - node registry and related pool parameters are kept
      in memory in some state which is expected as the same as in the pool
      ledger
    - restarted one - loaded node registry and pool parameters from the pool
      ledger at startup
    Do several view changes and check that all nodes will choose previously
    demoted / promoted node as a primary for some instanse
    """
    new_steward_wallet, new_node = sdk_node_theta_added
    # viewNo0 = checkViewNoForNodes(txnPoolNodeSet)
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    logger.info("1. Demote node Theta")
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    # Empty services list in the NODE txn demotes the node (see step log).
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None, None, None, [])
    remainingNodes = list(set(txnPoolNodeSet) - {new_node})
    check_all_nodes_the_same_pool_list(remainingNodes)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # TODO view change might happen unexpectedly by unknown reason
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    logger.info("2. Promote node Theta back")
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None, None, None, [VALIDATOR])
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("3. Restart one node")
    stopped_node = txnPoolNodeSet[0]
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            stopped_node, stopNode=True)
    looper.removeProdable(stopped_node)
    remainingNodes = list(set(txnPoolNodeSet) - {stopped_node})
    ensureElectionsDone(looper, remainingNodes)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    # start node
    restartedNode = start_stopped_node(stopped_node, looper, tconf, tdir,
                                       allPluginsPath)
    txnPoolNodeSet = remainingNodes + [restartedNode]
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info(
        "4. Do view changes to check that nodeTheta will be chosen "
        "as a primary for some instance by all nodes after some rounds")
    while txnPoolNodeSet[0].viewNo < 4:
        ensure_view_change_complete(looper, txnPoolNodeSet)
        # ensure pool is working properly
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  new_steward_wallet, 3)
def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPoolNodeSet,
                                                                   sdk_pool_handle,
                                                                   sdk_wallet_steward,
                                                                   txnPoolMasterNodes):
    """
    Demote primary and do multiple view changes forcing primaries rotation.
    Demoted primary should be skipped without additional view changes.
    """
    viewNo0 = checkViewNoForNodes(txnPoolNodeSet)

    logger.info("1. turn off the node which has primary replica for master instanse, "
                " this should trigger view change")
    master_node = txnPoolMasterNodes[0]
    node_dest = hexToFriendly(master_node.nodestack.verhex)
    # An empty services list in the NODE txn demotes the node.
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, master_node.name,
                         None, None, None, None,
                         services=[])

    restNodes = [node for node in txnPoolNodeSet
                 if node.name != master_node.name]
    ensureElectionsDone(looper, restNodes)

    # Demoting the master primary must cause exactly one view change; the
    # demoted node itself does not take part and keeps the old viewNo.
    viewNo1 = checkViewNoForNodes(restNodes)
    assert viewNo1 == viewNo0 + 1
    assert master_node.viewNo == viewNo0
    assert len(restNodes[0].replicas) == 1  # only one instance left
    assert restNodes[0].replicas[0].primaryName != master_node.name

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    # Steps 2-4: force three more view changes. After each one the demoted
    # node must never be selected as primary and viewNo must advance by
    # exactly one (no extra view changes needed to skip the demoted node).
    prev_view_no = viewNo1
    for step in (2, 3, 4):
        logger.info("%d. force view change %d and check final viewNo" % (step, step))
        ensure_view_change_complete(looper, restNodes)

        cur_view_no = checkViewNoForNodes(restNodes)
        assert restNodes[0].replicas[0].primaryName != master_node.name
        assert cur_view_no == prev_view_no + 1
        prev_view_no = cur_view_no

        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_steward, 3)
def test_primary_selection_after_primary_demotion_and_pool_restart( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, txnPoolMasterNodes, tdir, tconf): """ Demote primary and restart the pool. Pool should select new primary and have viewNo=0 after restart. """ logger.info( "1. turn off the node which has primary replica for master instanse") master_node = txnPoolMasterNodes[0] node_dest = hexToFriendly(master_node.nodestack.verhex) sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle, node_dest, master_node.name, None, None, None, None, services=[]) restNodes = [ node for node in txnPoolNodeSet if node.name != master_node.name ] ensureElectionsDone(looper, restNodes) # ensure pool is working properly logger.info("2. restart pool") # Stopping existing nodes for node in txnPoolNodeSet: node.stop() looper.removeProdable(node) # Starting nodes again by creating `Node` objects since that simulates # what happens when starting the node with script restartedNodes = [] for node in txnPoolNodeSet: config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir) restartedNode = TestNode(node.name, config_helper=config_helper, config=tconf, ha=node.nodestack.ha, cliha=node.clientstack.ha) looper.add(restartedNode) restartedNodes.append(restartedNode) restNodes = [ node for node in restartedNodes if node.name != master_node.name ] looper.run(checkNodesConnected(restNodes)) ensureElectionsDone(looper, restNodes) checkViewNoForNodes(restNodes, 0) sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 3) primariesIdxs = getPrimaryNodesIdxs(restNodes) assert restNodes[primariesIdxs[0]].name != master_node.name
def test_primary_selection_after_demoted_primary_node_promotion( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, txnPoolMasterNodes): """ Demote primary of master instance, wait for view change and promote it back. Check primaries for instances. """ assert len(txnPoolNodeSet) == 4 # Check primaries after test setup. primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet) assert len(primariesIdxs) == 2 assert primariesIdxs[0] == 0 assert primariesIdxs[1] == 1 master_node = txnPoolMasterNodes[0] # Demote primary of master instance. node_dest = hexToFriendly(master_node.nodestack.verhex) sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle, node_dest, master_node.name, None, None, None, None, services=[]) restNodes = [ node for node in txnPoolNodeSet if node.name != master_node.name ] ensureElectionsDone(looper, restNodes) # Check that there is only one instance now, check it's primary. primariesIdxs = getPrimaryNodesIdxs(restNodes) assert len(primariesIdxs) == 1 assert primariesIdxs[0] == 1 # Ensure pool is working properly. sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 3) # Promote demoted node back. sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle, node_dest, master_node.name, None, None, None, None, services=[VALIDATOR]) # Ensure pool is working properly. sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 3) # Check that there are two instances again, check their primaries. primariesIdxs = getPrimaryNodesIdxs(txnPoolNodeSet) assert len(primariesIdxs) == 2 assert primariesIdxs[0] == 2 assert primariesIdxs[1] == 3
def test_primary_selection_after_primary_demotion_and_view_changes(
        looper, txnPoolNodeSet,
        sdk_pool_handle,
        sdk_wallet_steward,
        txnPoolMasterNodes):
    """
    Demote primary and do multiple view changes forcing primaries rotation.
    Demoted primary should be skipped without additional view changes.
    """
    viewNo0 = checkViewNoForNodes(txnPoolNodeSet)

    logger.info(
        "1. turn off the node which has primary replica for master instanse, "
        " this should trigger view change")
    master_node = txnPoolMasterNodes[0]
    node_dest = hexToFriendly(master_node.nodestack.verhex)
    # Empty services list in the NODE txn demotes the node.
    sdk_send_update_node(looper, sdk_wallet_steward, sdk_pool_handle,
                         node_dest, master_node.name,
                         None, None, None, None,
                         services=[])

    restNodes = [node for node in txnPoolNodeSet \
                 if node.name != master_node.name]
    ensureElectionsDone(looper, restNodes)

    # Exactly one view change must follow the demotion; the demoted node
    # itself does not participate and keeps the old viewNo.
    viewNo1 = checkViewNoForNodes(restNodes)
    assert viewNo1 == viewNo0 + 1
    assert master_node.viewNo == viewNo0
    assert len(restNodes[0].replicas) == 1  # only one instance left
    assert restNodes[0].replicas[0].primaryName != master_node.name

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("2. force view change 2 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)

    viewNo2 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo2 == viewNo1 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("3. force view change 3 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)

    viewNo3 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo3 == viewNo2 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("4. force view change 4 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)

    viewNo4 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo4 == viewNo3 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)
def getNymFromVerkey(verkey: bytes): return hexToFriendly(verkey)
def getEncodedLocalVerKey(name, baseDir=None): verKey = getLocalVerKey(name, baseDir) return hexToFriendly(verKey)
def test_primary_selection_after_demoted_node_promotion(
        looper, txnPoolNodeSet, sdk_node_theta_added,
        sdk_pool_handle, tconf, tdir, allPluginsPath):
    """
    Demote non-primary node
    Promote it again
    Restart one node to get the following difference with others:
    - not restarted - node registry and related pool parameters are kept
      in memory in some state which is expected as the same as in the pool
      ledger
    - restarted one - loaded node registry and pool parameters from the pool
      ledger at startup
    Do several view changes and check that all nodes will choose previously
    demoted / promoted node as a primary for some instanse
    """
    new_steward_wallet, new_node = sdk_node_theta_added
    # viewNo0 = checkViewNoForNodes(txnPoolNodeSet)
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    logger.info("1. Demote node Theta")
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    # Demotion is done by sending a NODE txn with an empty services list.
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None, None, None, [])
    remainingNodes = list(set(txnPoolNodeSet) - {new_node})
    check_all_nodes_the_same_pool_list(remainingNodes)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # TODO view change might happen unexpectedly by unknown reason
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    logger.info("2. Promote node Theta back")
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None, None, None, [VALIDATOR])
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("3. Restart one node")
    stopped_node = txnPoolNodeSet[0]
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            stopped_node, stopNode=True)
    looper.removeProdable(stopped_node)
    remainingNodes = list(set(txnPoolNodeSet) - {stopped_node})
    ensureElectionsDone(looper, remainingNodes)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    # start node
    restartedNode = start_stopped_node(stopped_node, looper, tconf, tdir,
                                       allPluginsPath)
    txnPoolNodeSet = remainingNodes + [restartedNode]
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("4. Do view changes to check that nodeTheta will be chosen "
                "as a primary for some instance by all nodes after some rounds")
    while txnPoolNodeSet[0].viewNo < 4:
        ensure_view_change_complete(looper, txnPoolNodeSet)
        # ensure pool is working properly
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  new_steward_wallet, 3)
seq_no = 1 for node_num in range(nodeCountToInit): _, verkey, blskey, key_proof = initNodeKeysForBothStacks(node_defs[node_num].name, keys_dir, node_defs[node_num].sigseed, override=True) verkey = verkey.encode() assert verkey == node_defs[node_num].verkey nodeParamsFileName = 'indy.env' paramsFilePath = os.path.join(config.GENERAL_CONFIG_DIR, nodeParamsFileName) print('Nodes will not run locally, so writing {}'.format(paramsFilePath)) TestNetworkSetup.writeNodeParamsFile(paramsFilePath, node_def.name, node_def.ip, node_def.port, node_def.ip, node_def.client_port) print("This node with name {} will use ports {} and {} for nodestack and clientstack respectively" .format(node_defs[node_num].name, node_defs[node_num].port, node_defs[node_num].client_port)) node_nym = hexToFriendly(verkey) node_txn = Steward.node_txn(node_defs[node_num].steward_nym, node_defs[node_num].name, node_nym, node_defs[node_num].ip, node_defs[node_num].port, node_defs[node_num].client_port, blskey=blskey, bls_key_proof=key_proof, seq_no=seq_no, protocol_version=genesis_protocol_version) seq_no += 1 poolLedger.add(node_txn) poolLedger.stop() domainLedger.stop()