def steward(nodeSet, tdirWithClientPoolTxns, looper, trustee, trusteeWallet):
    """Add a client named 'newSteward' with the STEWARD role via the trustee.

    Returns whatever getClientAddedWithRole returns (the new client and
    its wallet).
    """
    return getClientAddedWithRole(nodeSet,
                                  tdirWithClientPoolTxns,
                                  looper,
                                  trustee,
                                  trusteeWallet,
                                  'newSteward',
                                  STEWARD)
def some_transactions_done(looper, nodeSet, tdirWithClientPoolTxns, trustee,
                           trusteeWallet):
    """Run a couple of write txns: add a NYM without a verkey, then have the
    trustee set that identifier's verkey via updateIndyIdrWithVerkey.
    """
    client, wallet = getClientAddedWithRole(nodeSet,
                                            tdirWithClientPoolTxns,
                                            looper,
                                            trustee,
                                            trusteeWallet,
                                            'some_name',
                                            addVerkey=False)
    idr = wallet.defaultId
    updateIndyIdrWithVerkey(looper, trusteeWallet, trustee, idr,
                            wallet.getVerkey(idr))
def nodeThetaAdded(looper, nodeSet, tdirWithPoolTxns, tconf, steward,
                   stewardWallet, allPluginsPath, testNodeClass,
                   testClientClass, tdir):
    """Create a new steward and have it add a new node "Theta" to the pool.

    Submits the NODE txn, starts the node process, waits for all nodes to
    connect, and checks clients see a consistent pool ledger.

    Returns: (newSteward, newStewardWallet, newNode).
    """
    # A steward identity is required to add a node.
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = "Theta"
    newSteward, newStewardWallet = getClientAddedWithRole(
        nodeSet, tdir, looper, steward, stewardWallet, newStewardName,
        STEWARD)

    # Signing key and (node, client) network addresses for the new node.
    sigseed = randomString(32).encode()
    nodeSigner = DidSigner(seed=sigseed)
    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)
    data = {
        NODE_IP: nodeIp,
        NODE_PORT: nodePort,
        CLIENT_IP: clientIp,
        CLIENT_PORT: clientPort,
        ALIAS: newNodeName,
        SERVICES: [VALIDATOR, ],
    }

    # Submit the NODE txn through the new steward and wait until it is
    # written (seqNo assigned).
    node = Node(nodeSigner.identifier, data, newStewardWallet.defaultId)
    newStewardWallet.addNode(node)
    reqs = newStewardWallet.preparePending()
    req = newSteward.submitReqs(*reqs)[0][0]
    waitForSufficientRepliesForRequests(looper, newSteward, requests=[req])

    def chk():
        assert newStewardWallet.getNode(node.id).seqNo is not None

    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))

    # Start the actual node process and wait for it to join the pool.
    initLocalKeys(newNodeName, tdirWithPoolTxns, sigseed, override=True)
    newNode = testNodeClass(newNodeName,
                            basedirpath=tdir,
                            base_data_dir=tdir,
                            config=tconf,
                            ha=(nodeIp, nodePort),
                            cliha=(clientIp, clientPort),
                            pluginPaths=allPluginsPath)
    nodeSet.append(newNode)
    looper.add(newNode)
    looper.run(checkNodesConnected(nodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward, *nodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward, *nodeSet)
    return newSteward, newStewardWallet, newNode
def test_requests_post_multiple_new_nodes(
        looper, tdirWithClientPoolTxns, tdirWithDomainTxnsUpdated, nodeSet,
        tconf, tdir, trustee, trusteeWallet, allPluginsPath,
        some_transactions_done):
    """The pool keeps serving write requests while two new nodes join, and
    the new nodes end up with the same data as the rest of the pool.
    """
    new_nodes = []
    for node_name in ('Zeta', 'Eta'):
        added = nodeThetaAdded(looper, nodeSet, tdirWithClientPoolTxns,
                               tconf, trustee, trusteeWallet, allPluginsPath,
                               TestNode, TestClient, NodeConfigHelper, tdir,
                               node_name=node_name)
        new_nodes.append(added)

    # Some writes while the fresh nodes are present in the pool.
    for _ in range(5):
        getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                               trustee, trusteeWallet, randomString(6))

    # Each new node must converge to the data of the original nodes.
    for *_, new_node in new_nodes:
        waitNodeDataEquality(looper, new_node, *nodeSet[:-2])

    # The pool still accepts further writes afterwards.
    for _ in range(5):
        getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                               trustee, trusteeWallet, randomString(6))
def anotherTrustAnchor1(nodeSet, tdirWithClientPoolTxns, looper, trustee,
                        trusteeWallet):
    """Add a client named 'newTrustAnchor1' with the TRUST_ANCHOR role."""
    return getClientAddedWithRole(nodeSet,
                                  tdirWithClientPoolTxns,
                                  looper,
                                  trustee,
                                  trusteeWallet,
                                  'newTrustAnchor1',
                                  role=TRUST_ANCHOR)
def anotherSteward1(nodeSet, tdirWithClientPoolTxns, looper, trustee,
                    trusteeWallet):
    """Add a client named 'newSteward1' with the STEWARD role."""
    return getClientAddedWithRole(nodeSet,
                                  tdirWithClientPoolTxns,
                                  looper,
                                  trustee,
                                  trusteeWallet,
                                  'newSteward1',
                                  role=STEWARD)
def anotherTGB(nodeSet, tdirWithClientPoolTxns, looper, trustee,
               trusteeWallet):
    """Add a client named 'newTGB' with the TGB role."""
    return getClientAddedWithRole(nodeSet,
                                  tdirWithClientPoolTxns,
                                  looper,
                                  trustee,
                                  trusteeWallet,
                                  'newTGB',
                                  role=TGB)
def nodeThetaAdded(looper, nodeSet, tdirWithClientPoolTxns, tconf, steward,
                   stewardWallet, allPluginsPath, testNodeClass,
                   testClientClass, node_config_helper_class, tdir,
                   node_name='Theta'):
    """Create a new steward and have it add and start a new pool node.

    Generates node keys (including a BLS key), submits the NODE txn, starts
    the node process via the given node/config-helper classes, and waits
    until all nodes are connected and clients see a consistent pool ledger.

    Returns: (newSteward, newStewardWallet, newNode).
    """
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = node_name
    # A steward identity is required to add a node.
    newSteward, newStewardWallet = getClientAddedWithRole(
        nodeSet, tdirWithClientPoolTxns, looper, steward, stewardWallet,
        newStewardName, role=STEWARD)

    # Keys, BLS key, and (node, client) network addresses for the new node.
    sigseed = randomString(32).encode()
    nodeSigner = SimpleSigner(seed=sigseed)
    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)
    config_helper = node_config_helper_class(newNodeName, tconf, chroot=tdir)
    _, _, bls_key = initNodeKeysForBothStacks(newNodeName,
                                              config_helper.keys_dir,
                                              sigseed,
                                              override=True)
    data = {
        NODE_IP: nodeIp,
        NODE_PORT: nodePort,
        CLIENT_IP: clientIp,
        CLIENT_PORT: clientPort,
        ALIAS: newNodeName,
        SERVICES: [VALIDATOR, ],
        BLS_KEY: bls_key
    }

    # Submit the NODE txn through the new steward and wait until it is
    # written (seqNo assigned).
    node = Node(nodeSigner.identifier, data, newStewardWallet.defaultId)
    newStewardWallet.addNode(node)
    reqs = newStewardWallet.preparePending()
    req = newSteward.submitReqs(*reqs)[0][0]
    waitForSufficientRepliesForRequests(looper, newSteward, requests=[req])

    def chk():
        assert newStewardWallet.getNode(node.id).seqNo is not None

    timeout = plenumWaits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))

    # Start the node process and wait for it to join the pool.
    newNode = testNodeClass(newNodeName,
                            config_helper=config_helper,
                            config=tconf,
                            ha=(nodeIp, nodePort),
                            cliha=(clientIp, clientPort),
                            pluginPaths=allPluginsPath)
    nodeSet.append(newNode)
    looper.add(newNode)
    looper.run(checkNodesConnected(nodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward, *nodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward, *nodeSet)
    return newSteward, newStewardWallet, newNode
def test_only_trustee_send_pool_config_writes_true_force_false(
        nodeSet, tdirWithClientPoolTxns, looper, trustee, trusteeWallet,
        poolConfigWTFF):
    """A steward's POOL_CONFIG txn (writes=True, force=False fixture) must be
    rejected by the pool with a 'cannot do' reason.
    """
    steward_client, steward_wallet = getClientAddedWithRole(
        nodeSet, tdirWithClientPoolTxns, looper, trustee, trusteeWallet,
        'tmpname', STEWARD)
    _, req = sendPoolConfig(steward_client, steward_wallet, poolConfigWTFF)
    # Only a trustee may send this config; expect a rejection.
    looper.run(eventually(checkRejects, steward_client, req.reqId,
                          'cannot do'))
def test_new_node_catchup_update_projection(looper,
                                            tdirWithPoolTxns,
                                            tdirWithDomainTxnsUpdated,
                                            nodeSet, tconf,
                                            trustee, trusteeWallet,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.
    new_steward, new_steward_wallet, new_node = nodeThetaAdded(
        looper, nodeSet, tdirWithPoolTxns, tconf, trustee, trusteeWallet,
        allPluginsPath, TestNode, TestClient, tdirWithPoolTxns)
    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # Since ATTRIB txn is done for TA
    # Before/after snapshots of each node's ledger size, state (projection)
    # size, and seqNo-map size; keyed by node name.
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        # Number of txns in the domain ledger.
        return len(node.domainLedger)

    def get_projection_size(node):
        # Number of entries in the domain state (the "projection").
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        # Record current ledger/projection/seqNo-map sizes of `nodes` into
        # the given dicts.
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        # Each node must have grown by exactly `new_txn_count` txns in
        # ledger, projection, and seqNo map.
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes,
                  old_seq_no_map_sizes, other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    # While the node is down: add TAs (each with an ATTRIB) and plain NYMs
    # through the remaining nodes only.
    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            getClientAddedWithRole(other_nodes, tdirWithPoolTxns, looper,
                                   trustee, trusteeWallet, 'TA' + str(i),
                                   role=TRUST_ANCHOR,
                                   client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper, *trust_anchors[-1], *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            getClientAddedWithRole(other_nodes, tdirWithPoolTxns, looper,
                                   trustee, trusteeWallet, 'NP' + str(i),
                                   client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes,
                  new_seq_no_map_sizes, other_nodes)
    # The size difference should be same as number of new NYM txns
    check_sizes(other_nodes)

    # Restart the stopped node; it must catch up the txns done while it was
    # away and grow by the same amount.
    new_node = TestNode(new_node.name, basedirpath=tdirWithPoolTxns,
                        config=tconf, pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha,
                        cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes,
                  old_seq_no_map_sizes, [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes,
                  new_seq_no_map_sizes, [new_node])
    check_sizes([new_node])

    # The restarted node must also answer reads for the ATTRIBs written
    # while it was down.
    for i, (tc, tw) in enumerate(trust_anchors):
        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes,
                  old_seq_no_map_sizes, nodeSet)
    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(getClientAddedWithRole(other_nodes,
                                                         tdirWithPoolTxns,
                                                         looper, tc, tw,
                                                         'NP1' + str(i)))
    # The new node should process transactions done by Nyms added to its
    # ledger while catchup
    fill_counters(new_ledger_sizes, new_projection_sizes,
                  new_seq_no_map_sizes, nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
def steward(nodeSet, tdirWithClientPoolTxns, looper, trustee, trusteeWallet):
    """Add a client named 'newSteward' with the STEWARD role via the
    trustee, returning the new client and its wallet.
    """
    return getClientAddedWithRole(
        nodeSet, tdirWithClientPoolTxns, looper, trustee, trusteeWallet,
        'newSteward', STEWARD)
def test_state_regenerated_from_ledger(looper, tdirWithClientPoolTxns,
                                       tdirWithDomainTxnsUpdated, nodeSet,
                                       tconf, tdir, trustee, trusteeWallet,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from ledger after start.
    Checking ATTRIB txns too since they store some data off ledger too
    """
    # Populate the ledger: five trust anchors, each with one raw ATTRIB and
    # three NYMs of their own.
    trust_anchors = []
    for i in range(5):
        trust_anchors.append(
            getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                                   trustee, trusteeWallet, 'TA' + str(i),
                                   role=TRUST_ANCHOR))
        addRawAttribute(looper, *trust_anchors[-1], randomString(6),
                        randomString(10),
                        dest=trust_anchors[-1][1].defaultId)
    for tc, tw in trust_anchors:
        for i in range(3):
            getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                                   tc, tw, 'NP1' + str(i))
    ensure_all_nodes_have_same_data(looper, nodeSet)

    # Stop the last node and wipe its domain state database from disk.
    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop.name, nodeSet[:-1])
    shutil.rmtree(state_db_path)

    # Restart it; state must be rebuilt from the ledger.
    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node
    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for tc, tw in trust_anchors:
        getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                               tc, tw, 'NP--{}'.format(tc.name))
    ensure_all_nodes_have_same_data(looper, nodeSet)
def test_state_regenerated_from_ledger(looper, tdirWithClientPoolTxns,
                                       tdirWithDomainTxnsUpdated, nodeSet,
                                       tconf, tdir, trustee, trusteeWallet,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from ledger after start.
    Checking ATTRIB txns too since they store some data off ledger too
    """
    # Seed the domain ledger with TAs (each writing a raw ATTRIB) plus a
    # few NYMs per TA.
    trust_anchors = []
    for idx in range(5):
        ta = getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                                    trustee, trusteeWallet, 'TA' + str(idx),
                                    role=TRUST_ANCHOR)
        trust_anchors.append(ta)
        addRawAttribute(looper, *ta, randomString(6), randomString(10),
                        dest=ta[1].defaultId)
    for ta_client, ta_wallet in trust_anchors:
        for idx in range(3):
            getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                                   ta_client, ta_wallet, 'NP1' + str(idx))
    ensure_all_nodes_have_same_data(looper, nodeSet)

    # Take the last node down and delete its domain state database.
    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop.name, nodeSet[:-1])
    shutil.rmtree(state_db_path)

    # Bring it back up; the state should be regenerated from the ledger.
    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node
    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for ta_client, ta_wallet in trust_anchors:
        getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper,
                               ta_client, ta_wallet,
                               'NP--{}'.format(ta_client.name))
    ensure_all_nodes_have_same_data(looper, nodeSet)