def test_idr_cache_update_after_catchup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    """Check that a restarted node rebuilds its identifier cache from catch-up.

    A node is stopped, a NYM txn is written by the rest of the pool, and the
    node is restarted.  After catch-up the NYM must be visible both in the
    node's domain state (at the root hash matching the txn time) and in its
    idrCache projection.
    """
    wallet_handle, identifier = sdk_wallet_steward
    # Take the last node offline before the NYM is written.
    node_to_disconnect = txnPoolNodeSet[-1]
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_disconnect.name, stopNode=True)
    looper.removeProdable(node_to_disconnect)
    # Build, sign and submit a fresh NYM request while the node is down.
    idr, verkey = createHalfKeyIdentifierAndAbbrevVerkey()
    request = looper.loop.run_until_complete(build_nym_request(identifier, idr, verkey, None, None))
    req_signed = looper.loop.run_until_complete(sign_request(wallet_handle, identifier, request))
    result = json.loads(looper.loop.run_until_complete(submit_request(sdk_pool_handle, req_signed)))
    # Bring the node back and wait until it has caught up with the others.
    restarted_node = start_stopped_node(node_to_disconnect, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet[-1] = restarted_node
    waitNodeDataEquality(looper, restarted_node, *txnPoolNodeSet[:-1])
    req_handler = restarted_node.getDomainReqHandler()
    # Resolve the state root that was current at (or just before) the txn time.
    root_hash = req_handler.ts_store.get_equal_or_prev(get_txn_time(result['result']))
    key = domain.make_state_path_for_nym(idr)
    # The NYM must be present in domain state at that root ...
    from_state = req_handler.state.get_for_root_hash(root_hash=root_hash, key=key)
    assert from_state
    deserialized = req_handler.stateSerializer.deserialize(from_state)
    assert deserialized
    # ... and in the idrCache projection rebuilt during catch-up.
    items_after = req_handler.idrCache.get(idr)
    assert items_after
def test_idr_cache_update_after_catchup(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    """Check that a restarted node rebuilds its identifier cache from catch-up.

    A node is stopped, a NYM txn is written by the rest of the pool, and the
    node is restarted.  After catch-up the NYM must be visible both in the
    node's domain state (at the root hash matching the txn time) and in its
    idrCache projection.
    """
    wallet_handle, identifier = sdk_wallet_steward
    # Take the last node offline before the NYM is written.
    node_to_disconnect = txnPoolNodeSet[-1]
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, node_to_disconnect.name, stopNode=True)
    looper.removeProdable(node_to_disconnect)
    # Build, sign and submit a fresh NYM request while the node is down.
    idr, verkey = createHalfKeyIdentifierAndAbbrevVerkey()
    request = looper.loop.run_until_complete(
        build_nym_request(identifier, idr, verkey, None, None))
    req_signed = looper.loop.run_until_complete(
        sign_request(wallet_handle, identifier, request))
    result = json.loads(
        looper.loop.run_until_complete(
            submit_request(sdk_pool_handle, req_signed)))
    # Bring the node back and wait until it has caught up with the others.
    restarted_node = start_stopped_node(node_to_disconnect, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet[-1] = restarted_node
    waitNodeDataEquality(looper, restarted_node, *txnPoolNodeSet[:-1])
    req_handler = restarted_node.get_req_handler(DOMAIN_LEDGER_ID)
    # Resolve the state root that was current at (or just before) the txn time.
    root_hash = req_handler.ts_store.get_equal_or_prev(
        get_txn_time(result['result']))
    key = domain.make_state_path_for_nym(idr)
    # The NYM must be present in domain state at that root ...
    from_state = req_handler.state.get_for_root_hash(root_hash=root_hash, key=key)
    assert from_state
    deserialized = req_handler.stateSerializer.deserialize(from_state)
    assert deserialized
    # ... and in the idrCache projection rebuilt during catch-up.
    items_after = req_handler.idrCache.get(idr)
    assert items_after
def scenario_txns_during_catchup(
        looper, tconf, tdir, allPluginsPath, do_post_node_creation, nodes,
        send_txns
):
    """Scenario: a lagging node receives txns while its DOMAIN catch-up is delayed.

    One node is stopped, txns are written, the node is restarted with its
    DOMAIN-ledger CatchupRep messages delayed, more txns are written while it
    is still catching up, and finally the delay is lifted and the whole pool
    must converge to the same data.
    """
    lagging_node = nodes[-1]
    rest_nodes = nodes[:-1]

    # Stop NodeX (keep its data dir so it can be restarted with old state)
    lagging_node.cleanupOnStopping = False
    disconnect_node_and_ensure_disconnected(looper,
                                            nodes,
                                            lagging_node.name,
                                            stopNode=True)
    looper.removeProdable(name=lagging_node.name)

    # Send transactions while NodeX is down
    send_txns()
    ensure_all_nodes_have_same_data(looper, rest_nodes)

    # Start NodeX (start=False: it is added to the looper later, inside the
    # delay_rules context, so the delay is in place before it runs)
    lagging_node = start_stopped_node(
        lagging_node,
        looper,
        tconf,
        tdir,
        allPluginsPath,
        start=False,
    )
    do_post_node_creation(lagging_node)
    HelperNode.fill_auth_map_for_node(lagging_node, XFER_PUBLIC)
    HelperNode.fill_auth_map_for_node(lagging_node, NYM)
    nodes[-1] = lagging_node

    # Delay CatchupRep for DOMAIN ledger for NodeX
    with delay_rules(
            lagging_node.nodeIbStasher, cr_delay(ledger_filter=DOMAIN_LEDGER_ID)
    ):
        # allow started node to receive looper events
        looper.add(lagging_node)
        # ensure it connected to others
        looper.run(checkNodesConnected(nodes))
        # Send transactions while NodeX's DOMAIN catch-up is stalled
        send_txns()
        ensure_all_nodes_have_same_data(looper, rest_nodes)

    # Reset delays
    # Make sure that all nodes have equal state
    ensure_all_nodes_have_same_data(looper, nodes)

    # Send transactions after catch-up completed; pool must stay writable
    send_txns()
    ensure_all_nodes_have_same_data(looper, rest_nodes)
def test_first_catchup_with_not_empty_ledger(looper, helpers,
                                             nodeSetWithIntegratedTokenPlugin,
                                             sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             fees_set,
                                             mint_tokens, addresses, fees,
                                             tconf, tdir, allPluginsPath,
                                             do_post_node_creation):
    """Check a node's first catch-up when the token ledger is already non-empty.

    A node is stopped, several fee-paying transfers are performed, then the
    node is re-created from its kept data dir, must catch up, and the pool
    must keep processing transfers and converge to the same data.
    """
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = 1

    # Keep the last node's data so it can be re-created with its old state.
    reverted_node = node_set[-1]
    idx = node_set.index(reverted_node)

    current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses, current_amount)

    reverted_node.cleanupOnStopping = False
    disconnect_node_and_ensure_disconnected(looper,
                                            node_set,
                                            reverted_node.name)
    looper.removeProdable(name=reverted_node.name)

    # Transfer routes: the full balance is moved around the address ring.
    from_a_to_b = [addresses[0], addresses[1]]
    from_b_to_c = [addresses[1], addresses[2]]
    from_c_to_d = [addresses[2], addresses[3]]
    from_d_to_a = [addresses[3], addresses[0]]
    # current_amount, seq_no, _ = add_nym_with_fees(helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(helpers, from_a_to_b, fees, looper, current_amount, seq_no, transfer_summ=current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(helpers, from_b_to_c, fees, looper, current_amount, seq_no, transfer_summ=current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(helpers, from_c_to_d, fees, looper, current_amount, seq_no, transfer_summ=current_amount)

    # add node_to_disconnect to pool
    node_to_disconnect = start_stopped_node(reverted_node, looper, tconf,
                                            tdir, allPluginsPath,
                                            start=False)
    do_post_node_creation(node_to_disconnect)
    looper.add(node_to_disconnect)
    node_set[idx] = node_to_disconnect
    looper.run(checkNodesConnected(node_set))
    helpers.node.fill_auth_map_for_node(node_to_disconnect, XFER_PUBLIC)

    # The caught-up node must take part in ordering the next transfer.
    current_amount, seq_no, _ = send_and_check_transfer(helpers, from_d_to_a, fees, looper, current_amount, seq_no, transfer_summ=current_amount)
    ensure_all_nodes_have_same_data(looper, node_set)
def restart_node(restarted_node, pool, looper, tconf, tdir, allPluginsPath,
                 do_post_node_creation, fees):
    """Stop *restarted_node*, re-create it from its kept data dir and rejoin it.

    The node's data directory is preserved (``cleanupOnStopping = False``) so
    the re-created node starts with its old ledgers and must catch up.  The
    auth map is re-filled for every fee alias before the node is added back
    to the looper.  ``pool`` is mutated in place: the old node object is
    replaced at its original index.
    """
    node_idx = pool.index(restarted_node)
    restarted_node.cleanupOnStopping = False
    disconnect_node_and_ensure_disconnected(looper,
                                            pool,
                                            restarted_node.name,
                                            stopNode=True)
    looper.removeProdable(name=restarted_node.name)

    # start=False: the node is only added to the looper after post-creation
    # setup and auth-map filling are done.
    restarted_node = start_stopped_node(
        restarted_node,
        looper,
        tconf,
        tdir,
        allPluginsPath,
        start=False,
    )
    do_post_node_creation(restarted_node)
    # Only the aliases are needed here; the fee amounts are irrelevant
    # (previously iterated .items() with an unused value variable).
    for fee_alias in fees:
        HelperNode.fill_auth_map_for_node(restarted_node,
                                          alias_to_txn_type[fee_alias])
    pool[node_idx] = restarted_node
    looper.add(restarted_node)
def test_recover_taa_from_ledger(txnPoolNodeSet,
                                 sdk_pool_handle, sdk_wallet_trustee,
                                 looper, monkeypatch, setup_aml,
                                 tconf, tdir, allPluginsPath):
    """Check that TAA txns written by the old (v1) handler are recovered correctly.

    Part of the pool is temporarily patched to write TXN_AUTHOR_AGREEMENT with
    the version-1 payload handler, then switched back to v2.  A node that was
    offline the whole time is restarted and must catch up all TAAs, old and
    new, without spoiling ratification timestamps.
    """
    orig_handlers = {}

    # Step 1. Stop one node
    node_to_stop = txnPoolNodeSet[-1]
    rest_pool = txnPoolNodeSet[:-1]
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_stop.name,
                                            stopNode=True)
    looper.removeProdable(name=node_to_stop.name)

    # Step 2. Patch all the rest nodes for using old version TAA handler
    # it's ugly but it works
    globals()['CURRENT_TXN_PAYLOAD_VERSIONS'][TXN_AUTHOR_AGREEMENT] = '1'
    for node in rest_pool:
        handler = node.write_manager.request_handlers.get(
            TXN_AUTHOR_AGREEMENT)[0]
        # remember the original (v2) handler so it can be restored in Step 4
        orig_handlers[node.name] = handler
        handler_for_v_1 = node.write_manager._request_handlers_with_version.get(
            (TXN_AUTHOR_AGREEMENT, "1"))[0]
        node.write_manager.request_handlers[TXN_AUTHOR_AGREEMENT] = [
            handler_for_v_1
        ]

    # Step 3. Send TAA txn in old way (no ratification parameter; the txn time
    # itself becomes the ratification timestamp)
    text = randomString(1024)
    version_0 = randomString(16)
    res = sdk_send_txn_author_agreement(looper, sdk_pool_handle,
                                        sdk_wallet_trustee, version_0, text)[1]
    taa_0_ratification_ts = get_txn_time(res['result'])
    version_1 = randomString(16)
    res = sdk_send_txn_author_agreement(looper, sdk_pool_handle,
                                        sdk_wallet_trustee, version_1, text)[1]
    taa_1_ratification_ts = get_txn_time(res['result'])

    # Step 4. return original TAA handlers back
    # it's ugly but it works
    globals()['CURRENT_TXN_PAYLOAD_VERSIONS'][TXN_AUTHOR_AGREEMENT] = '2'
    for node in rest_pool:
        node.write_manager.request_handlers[TXN_AUTHOR_AGREEMENT] = [
            orig_handlers[node.name]
        ]

    # Step 5. Send another TAA txn in new way without optional parameters
    text_2 = randomString(1024)
    version_2 = randomString(16)
    ratified_2 = get_utc_epoch() - 300
    res_0 = sdk_send_txn_author_agreement(looper, sdk_pool_handle,
                                          sdk_wallet_trustee, version_2,
                                          text_2, ratified=ratified_2)[1]

    # Step 6. Send another TAA txn in new way without optional parameter
    text = randomString(1024)
    version_3 = randomString(16)
    ratified_3 = get_utc_epoch() - 300
    sdk_send_txn_author_agreement(looper, sdk_pool_handle,
                                  sdk_wallet_trustee, version_3, text,
                                  ratified=ratified_3)

    # Step 7. Send taa updating for the second taa transaction (for checking
    # txn with optional parameter)
    retired_time = int(time.time()) + 20
    retired_time_in_past = int(time.time()) - 20
    sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee,
                                  version_2, retired=retired_time)

    # Step 8. Ensure, that all TAAs was written
    # v1-written TAA has neither digest nor ratification-ts fields in GET data
    res_1 = sdk_get_txn_author_agreement(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         version=version_1)[1]
    assert TXN_AUTHOR_AGREEMENT_DIGEST not in res_1['result']['data']
    assert TXN_AUTHOR_AGREEMENT_RATIFICATION_TS not in res_1['result']['data']
    assert res_1['result']['data'][TXN_AUTHOR_AGREEMENT_VERSION] == version_1
    res_2 = sdk_get_txn_author_agreement(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         version=version_2)[1]
    check_result_contains_expected_taa_data(res_2, version_2, ratified_2,
                                            retired_time)
    res_3 = sdk_get_txn_author_agreement(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         version=version_3)[1]
    check_result_contains_expected_taa_data(res_3, version_3, ratified_3)

    # Step 9. Return previous disconnected node back
    node_to_stop = start_stopped_node(node_to_stop, looper, tconf, tdir,
                                      allPluginsPath)
    txnPoolNodeSet = rest_pool + [node_to_stop]

    # Step 10. Ensure that all nodes have the same data
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Step 11. Send another taa txns for checking pool writability
    text = randomString(1024)
    version_4 = randomString(16)
    ratified_4 = get_utc_epoch() - 300
    sdk_send_txn_author_agreement(looper, sdk_pool_handle,
                                  sdk_wallet_trustee, version_4, text,
                                  ratified=ratified_4)

    # Step 12. Ensure that all nodes have the same data
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Step 13. Retire TAA written using old handler, make sure ratification
    # date is not spoiled
    sdk_send_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_trustee,
                                  version_1, retired=retired_time_in_past)
    res_1 = sdk_get_txn_author_agreement(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         version=version_1)[1]
    check_result_contains_expected_taa_data(res_1, version_1,
                                            taa_1_ratification_ts,
                                            retired_time_in_past)

    # Step 14. Disable TAAs written using old handler, make sure ratification
    # date is not spoiled
    disable_res = sdk_send_txn_author_agreement_disable(
        looper, sdk_pool_handle, sdk_wallet_trustee)[1]
    res_0 = sdk_get_txn_author_agreement(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         version=version_0)[1]
    check_result_contains_expected_taa_data(
        res_0, version_0, taa_0_ratification_ts,
        get_txn_time(disable_res['result']))
    res_1 = sdk_get_txn_author_agreement(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         version=version_1)[1]
    check_result_contains_expected_taa_data(res_1, version_1,
                                            taa_1_ratification_ts,
                                            retired_time_in_past)
def test_new_node_catchup_update_projection(looper, nodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, some_transactions_done): """ A node which receives txns from catchup updates both ledger and projection 4 nodes start up and some txns happen, after txns are done, new node joins and starts catching up, the node should not process requests while catchup is in progress. Make sure the new requests are coming from the new NYMs added while the node was offline or catching up. """ # Create a new node and stop it. new_steward_wallet, new_node = sdk_node_theta_added(looper, nodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, node_config_helper_class=NodeConfigHelper, testNodeClass=TestNode) waitNodeDataEquality(looper, new_node, *nodeSet[:-1]) ta_count = 2 np_count = 2 new_txn_count = 2 * ta_count + np_count # Since ATTRIB txn is done for TA old_ledger_sizes = {} new_ledger_sizes = {} old_projection_sizes = {} new_projection_sizes = {} old_seq_no_map_sizes = {} new_seq_no_map_sizes = {} def get_ledger_size(node): return len(node.domainLedger) def get_projection_size(node): domain_state = node.getState(DOMAIN_LEDGER_ID) return len(domain_state.as_dict) def get_seq_no_map_size(node): return node.seqNoDB.size def fill_counters(ls, ps, ss, nodes): for n in nodes: ls[n.name] = get_ledger_size(n) ps[n.name] = get_projection_size(n) ss[n.name] = get_seq_no_map_size(n) def check_sizes(nodes): for node in nodes: assert new_ledger_sizes[node.name] - \ old_ledger_sizes[node.name] == new_txn_count assert new_projection_sizes[node.name] - \ old_projection_sizes[node.name] == new_txn_count assert new_seq_no_map_sizes[node.name] - \ old_seq_no_map_sizes[node.name] == new_txn_count # Stop a node and note down the sizes of ledger and projection (state) other_nodes = nodeSet[:-1] fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, other_nodes) new_node.cleanupOnStopping = False # new_node.stop() # 
looper.removeProdable(new_node) # ensure_node_disconnected(looper, new_node, other_nodes) disconnect_node_and_ensure_disconnected(looper, nodeSet, new_node.name) looper.removeProdable(name=new_node.name) trust_anchors = [] attributes = [] for i in range(ta_count): trust_anchors.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, role='TRUST_ANCHOR', alias='TA' + str(i))) attributes.append((randomString(6), randomString(10))) sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1], *attributes[-1]) non_privileged = [] for i in range(np_count): non_privileged.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, alias='NP' + str(i))) checkNodeDataForEquality(nodeSet[0], *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, other_nodes) # The size difference should be same as number of new NYM txns check_sizes(other_nodes) new_node = start_stopped_node(new_node, looper, tconf, tdir, allPluginsPath) nodeSet[-1] = new_node fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, [new_node]) looper.run(checkNodesConnected(nodeSet)) waitNodeDataEquality(looper, new_node, *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, [new_node]) check_sizes([new_node]) # Set the old counters to be current ledger and projection size fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, nodeSet) more_nyms_count = 2 for wh in trust_anchors: for i in range(more_nyms_count): non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh, alias='NP1' + str(i))) # The new node should process transactions done by Nyms added to its # ledger while catchup fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, nodeSet) new_txn_count = more_nyms_count * len(trust_anchors) check_sizes(nodeSet)
def test_new_node_catchup_update_projection(looper, nodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, some_transactions_done): """ A node which receives txns from catchup updates both ledger and projection 4 nodes start up and some txns happen, after txns are done, new node joins and starts catching up, the node should not process requests while catchup is in progress. Make sure the new requests are coming from the new NYMs added while the node was offline or catching up. """ # Create a new node and stop it. new_steward_wallet, new_node = sdk_node_theta_added(looper, nodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_trustee, allPluginsPath, node_config_helper_class=NodeConfigHelper, testNodeClass=TestNode) waitNodeDataEquality(looper, new_node, *nodeSet[:-1]) ta_count = 2 np_count = 2 new_txn_count = 2 * ta_count + np_count # Since ATTRIB txn is done for TA old_ledger_sizes = {} new_ledger_sizes = {} old_projection_sizes = {} new_projection_sizes = {} old_seq_no_map_sizes = {} new_seq_no_map_sizes = {} def get_ledger_size(node): return len(node.domainLedger) def get_projection_size(node): domain_state = node.getState(DOMAIN_LEDGER_ID) return len(domain_state.as_dict) def get_seq_no_map_size(node): return node.seqNoDB.size def fill_counters(ls, ps, ss, nodes): for n in nodes: ls[n.name] = get_ledger_size(n) ps[n.name] = get_projection_size(n) ss[n.name] = get_seq_no_map_size(n) def check_sizes(nodes): for node in nodes: assert new_ledger_sizes[node.name] - \ old_ledger_sizes[node.name] == new_txn_count assert new_projection_sizes[node.name] - \ old_projection_sizes[node.name] == new_txn_count assert new_seq_no_map_sizes[node.name] - \ old_seq_no_map_sizes[node.name] == new_txn_count # Stop a node and note down the sizes of ledger and projection (state) other_nodes = nodeSet[:-1] fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, other_nodes) new_node.cleanupOnStopping = False # new_node.stop() # 
looper.removeProdable(new_node) # ensure_node_disconnected(looper, new_node, other_nodes) disconnect_node_and_ensure_disconnected(looper, nodeSet, new_node.name) looper.removeProdable(name=new_node.name) trust_anchors = [] attributes = [] for i in range(ta_count): trust_anchors.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, role='TRUST_ANCHOR', alias='TA' + str(i))) attributes.append((randomString(6), randomString(10))) sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1], *attributes[-1]) non_privileged = [] for i in range(np_count): non_privileged.append( sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee, alias='NP' + str(i))) checkNodeDataForEquality(nodeSet[0], *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, other_nodes) # The size difference should be same as number of new NYM txns check_sizes(other_nodes) new_node = start_stopped_node(new_node, looper, tconf, tdir, allPluginsPath) nodeSet[-1] = new_node fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, [new_node]) looper.run(checkNodesConnected(nodeSet)) sdk_pool_refresh(looper, sdk_pool_handle) waitNodeDataEquality(looper, new_node, *other_nodes) fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, [new_node]) check_sizes([new_node]) # Set the old counters to be current ledger and projection size fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes, nodeSet) more_nyms_count = 2 for wh in trust_anchors: for i in range(more_nyms_count): non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh, alias='NP1' + str(i))) # The new node should process transactions done by Nyms added to its # ledger while catchup fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes, nodeSet) new_txn_count = more_nyms_count * len(trust_anchors) check_sizes(nodeSet)