def two_requests(looper, helpers, nodeSetWithIntegratedTokenPlugin,
                 sdk_pool_handle, fees_set, address_main, mint_tokens,
                 sdk_wallet_steward):
    """Build two NYM-with-fees requests sharing one payload digest.

    Prepares and signs a single NYM request, then installs it (via deep
    copies) as the template returned by ``helpers.request.nym`` so that
    ``nyms_with_fees`` produces two requests over the same payload.

    :return: the pair ``(req1, req2)`` — same ``payload_digest``,
             different full ``digest``.
    """
    amount = get_amount_from_token_txn(mint_tokens)
    init_seq_no = 1
    seed = randomString(32)
    alias = randomString(5)
    # (removed unused unpacking of the wallet handle from sdk_wallet_steward)
    nym_request, new_did = looper.loop.run_until_complete(
        prepare_nym_request(sdk_wallet_steward, seed, alias, None))
    nym_request = sdk_sign_request_objects(
        looper, sdk_wallet_steward,
        [sdk_json_to_request_object(json.loads(nym_request))])[0]
    req_obj = sdk_json_to_request_object(json.loads(nym_request))
    # Every generated NYM is a deep copy of the same signed request, so both
    # produced requests carry an identical payload.
    helpers.request.nym = lambda: copy.deepcopy(req_obj)
    req1, req2 = nyms_with_fees(2,
                                helpers,
                                fees_set,
                                address_main,
                                amount,
                                init_seq_no=init_seq_no)
    # Same payload, but distinct fees/signature sections -> distinct digests.
    assert req1.payload_digest == req2.payload_digest
    assert req1.digest != req2.digest
    return req1, req2
def test_last_committed_after_catchup(looper, helpers,
                                      nodeSetWithIntegratedTokenPlugin,
                                      sdk_pool_handle, fees_set,
                                      address_main, mint_tokens):
    """After catchup, a commit-delayed node's last-committed tracker matches the pool's.

    One node has its COMMIT messages delayed while a NYM with fees is
    ordered by the rest of the pool; the node then catches up and its
    fee-tracker state must equal that of an undelayed node.
    """
    node_set = nodeSetWithIntegratedTokenPlugin
    reverted_node = node_set[-1]
    amount = get_amount_from_token_txn(mint_tokens)
    init_seq_no = 1
    request_1, request_2 = nyms_with_fees(2, helpers, fees_set, address_main,
                                          amount, init_seq_no=init_seq_no)
    reverted_last_committed = get_last_committed_from_tracker(reverted_node)
    # BUG FIX: the baseline must come from a node that will NOT be delayed.
    # The original read node_set[-1], which is reverted_node itself, making
    # the pre-check a self-comparison; the final assert below uses
    # node_set[0], confirming the intended reference node.
    not_reverted_last_committed = get_last_committed_from_tracker(node_set[0])
    assert reverted_last_committed == not_reverted_last_committed
    with delay_rules(reverted_node.nodeIbStasher, cDelay()):
        """
        Send NYM with FEES and wait for reply.
        """
        r = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                        helpers.request._steward_wallet,
                                        request_1)
        sdk_get_and_check_replies(looper, [r])
        """
        Start catchup. Uncommitted batch for reverted_node should be rejected and it will get NYM with FEES during catchup procedure.
        """
        reverted_node.start_catchup()
        looper.run(eventually(
            lambda: assertExp(reverted_node.mode == Mode.participating)))
        assert get_last_committed_from_tracker(reverted_node) == \
            get_last_committed_from_tracker(node_set[0])
def test_apply_several_batches_with_several_txns(
        looper, helpers, nodeSetWithIntegratedTokenPlugin,
        sdk_pool_handle, fees_set, address_main, mint_tokens):
    """Order NUM_BATCHES * TXN_IN_BATCH NYMs with fees.

    Both the domain ledger and the token ledger must each grow by exactly
    NUM_BATCHES * TXN_IN_BATCH committed transactions.
    """
    starting_amount = get_amount_from_token_txn(mint_tokens)
    first_seq_no = 1
    pool = nodeSetWithIntegratedTokenPlugin
    requests = nyms_with_fees(NUM_BATCHES * TXN_IN_BATCH,
                              helpers,
                              fees_set,
                              address_main,
                              starting_amount,
                              init_seq_no=first_seq_no)
    domain_before = get_committed_txns_count_for_pool(pool, DOMAIN_LEDGER_ID)
    token_before = get_committed_txns_count_for_pool(pool, TOKEN_LEDGER_ID)
    submitted = []
    for request in requests:
        submitted.append(
            sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                        helpers.request._steward_wallet,
                                        request))
        # Give the pool a pre-prepare interval between submissions.
        looper.runFor(waits.expectedPrePrepareTime(len(pool)))
    sdk_get_and_check_replies(looper, submitted)
    domain_after = get_committed_txns_count_for_pool(pool, DOMAIN_LEDGER_ID)
    token_after = get_committed_txns_count_for_pool(pool, TOKEN_LEDGER_ID)
    assert domain_after - domain_before == NUM_BATCHES * TXN_IN_BATCH
    assert token_after - token_before == NUM_BATCHES * TXN_IN_BATCH
def test_malicious_primary_sent_pp(looper, helpers,
                                   nodeSetWithIntegratedTokenPlugin,
                                   sdk_pool_handle, fees_set, address_main,
                                   mint_tokens):
    """A request that every non-primary node rejects must be rejected pool-wide.

    All nodes except the primary are patched so dynamic validation raises
    ``InvalidClientMessageException``; the client must then receive a
    REJECT rather than a REPLY.
    """
    def _always_invalid(*args, **kwargs):
        raise InvalidClientMessageException(1, 2, 3)

    pool = nodeSetWithIntegratedTokenPlugin
    tokens = get_amount_from_token_txn(mint_tokens)
    first_seq_no = 1
    request1, request2 = nyms_with_fees(2, helpers, fees_set, address_main,
                                        tokens, init_seq_no=first_seq_no)
    malicious_primary = getPrimaryReplica(pool).node
    # Every node except the (malicious) primary fails dynamic validation.
    for node in set(pool) - {malicious_primary}:
        node.doDynamicValidation = _always_invalid
    submitted = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                            helpers.request._steward_wallet,
                                            request1)
    with pytest.raises(RequestRejectedException,
                       match="client request invalid"):
        sdk_get_and_check_replies(looper, [submitted])
def test_apply_several_batches(looper, helpers,
                               nodeSetWithIntegratedTokenPlugin,
                               sdk_pool_handle, fees_set,
                               address_main, mint_tokens):
    """Delayed commits keep fee txns uncommitted; lifting the delay commits both.

    With COMMITs delayed on every node, two NYMs with fees are applied but
    not committed; once the delay rules expire, both must land in the token
    ledger.
    """
    stashers = [node.nodeIbStasher for node in nodeSetWithIntegratedTokenPlugin]
    tokens = get_amount_from_token_txn(mint_tokens)
    first_seq_no = 1
    request1, request2 = nyms_with_fees(2, helpers, fees_set, address_main,
                                        tokens, init_seq_no=first_seq_no)
    expected_txns_length = 2
    committed_before = get_committed_txns_count_for_pool(
        nodeSetWithIntegratedTokenPlugin, TOKEN_LEDGER_ID)
    with delay_rules(stashers, cDelay()):
        replies1 = sdk_send_signed_requests(sdk_pool_handle,
                                            [json.dumps(request1.as_dict)])
        replies2 = sdk_send_signed_requests(sdk_pool_handle,
                                            [json.dumps(request2.as_dict)])
        # While commits are delayed, every node must hold the two txns in
        # its uncommitted token ledger.
        for node in nodeSetWithIntegratedTokenPlugin:
            looper.run(
                eventually(check_uncommitted_txn, node, expected_txns_length,
                           TOKEN_LEDGER_ID, retryWait=0.2, timeout=15))
    sdk_get_and_check_replies(looper, replies1)
    sdk_get_and_check_replies(looper, replies2)
    committed_after = get_committed_txns_count_for_pool(
        nodeSetWithIntegratedTokenPlugin, TOKEN_LEDGER_ID)
    assert committed_after - committed_before == expected_txns_length
def test_multiple_batches_for_pool(looper, helpers,
                                   nodeSetWithIntegratedTokenPlugin,
                                   sdk_pool_handle, fees_set, address_main,
                                   mint_tokens):
    """Catchup on every node while commits are delayed reverts all uncommitted fee batches.

    Two NYMs with fees are sent with COMMITs delayed pool-wide, so the
    batches stay uncommitted everywhere.  Each node then restarts catchup;
    since nothing was committed anywhere, the token ledger size must be
    unchanged afterwards.
    """
    # NOTE(review): a test with this exact name is defined again later in
    # this file (with extra request-queue clearing).  If both live in one
    # module, pytest collects only the later definition and this one is
    # shadowed — confirm and rename one of them.
    node_set = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in node_set]
    amount = get_amount_from_token_txn(mint_tokens)
    init_seq_no = 1
    request1, request2 = nyms_with_fees(2, helpers, fees_set, address_main,
                                        amount, init_seq_no=init_seq_no)
    txns_count_before = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
    with delay_rules(node_stashers, cDelay()):
        # Wait a pre-prepare interval between sends so each request lands
        # in its own 3PC batch.
        r1 = sdk_send_signed_requests(sdk_pool_handle,
                                      [json.dumps(request1.as_dict)])
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        r2 = sdk_send_signed_requests(sdk_pool_handle,
                                      [json.dumps(request2.as_dict)])
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        # Catchup while commits are still delayed: uncommitted batches must
        # be reverted on every node.
        for n in node_set:
            n.start_catchup()
        for n in node_set:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
    txns_count_after = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
    # Nothing was committed anywhere, so the reverts leave the ledger as-is.
    assert txns_count_after == txns_count_before
    ensure_all_nodes_have_same_data(looper, node_set)
def test_revert_works_for_fees_before_catch_up_on_one_node(
        looper, helpers, nodeSetWithIntegratedTokenPlugin, sdk_pool_handle,
        fees_set, address_main, mint_tokens):
    """One commit-delayed node reverts its uncommitted fee batch, then recovers it via catchup.

    The delayed node's committed token ledger root must end up equal to the
    rest of the pool's, and a follow-up NYM with fees must still change the
    pool-wide committed root.
    """
    node_set = nodeSetWithIntegratedTokenPlugin
    reverted_node = node_set[-1]
    amount = get_amount_from_token_txn(mint_tokens)
    init_seq_no = 1
    request_1, request_2 = nyms_with_fees(2, helpers, fees_set, address_main,
                                          amount, init_seq_no=init_seq_no)
    c_ledger_root_before = get_committed_txn_root_for_pool([reverted_node],
                                                           TOKEN_LEDGER_ID)
    with delay_rules(reverted_node.nodeIbStasher, cDelay()):
        """
        Send NYM with FEES and wait for reply. All of nodes, except reverted_node will order them
        """
        r = sdk_send_signed_requests(sdk_pool_handle,
                                     [json.dumps(request_1.as_dict)])
        sdk_get_and_check_replies(looper, r)
        # Delayed node's uncommitted state now differs from its committed state.
        check_state(reverted_node, is_equal=False)
        c_ledger_root_for_other = get_committed_txn_root_for_pool(
            node_set[:-1], TOKEN_LEDGER_ID)
        """
        Start catchup. Uncommitted batch for reverted_node should be rejected and it will get NYM with FEES during catchup procedure. 
        """
        reverted_node.start_catchup()
        looper.run(
            eventually(
                lambda: assertExp(reverted_node.mode == Mode.participating)))
        # After catchup the revert happened and state is consistent again.
        check_state(reverted_node, is_equal=True)
    """
    Check, that committed txn root was changed and it's the same as for others
    """
    c_ledger_root_after = get_committed_txn_root_for_pool([reverted_node],
                                                          TOKEN_LEDGER_ID)
    assert c_ledger_root_after != c_ledger_root_before
    assert c_ledger_root_after == c_ledger_root_for_other
    ensure_all_nodes_have_same_data(looper, node_set)
    c_ledger_root_before = get_committed_txn_root_for_pool(
        node_set, TOKEN_LEDGER_ID)
    """
    Send another NYM with FEES and check, that committed ledger's root was changed
    """
    r = sdk_send_signed_requests(sdk_pool_handle,
                                 [json.dumps(request_2.as_dict)])
    sdk_get_and_check_replies(looper, r)
    c_ledger_root_after = get_committed_txn_root_for_pool(
        node_set, TOKEN_LEDGER_ID)
    assert c_ledger_root_after != c_ledger_root_before
    ensure_all_nodes_have_same_data(looper, node_set)
    for n in node_set:
        check_state(n, is_equal=True)
def test_catchup_several_batches(looper, helpers,
                                 nodeSetWithIntegratedTokenPlugin,
                                 sdk_pool_handle, fees_set,
                                 address_main, mint_tokens):
    """A commit-delayed node accumulates fee batches and recovers them via catchup.

    NUM_BATCHES * TXN_IN_BATCH requests are ordered while one node's
    commits are delayed; after the node catches up, one extra request
    verifies the pool still functions.
    """
    starting_amount = get_amount_from_token_txn(mint_tokens)
    first_seq_no = 1
    pool = nodeSetWithIntegratedTokenPlugin
    """
    Prepare NUM_BATCHES * TXN_IN_BATCH requests and 1 for checking pool functional
    """
    all_requests = nyms_with_fees(NUM_BATCHES * TXN_IN_BATCH + 1,
                                  helpers,
                                  fees_set,
                                  address_main,
                                  starting_amount,
                                  init_seq_no=first_seq_no)
    catchup_requests, probe_request = all_requests[:-1], all_requests[-1]
    lagging_node = pool[-1]
    with delay_rules(lagging_node.nodeIbStasher, cDelay()):
        batches_before = len(
            lagging_node.master_replica._ordering_service.batches)
        submitted = []
        for request in catchup_requests:
            submitted.append(
                sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                            helpers.request._steward_wallet,
                                            request))
            looper.runFor(waits.expectedPrePrepareTime(len(pool)))
        batches_after = len(
            lagging_node.master_replica._ordering_service.batches)
        """
        Checks, that we have a 2 new batches
        """
        assert batches_after - batches_before == NUM_BATCHES
        sdk_get_and_check_replies(looper, submitted)
        lagging_node.start_catchup()
        looper.run(
            eventually(
                lambda: assertExp(lagging_node.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, pool)
    probe = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                        helpers.request._steward_wallet,
                                        probe_request)
    sdk_get_and_check_replies(looper, [probe])
    ensure_all_nodes_have_same_data(looper, pool)
def test_multiple_batches_for_pool(looper, helpers,
                                   nodeSetWithIntegratedTokenPlugin,
                                   sdk_pool_handle, fees_set, address_main,
                                   mint_tokens):
    """Pool-wide commit delay plus catchup on every node reverts all uncommitted fee batches.

    Two NYMs with fees are submitted (one 3PC batch each) with COMMITs
    delayed everywhere; all nodes then catch up with their request queues
    cleared, so nothing is re-ordered and the token ledger stays unchanged.
    """
    pool = nodeSetWithIntegratedTokenPlugin
    stashers = [node.nodeIbStasher for node in pool]
    tokens = get_amount_from_token_txn(mint_tokens)
    first_seq_no = 1
    request1, request2 = nyms_with_fees(2, helpers, fees_set, address_main,
                                        tokens, init_seq_no=first_seq_no)
    committed_before = get_committed_txns_count_for_pool(pool,
                                                         TOKEN_LEDGER_ID)
    with delay_rules(stashers, cDelay()):
        sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                    helpers.request._steward_wallet,
                                    request1)
        looper.runFor(waits.expectedPrePrepareTime(len(pool)))
        sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                    helpers.request._steward_wallet,
                                    request2)
        looper.runFor(waits.expectedPrePrepareTime(len(pool)))
        for node in pool:
            node.start_catchup()
        # clear all request queues to not re-send the same reqs after catch-up
        for node in pool:
            node.requests.clear()
            for replica in node.replicas.values():
                for queue in replica._ordering_service.requestQueues.values():
                    queue.clear()
        for node in pool:
            looper.run(
                eventually(
                    lambda: assertExp(node.mode == Mode.participating)))
    committed_after = get_committed_txns_count_for_pool(pool,
                                                        TOKEN_LEDGER_ID)
    # Nothing was committed anywhere, so the ledger must be unchanged.
    assert committed_after == committed_before
    ensure_all_nodes_have_same_data(looper, pool)
def test_multiple_batches_for_one_node(looper, helpers,
                                       nodeSetWithIntegratedTokenPlugin,
                                       sdk_pool_handle, fees_set,
                                       address_main, mint_tokens):
    """A single commit-delayed node reverts its batches and recovers them through catchup.

    Two NYMs with fees are ordered by the rest of the pool while one node's
    commits are delayed; after that node catches up, the committed token
    ledger must have grown by two, and a third request checks pool health.
    """
    pool = nodeSetWithIntegratedTokenPlugin
    lagging_node = pool[-1]
    tokens = get_amount_from_token_txn(mint_tokens)
    first_seq_no = 1
    request1, request2, request3 = nyms_with_fees(3, helpers, fees_set,
                                                  address_main, tokens,
                                                  init_seq_no=first_seq_no)
    expected_txns_length = 2
    committed_before = get_committed_txns_count_for_pool(pool,
                                                         TOKEN_LEDGER_ID)
    with delay_rules(lagging_node.nodeIbStasher, cDelay()):
        sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                    helpers.request._steward_wallet,
                                    request1)
        looper.runFor(waits.expectedPrePrepareTime(len(pool)))
        sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                    helpers.request._steward_wallet,
                                    request2)
        looper.runFor(waits.expectedPrePrepareTime(len(pool)))
        lagging_node.start_catchup()
        looper.run(
            eventually(
                lambda: assertExp(lagging_node.mode == Mode.participating)))
    committed_after = get_committed_txns_count_for_pool(pool,
                                                        TOKEN_LEDGER_ID)
    assert committed_after - committed_before == expected_txns_length
    ensure_all_nodes_have_same_data(looper, pool)
    probe = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                        helpers.request._steward_wallet,
                                        request3)
    sdk_get_and_check_replies(looper, [probe])
    ensure_all_nodes_have_same_data(looper, pool)
def test_ordering_with_fees_and_without_fees(looper, helpers,
                                             nodeSetWithIntegratedTokenPlugin,
                                             sdk_pool_handle,
                                             sdk_wallet_steward,
                                             fees_set, address_main,
                                             mint_tokens, fees):
    """Interleave NYMs with and without fees while commits are delayed.

    Sequence (all queued behind a pool-wide COMMIT delay): NYM with fees,
    unset fees, NYM without fees, set fees back, NYM with fees.  Once the
    delay lifts, all must order successfully: three domain txns, two token
    (fee) txns.
    """
    node_set = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in nodeSetWithIntegratedTokenPlugin]
    committed_tokens_before = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
    committed_domain_before = get_committed_txns_count_for_pool(
        node_set, DOMAIN_LEDGER_ID)
    """
    We will try to send a 1 NYM txn with fees and 1 NYM without fees and 1 with fees
    In that case we expect, that we will have 3 domain txn and 2 token txn in ledgers
    """
    expected_domain_txns_count = 3
    expected_token_txns_count = 2
    with delay_rules(node_stashers, cDelay()):
        amount = get_amount_from_token_txn(mint_tokens)
        init_seq_no = 1
        request_1, request_2 = nyms_with_fees(2, helpers, fees_set,
                                              address_main, amount,
                                              init_seq_no=init_seq_no)
        """
        Sending 1 NYM txn with fees
        """
        r_with_1 = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                               helpers.request._steward_wallet,
                                               request_1)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Unset fees for pool
        """
        # Zero out every fee alias so the next NYM is accepted without fees.
        r_unset_fees = helpers.general.set_fees_without_waiting(
            {k: 0 for (k, v) in fees.items()})
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Sending 1 NYM txn without fees
        """
        r_without = sdk_send_new_nym(looper, sdk_pool_handle,
                                     sdk_wallet_steward)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Set fees for pool
        """
        r_set_fees = helpers.general.set_fees_without_waiting(fees)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Send another NYM txn with fees
        """
        r_with_2 = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle,
                                               helpers.request._steward_wallet,
                                               request_2)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
    """
    Reset delays and check, that all txns was ordered successfully
    """
    # Leaving the `with` block lifts the commit delay; all five requests
    # must now receive replies in submission order.
    sdk_get_and_check_replies(looper, [r_with_1])
    sdk_get_and_check_replies(looper, r_unset_fees)
    sdk_get_and_check_replies(looper, r_without)
    sdk_get_and_check_replies(looper, r_set_fees)
    sdk_get_and_check_replies(looper, [r_with_2])
    committed_tokens_after = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
    committed_domain_after = get_committed_txns_count_for_pool(
        node_set, DOMAIN_LEDGER_ID)
    assert committed_domain_after - committed_domain_before == expected_domain_txns_count
    assert committed_tokens_after - committed_tokens_before == expected_token_txns_count
    ensure_all_nodes_have_same_data(looper, nodeSetWithIntegratedTokenPlugin)