def test_no_propagate_request_on_different_last_ordered_on_master_before_vc(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    '''
    Send random request and do view change then fast_nodes (1, 4 - without
    primary after next view change) are already ordered transaction on master
    and slow_nodes are not. Check ordering on slow_nodes.
    '''
    # One ordered request establishes a known last_ordered_3pc baseline.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    master_instance = txnPoolNodeSet[0].master_replica.instId
    slow_nodes = txnPoolNodeSet[1:3]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]
    nodes_stashers = [n.nodeIbStasher for n in slow_nodes]
    old_last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    # COMMITs are delayed "forever" on slow nodes, so only fast nodes can
    # order while the delay rule is active.
    with delay_rules(nodes_stashers, cDelay(delay=sys.maxsize)):
        # send one request
        requests = sdk_send_random_requests(looper, sdk_pool_handle,
                                            sdk_wallet_client, 1)
        last_ordered_for_slow = slow_nodes[0].master_replica.last_ordered_3pc
        old_view_no = txnPoolNodeSet[0].viewNo
        # Fast nodes order the new request; slow nodes stay behind.
        looper.run(
            eventually(check_last_ordered, fast_nodes, master_instance,
                       (old_view_no, old_last_ordered[1] + 1)))

        # trigger view change on all nodes
        for node in txnPoolNodeSet:
            node.view_changer.on_master_degradation()

        # wait for view change done on all nodes
        ensureElectionsDone(looper, txnPoolNodeSet)

    # Delays lifted: slow nodes can now process the stashed COMMITs.
    sdk_get_replies(looper, requests)
    looper.run(
        eventually(check_last_ordered, slow_nodes, master_instance,
                   (old_view_no, last_ordered_for_slow[1] + 1)))
    # No node should ever have had to request PROPAGATEs for the request.
    assert all(0 == node.spylog.count(node.request_propagates)
               for node in txnPoolNodeSet)
def testQueueingReqFromFutureView(delayed_perf_chk, looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client):
    """
    Test if every node queues 3 Phase requests(PRE-PREPARE, PREPARE and COMMIT)
    that come from a view which is greater than the current view.
    - Delay reception and processing of view change messages by a non primary
      for master instance
      => it starts receiving 3 phase commit messages for next view
    """
    lagging_node = get_last_master_non_primary_node(txnPoolNodeSet)
    old_view_no = lagging_node.viewNo

    # Delay processing of InstanceChange and ViewChangeDone so node stashes
    # 3PC messages
    delay_ic = 60
    lagging_node.nodeIbStasher.delay(icDelay(delay_ic))
    lagging_node.nodeIbStasher.delay(vcd_delay(delay_ic))
    logger.debug('{} will delay its view change'.format(lagging_node))

    def chk_fut_view(view_no, is_empty):
        # Returns how many messages the lagging node stashed for `view_no`;
        # asserts emptiness / non-emptiness as requested.
        length = len(lagging_node.msgsForFutureViews.get(view_no, ()))
        if is_empty:
            assert length == 0
        else:
            assert length > 0
        return length

    # No messages queued for future view
    chk_fut_view(old_view_no + 1, is_empty=True)
    logger.debug(
        '{} does not have any messages for future views'.format(lagging_node))

    # Every node except the lagging one should do a view change
    ensure_view_change(looper,
                       [n for n in txnPoolNodeSet if n != lagging_node],
                       [lagging_node])

    # send more requests that will be queued for the lagged node
    reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, 5)
    # Renamed from `l` — single-letter `l` is ambiguous (PEP 8 / E741)
    queued_count = looper.run(
        eventually(chk_fut_view, old_view_no + 1, False, retryWait=1))
    logger.debug('{} has {} messages for future views'.format(lagging_node,
                                                              queued_count))
    sdk_get_replies(looper, reqs)

    # reset delays for the lagging_node node so that it finally makes view
    # change
    lagging_node.reset_delays_and_process_delayeds()

    # Eventually no messages queued for future view
    looper.run(
        eventually(chk_fut_view, old_view_no + 1, True,
                   retryWait=1, timeout=delay_ic + 10))
    logger.debug(
        '{} exhausted pending messages for future views'.format(lagging_node))

    # Pool must still be able to order after the lagging node catches up
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 2)
def write(key, val, looper, sdk_pool_handle, sdk_wallet):
    """Write ``key``/``val`` to the config ledger and wait for the replies."""
    _, identifier = sdk_wallet
    operation = write_conf_op(key, val)
    request_objects = [sdk_gen_request(op, identifier=identifier)
                       for op in [operation]]
    signed_requests = sdk_sign_request_objects(looper, sdk_wallet,
                                               request_objects)
    submitted = sdk_send_signed_requests(sdk_pool_handle, signed_requests)
    sdk_get_replies(looper, submitted, timeout=10)
def test_replica_removing_in_ordering(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, chkFreqPatched,
                                      view_change):
    """
    1. Start ordering (send pre-prepares on backup)
    2. Remove replica
    3. Finish ordering
    4. Check monitor and replicas count
    """
    node = txnPoolNodeSet[0]
    start_replicas_count = node.replicas.num_replicas
    # The last backup instance is the one removed below.
    instance_id = start_replicas_count - 1
    stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    # Delay COMMITs everywhere so the request stays unordered while the
    # replica is removed.
    with delay_rules(stashers, cDelay(delay=sys.maxsize)):
        req = sdk_send_random_requests(looper, sdk_pool_handle,
                                       sdk_wallet_client, 1)

        def chk():
            assert len(node.requests) > 0

        looper.run(eventually(chk))
        digest = Request(**req[0][0]).digest
        old_forwarded_to = node.requests[digest].forwardedTo
        node.replicas.remove_replica(instance_id)
        # Removing a replica must decrement the request's forwardedTo count.
        assert old_forwarded_to - 1 == node.requests[digest].forwardedTo
    # Delays lifted: ordering completes on the remaining replicas.
    sdk_get_replies(looper, req)
    looper.run(eventually(check_checkpoint_finalize, txnPoolNodeSet))
    _check_replica_removed(node, start_replicas_count, instance_id)
    assert not node.monitor.isMasterDegraded()
    # Ordered request must have been freed.
    assert len(node.requests) == 0
def do_view_change_with_delayed_commits_on_all_but_one(nodes, nodes_without_one_stashers,
                                                       except_node, looper,
                                                       sdk_pool_handle,
                                                       sdk_wallet_client):
    """Run a view change while COMMITs are stalled on every node but one.

    ``except_node`` keeps ordering on its own; after the view change
    completes the delays are lifted and the pending replies collected.
    """
    expected_view_no = except_node.viewNo + 1
    ordered_before = except_node.master_replica.last_ordered_3pc

    # delay commits for all nodes except node X
    with delay_rules(nodes_without_one_stashers, cDelay(sys.maxsize)):
        # send one request
        pending_requests = sdk_send_random_requests(looper, sdk_pool_handle,
                                                    sdk_wallet_client, 1)

        def check_ordered(node: Node, expected_3pc):
            assert node.master_replica.last_ordered_3pc == expected_3pc

        # wait until except_node ordered txn
        looper.run(
            eventually(check_ordered, except_node,
                       (except_node.viewNo, ordered_before[1] + 1)))

        # trigger view change on all nodes
        for node in nodes:
            node.view_changer.on_master_degradation()

        # wait for view change done on all nodes
        looper.run(eventually(view_change_done, nodes, expected_view_no))

    # Delays lifted: the stalled nodes order the request and replies arrive.
    sdk_get_replies(looper, pending_requests)
def write(key, val, looper, sdk_pool_handle, sdk_wallet):
    """Submit a signed config-ledger write for ``key``/``val`` and await replies."""
    _, identifier = sdk_wallet
    ops = [write_conf_op(key, val)]
    request_objects = [sdk_gen_request(op, identifier=identifier) for op in ops]
    signed_requests = sdk_sign_request_objects(looper, sdk_wallet,
                                               request_objects)
    submitted = sdk_send_signed_requests(sdk_pool_handle, signed_requests)
    sdk_get_replies(looper, submitted, timeout=10)
def do_view_change_with_delayed_commits_on_all_but_one(nodes, nodes_without_one_stashers,
                                                       except_node, looper,
                                                       sdk_pool_handle,
                                                       sdk_wallet_client):
    """View change with COMMITs delayed on all nodes except ``except_node``.

    The unhindered node must order the in-flight request before the view
    change is triggered; the request's replies are collected once the
    delay rules are dropped.
    """
    next_view = except_node.viewNo + 1
    baseline_3pc = except_node.master_replica.last_ordered_3pc

    # delay commits for all nodes except node X
    with delay_rules(nodes_without_one_stashers, cDelay(sys.maxsize)):
        # send one request
        in_flight = sdk_send_random_requests(looper, sdk_pool_handle,
                                             sdk_wallet_client, 1)

        def assert_last_ordered(node: Node, expected):
            assert node.master_replica.last_ordered_3pc == expected

        # wait until except_node ordered txn
        looper.run(
            eventually(assert_last_ordered, except_node,
                       (except_node.viewNo, baseline_3pc[1] + 1)))

        # trigger view change on all nodes
        for node in nodes:
            node.view_changer.on_master_degradation()

        # wait for view change done on all nodes
        looper.run(eventually(view_change_done, nodes, next_view))

    sdk_get_replies(looper, in_flight)
def test_all_replicas_hold_request_keys(
        perf_chk_patched,
        looper,
        txnPoolNodeSet,
        sdk_wallet_client,
        sdk_pool_handle):
    """
    All replicas whether primary or non primary hold request keys of forwarded
    requests. Once requests are ordered, they request keys are removed from
    replica.
    """
    tconf = perf_chk_patched
    delay_3pc = 2
    # Slow down 3PC on both instances so the queues can be inspected.
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r.requestQueues[DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    # The primary consumes keys when creating batches.
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sdk_signed_random_requests(looper,
                                      sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper, req_resps, timeout=sdk_eval_timeout(
        tconf.Max3PCBatchSize - 1, len(txnPoolNodeSet),
        add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it.

    # Delay nominations so the view change below takes a while, keeping
    # request keys observable.
    delay = 1
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(nom_delay(delay))

    ensure_view_change(looper, txnPoolNodeSet)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      2 * tconf.Max3PCBatchSize)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    looper.run(eventually(chk, 2 * tconf.Max3PCBatchSize))

    # Since each nomination is delayed and there will be multiple nominations
    # so adding some extra time
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
        len(txnPoolNodeSet) * delay
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    sdk_get_replies(looper, req_resps, timeout=timeout)
    looper.run(eventually(chk, 0))
def test_all_replicas_hold_request_keys(perf_chk_patched, looper,
                                        txnPoolNodeSet, sdk_wallet_client,
                                        sdk_pool_handle):
    """
    All replicas whether primary or non primary hold request keys of forwarded
    requests. Once requests are ordered, they request keys are removed from
    replica.
    """
    tconf = perf_chk_patched
    delay_3pc = 2
    # Slow 3PC traffic on both instances so the request queues stay populated
    # long enough to be asserted on.
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r.requestQueues[DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    # Primary drains its queue when it batches requests.
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper, req_resps,
                    timeout=sdk_eval_timeout(tconf.Max3PCBatchSize - 1,
                                             len(txnPoolNodeSet),
                                             add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it.

    # Delay nominations to prolong the election after the view change.
    delay = 1
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(nom_delay(delay))

    ensure_view_change(looper, txnPoolNodeSet)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      2 * tconf.Max3PCBatchSize)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    looper.run(eventually(chk, 2 * tconf.Max3PCBatchSize))

    # Since each nomination is delayed and there will be multiple nominations
    # so adding some extra time
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
        len(txnPoolNodeSet) * delay
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    sdk_get_replies(looper, req_resps, timeout=timeout)
    looper.run(eventually(chk, 0))
def test_catchup_with_old_txn_metadata_digest_format(tdir, tconf, looper,
                                                     txnPoolNodeSet,
                                                     sdk_pool_handle,
                                                     sdk_wallet_client,
                                                     monkeypatch):
    """A node must be able to catch up transactions whose payload metadata
    uses the old (single-digest) format."""
    lagging_node = txnPoolNodeSet[-1]
    lagging_stasher = lagging_node.nodeIbStasher
    other_nodes = txnPoolNodeSet[:-1]

    # Utility
    def check_nodes_domain_ledger(nodes: Iterable, txn_count: int):
        for node in nodes:
            assert node.domainLedger.size >= txn_count

    # Patch payload metadata, note that it will prevent pool from sending
    # adequate replies to clients
    def append_old_payload_metadata(txn, frm=None, req_id=None, digest=None,
                                    payload_digest=None, taa_acceptance=None,
                                    endorser=None):
        # Build metadata the normal way, then rewrite it to the legacy shape:
        # drop the payload digest field and store it under the plain digest key.
        txn = append_payload_metadata(txn, frm, req_id, digest, payload_digest,
                                      taa_acceptance, endorser)
        metadata = txn[TXN_PAYLOAD][TXN_PAYLOAD_METADATA]
        del metadata[TXN_PAYLOAD_METADATA_PAYLOAD_DIGEST]
        metadata[TXN_PAYLOAD_METADATA_DIGEST] = payload_digest
        return txn

    monkeypatch.setattr(txn_util, 'append_payload_metadata',
                        append_old_payload_metadata)

    # Check pool initial state
    initial_size = txnPoolNodeSet[0].domainLedger.size
    for node in txnPoolNodeSet:
        assert node.domainLedger.size == initial_size

    # Order some transactions, with one node discarding messages
    with delay_rules_without_processing(lagging_stasher, delay_3pc()):
        reps = sdk_send_random_requests(looper, sdk_pool_handle,
                                        sdk_wallet_client, 10)
        looper.run(
            eventually(check_nodes_domain_ledger, other_nodes,
                       initial_size + 10))
        # The lagging node dropped its 3PC messages, so it is behind.
        assert lagging_node.domainLedger.size == initial_size

    # Catchup transactions and ensure that all nodes will eventually have
    # same data
    lagging_node.start_catchup()
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Catch replies
    sdk_get_replies(looper, reps)
def test_revoc_entry_static_validation_on_size(revoc_entry, looper,
                                               txnPoolNodeSet,
                                               sdk_pool_handle,
                                               sdk_wallet_steward):
    """Submit the prepared revocation entry and compare the pool's answer
    against the expected op from the fixture."""
    expected_op, request = revoc_entry
    submitted = sdk_send_signed_requests(sdk_pool_handle,
                                         [json.dumps(request)])
    first_reply = sdk_get_replies(looper, submitted)[0][1]
    assert expected_op == first_reply['op']
def read(key, looper, sdk_pool_handle, sdk_wallet):
    """Read ``key`` from the config ledger and return its stored value."""
    _, identifier = sdk_wallet
    operation = read_conf_op(key)
    request_objects = [sdk_gen_request(op, identifier=identifier)
                       for op in [operation]]
    signed_requests = sdk_sign_request_objects(looper, sdk_wallet,
                                               request_objects)
    submitted = sdk_send_signed_requests(sdk_pool_handle, signed_requests)
    (_request, reply), = sdk_get_replies(looper, submitted, timeout=10)
    return json.loads(reply['result'][DATA])[key]
def read(key, looper, sdk_pool_handle, sdk_wallet):
    """Fetch the config-ledger value stored under ``key``."""
    _, identifier = sdk_wallet
    ops = [read_conf_op(key)]
    request_objects = [sdk_gen_request(op, identifier=identifier) for op in ops]
    signed_requests = sdk_sign_request_objects(looper, sdk_wallet,
                                               request_objects)
    submitted = sdk_send_signed_requests(sdk_pool_handle, signed_requests)
    (_request, reply), = sdk_get_replies(looper, submitted, timeout=10)
    payload = json.loads(reply['result'][DATA])
    return payload[key]
def test_view_change_during_unstash(looper, txnPoolNodeSet, sdk_pool_handle,
                                    sdk_wallet_client, tconf):
    """A slow node that catches up and then unstashes delayed 3PC messages
    must stay consistent with the pool through a concurrent view change."""
    slow_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    slow_stasher = slow_node.nodeIbStasher
    other_stashers = [n.nodeIbStasher for n in other_nodes]
    all_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]

    # Preload nodes with some transactions
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    for node in txnPoolNodeSet:
        assert node.master_replica.last_ordered_3pc == (0, 1)

    # Prevent ordering of some requests
    start_delaying(all_stashers, delay_3pc(after=7, msgs=(Prepare, Commit)))

    # Stop ordering on slow node and send requests
    slow_node_after_5 = start_delaying(slow_stasher,
                                       delay_3pc(after=5, msgs=Commit))
    slow_node_until_5 = start_delaying(slow_stasher, delay_3pc(after=0))
    reqs_view_0 = sdk_send_random_requests(looper, sdk_pool_handle,
                                           sdk_wallet_client, 8)

    # Make pool order first 2 batches and pause
    pool_after_3 = start_delaying(other_stashers, delay_3pc(after=3))
    looper.run(eventually(check_nodes_ordered_till, other_nodes, 0, 3))

    # Start catchup, continue ordering everywhere (except two last batches
    # on slow node)
    with delay_rules(slow_stasher, cr_delay()):
        slow_node._do_start_catchup(just_started=False)
        looper.run(eventually(check_catchup_is_started, slow_node))
        stop_delaying_and_process(pool_after_3)
        looper.run(eventually(check_nodes_ordered_till, other_nodes, 0, 7))

    # Finish catchup and continue processing on slow node
    looper.run(eventually(check_catchup_is_finished, slow_node))
    stop_delaying_and_process(slow_node_until_5)
    looper.run(eventually(check_nodes_ordered_till, [slow_node], 0, 5))

    # Start view change and allow slow node to get remaining commits
    with delay_rules(all_stashers, icDelay()):
        for node in txnPoolNodeSet:
            node.view_changer.on_master_degradation()
        looper.runFor(0.1)
    stop_delaying_and_process(slow_node_after_5)

    # Ensure that expected number of requests was ordered
    replies = sdk_get_replies(looper, reqs_view_0)
    for rep in replies[:6]:
        sdk_check_reply(rep)

    # Ensure that everything is ok
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
def test_unordered_request_freed_on_replica_removal(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, chkFreqPatched, view_change): node = txnPoolNodeSet[0] # Stabilize checkpoint # Send one more request to stabilize checkpoint sdk_send_random_and_check( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ - get_pp_seq_no(txnPoolNodeSet) % CHK_FREQ) old_stable_checkpoint = node.master_replica._consensus_data.stable_checkpoint stashers = [n.nodeIbStasher for n in txnPoolNodeSet] with delay_rules(stashers, cDelay(delay=sys.maxsize), msg_rep_delay(types_to_delay=[COMMIT])): req = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) looper.runFor( waits.expectedPropagateTime(len(txnPoolNodeSet)) + waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + waits.expectedPrepareTime(len(txnPoolNodeSet)) + waits.expectedCommittedTime(len(txnPoolNodeSet))) f_d, f_r = get_forwarded_to_all(node) assert f_d node.replicas.remove_replica(node.replicas.num_replicas - 1) assert node.requests[f_d].forwardedTo == node.replicas.num_replicas check_for_nodes(txnPoolNodeSet, check_stable_checkpoint, old_stable_checkpoint) sdk_get_replies(looper, req) check_for_nodes(txnPoolNodeSet, check_stable_checkpoint, old_stable_checkpoint) # Send one more request to stabilize checkpoint sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ - 1) looper.run( eventually(check_for_nodes, txnPoolNodeSet, check_stable_checkpoint, old_stable_checkpoint + CHK_FREQ)) assert len(node.requests) == 0
def test_no_propagate_request_on_different_prepares_on_backup_before_vc(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    '''
    Send random request and do view change then fast_nodes (2,3 - with primary
    backup replica) will have prepare or send preprepare on backup replicas
    and slow_nodes are have not and transaction will ordered on all master
    replicas. Check last ordered after view change and after another one
    request.
    '''
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    slow_instance = 1
    slow_nodes = txnPoolNodeSet[1:3]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]
    nodes_stashers = [n.nodeIbStasher for n in slow_nodes]
    # Block both PREPAREs and PRE-PREPAREs of the backup instance on the slow
    # nodes, so only the fast nodes progress on that instance.
    with delay_rules(nodes_stashers, pDelay(delay=sys.maxsize,
                                            instId=slow_instance)):
        with delay_rules(nodes_stashers, ppDelay(delay=sys.maxsize,
                                                 instId=slow_instance)):
            # send one request
            requests = sdk_send_random_requests(looper, sdk_pool_handle,
                                                sdk_wallet_client, 1)
            # Master instance still orders, so replies arrive.
            sdk_get_replies(looper, requests)
            looper.run(eventually(is_prepared, fast_nodes, 2, slow_instance))

            # trigger view change on all nodes
            for node in txnPoolNodeSet:
                node.view_changer.on_master_degradation()
            # wait for view change done on all nodes
            ensureElectionsDone(looper, txnPoolNodeSet)

    last_ordered_3pc = fast_nodes[0].replicas[slow_instance].last_ordered_3pc
    # After the view change every node must agree on the backup instance's
    # last ordered 3PC key.
    for node in txnPoolNodeSet:
        assert last_ordered_3pc == node.replicas[
            slow_instance].last_ordered_3pc

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.run(
        eventually(check_last_ordered, txnPoolNodeSet, slow_instance,
                   (txnPoolNodeSet[0].viewNo, 1)))
    # No node should ever have requested PROPAGATEs.
    assert all(0 == node.spylog.count(node.request_propagates)
               for node in txnPoolNodeSet)
def test_old_view_requests_processed_during_view_change(looper, txnPoolNodeSet,
                                                        sdk_wallet_handle,
                                                        sdk_wallet_client):
    """
    Make sure that requests sent before view change started are processed and
    replies are returned:
    - delay Propagates (to make sure that requests are not ordered before view
      change is started)
    - send requests
    - check that requests are ordered despite of view change being in progress
    """
    # NOTE(review): `sdk_wallet_handle` is passed where other tests pass a
    # pool handle — confirm the fixture actually provides the pool handle.
    for node in txnPoolNodeSet:
        node.view_change_in_progress = False
        # Delay PROPAGATEs so ordering cannot start before the flag flips.
        node.nodeIbStasher.delay(ppgDelay(3, 0))
    requests = sdk_send_random_requests(looper, sdk_wallet_handle,
                                        sdk_wallet_client, 2)
    looper.runFor(1)
    # Simulate a view change being in progress while requests are pending.
    for node in txnPoolNodeSet:
        node.view_change_in_progress = True
    sdk_get_replies(looper, requests)
def test_all_replicas_hold_request_keys(perf_chk_patched, looper,
                                        txnPoolNodeSet, sdk_wallet_client,
                                        sdk_pool_handle):
    """
    All replicas whether primary or non primary hold request keys of forwarded
    requests. Once requests are ordered, they request keys are removed from
    replica.
    """
    tconf = perf_chk_patched
    delay_3pc = 2
    # Slow 3PC traffic so the ordering-service queues stay observable.
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r._ordering_service.
                               requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r._ordering_service.requestQueues[
                            DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    # Primary drains its queue when it batches requests.
                    assert len(r._ordering_service.
                               requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper, req_resps,
                    timeout=sdk_eval_timeout(tconf.Max3PCBatchSize - 1,
                                             len(txnPoolNodeSet),
                                             add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it yet.
def test_catchup_during_3pc(tconf, looper, txnPoolNodeSet, sdk_wallet_client,
                            sdk_pool_handle):
    '''
    1) Send 1 3PC batch + 2 reqs
    2) Delay commits on one node
    3) Make sure the batch is ordered on all nodes except the lagged one
    4) start catchup of the lagged node
    5) Make sure that all nodes are equal
    6) Send more requests that we have 3 batches in total
    7) Make sure that all nodes are equal
    '''
    lagging_node = txnPoolNodeSet[-1]
    rest_nodes = txnPoolNodeSet[:-1]

    # COMMITs are delayed on the lagging node only; catchup traffic is not.
    with delay_rules(lagging_node.nodeIbStasher, cDelay()):
        sdk_reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                            sdk_wallet_client,
                                            tconf.Max3PCBatchSize + 2)
        looper.run(
            eventually(check_last_ordered_3pc_on_master, rest_nodes, (0, 1)))
        # Catch up while 3PC is stalled on this node.
        lagging_node.start_catchup()
        looper.run(
            eventually(
                lambda: assertExp(lagging_node.mode == Mode.participating),
                retryWait=1,
                timeout=waits.expectedPoolCatchupTime(len(txnPoolNodeSet))))
        waitNodeDataEquality(looper, *txnPoolNodeSet, customTimeout=5)

    sdk_get_replies(looper, sdk_reqs)
    # Order enough extra requests to reach 3 batches in total.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client,
                              2 * tconf.Max3PCBatchSize - 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def sdk_get_and_check_multiply_replies(looper, request_couple):
    """Collect replies for one request couple and validate each reply.

    When the response is a per-node mapping (no error, no top-level "op"),
    every node's raw reply is decoded and checked individually; otherwise
    the couple is checked as-is. Returns the first (request, response)
    couple received.
    """
    collected = []
    for couple in sdk_get_replies(looper, [request_couple, ]):
        request, responses = couple
        is_per_node = (not isinstance(responses, ErrorCode)
                       and "op" not in responses)
        if is_per_node:
            for raw_reply in responses.values():
                sdk_check_reply((request, json.loads(raw_reply)))
        else:
            sdk_check_reply(couple)
        collected.append(couple)
    return collected[0]
def test_unordered_request_freed_on_replica_removal(looper, txnPoolNodeSet,
                                                    sdk_pool_handle,
                                                    sdk_wallet_client,
                                                    chkFreqPatched,
                                                    view_change):
    """Removing a replica while a request is unordered must decrement the
    request's forwardedTo; the request is freed after ordering and the next
    stable checkpoint."""
    node = txnPoolNodeSet[0]
    stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    # Delay COMMITs everywhere so the request stays unordered.
    with delay_rules(stashers, cDelay(delay=sys.maxsize)):
        req = sdk_send_random_requests(looper, sdk_pool_handle,
                                       sdk_wallet_client, 1)
        # Let the non-delayed phases (propagate/pre-prepare/prepare) run.
        looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) +
                      waits.expectedPrePrepareTime(len(txnPoolNodeSet)) +
                      waits.expectedPrepareTime(len(txnPoolNodeSet)) +
                      waits.expectedCommittedTime(len(txnPoolNodeSet)))
        assert len(node.requests) == 1
        forwardedToBefore = next(iter(node.requests.values())).forwardedTo
        node.replicas.remove_replica(node.replicas.num_replicas - 1)
        # Request must survive the removal but with one fewer forward target.
        assert len(node.requests) == 1
        forwardedToAfter = next(iter(node.requests.values())).forwardedTo
        assert forwardedToAfter == forwardedToBefore - 1
        chkChkpoints(txnPoolNodeSet, 0)
    sdk_get_replies(looper, req)
    chkChkpoints(txnPoolNodeSet, 1)

    # Send one more request to stabilize checkpoint
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, 0))
    assert len(node.requests) == 0
def test_unordered_request_freed_on_replica_removal(looper, txnPoolNodeSet,
                                                    sdk_pool_handle,
                                                    sdk_wallet_client,
                                                    chkFreqPatched,
                                                    view_change):
    """Replica removal while a request is in flight: forwardedTo shrinks by
    one, and the request is garbage-collected after checkpoint stabilization."""
    node = txnPoolNodeSet[0]
    stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    # Keep the request unordered by delaying COMMITs on every node.
    with delay_rules(stashers, cDelay(delay=sys.maxsize)):
        req = sdk_send_random_requests(looper, sdk_pool_handle,
                                       sdk_wallet_client, 1)
        # Allow propagate/pre-prepare/prepare to complete before inspecting.
        looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) +
                      waits.expectedPrePrepareTime(len(txnPoolNodeSet)) +
                      waits.expectedPrepareTime(len(txnPoolNodeSet)) +
                      waits.expectedCommittedTime(len(txnPoolNodeSet)))
        assert len(node.requests) == 1
        forwardedToBefore = next(iter(node.requests.values())).forwardedTo
        node.replicas.remove_replica(node.replicas.num_replicas - 1)
        assert len(node.requests) == 1
        forwardedToAfter = next(iter(node.requests.values())).forwardedTo
        assert forwardedToAfter == forwardedToBefore - 1
        chkChkpoints(txnPoolNodeSet, 0)
    sdk_get_replies(looper, req)
    chkChkpoints(txnPoolNodeSet, 1)

    # Send one more request to stabilize checkpoint
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, 0))
    assert len(node.requests) == 0
def test_no_propagate_request_on_different_last_ordered_on_master_before_vc(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    '''
    Send random request and do view change then fast_nodes (1, 4 - without
    primary after next view change) are already ordered transaction on master
    and slow_nodes are not. Check ordering on slow_nodes.
    '''
    # `batches_count` is a module-level counter tracking ordered batches.
    global batches_count
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    batches_count += 1
    master_instance = txnPoolNodeSet[0].master_replica.instId
    slow_nodes = txnPoolNodeSet[1:3]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]
    nodes_stashers = [n.nodeIbStasher for n in slow_nodes]
    old_last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    assert batches_count == old_last_ordered[1]
    # Delay COMMITs on slow nodes so they fall behind the fast nodes.
    with delay_rules(nodes_stashers, cDelay()):
        # send one request
        requests = sdk_send_random_requests(looper, sdk_pool_handle,
                                            sdk_wallet_client, 1)
        batches_count += 1
        # NOTE(review): `last_ordered_for_slow` is unused in this variant.
        last_ordered_for_slow = slow_nodes[0].master_replica.last_ordered_3pc
        old_view_no = txnPoolNodeSet[0].viewNo
        looper.run(
            eventually(check_last_ordered, fast_nodes, master_instance,
                       (old_view_no, batches_count)))

        # trigger view change on all nodes
        ensure_view_change(looper, txnPoolNodeSet)
        # wait for view change done on all nodes
        ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=60)
        batches_count += 1

    replies = sdk_get_replies(looper, requests)
    for reply in replies:
        sdk_check_reply(reply)

    # a new primary will send a PrePrepare for the new view
    looper.run(
        eventually(check_last_ordered, txnPoolNodeSet, master_instance,
                   (old_view_no + 1, batches_count)))
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    # PROPAGATE must never have been requested by any node.
    assert all(0 == node.spylog.count(node.request_propagates)
               for node in txnPoolNodeSet)
def testOnlyAStewardCanAddAnotherSteward(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         sdk_wallet_client):
    """A steward can add a steward; a plain client's attempt is rejected."""
    # A steward successfully adds a new steward NYM.
    sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward,
                    alias='testSteward' + randomString(3),
                    role=STEWARD_STRING)

    # Now a non-steward client tries to do the same.
    seed = randomString(32)
    wallet_handle, _ = sdk_wallet_client
    nym_request, steward_did = looper.loop.run_until_complete(
        prepare_nym_request(sdk_wallet_client, seed,
                            'testSteward2', 'STEWARD'))
    request_couple = sdk_sign_and_send_prepared_request(
        looper, sdk_wallet_client, sdk_pool_handle, nym_request)
    total_timeout = sdk_eval_timeout(1, len(txnPoolNodeSet))
    request_couple = sdk_get_replies(looper, [request_couple],
                                     total_timeout)[0]

    # The pool must reject the client's NYM with the steward-only message.
    with pytest.raises(RequestRejectedException) as e:
        sdk_check_reply(request_couple)
    assert 'Only Steward is allowed to do these transactions' in \
        e.value.args[0]
def test_no_propagate_request_on_different_last_ordered_on_master_before_vc(looper, txnPoolNodeSet,
                                                                            sdk_pool_handle,
                                                                            sdk_wallet_client):
    '''
    Send random request and do view change then fast_nodes (1, 4 - without
    primary after next view change) are already ordered transaction on master
    and slow_nodes are not. Check ordering on slow_nodes.
    '''
    # Baseline: one ordered request so last_ordered_3pc is known.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    master_instance = txnPoolNodeSet[0].master_replica.instId
    slow_nodes = txnPoolNodeSet[1:3]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]
    nodes_stashers = [n.nodeIbStasher for n in slow_nodes]
    old_last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    # Delay COMMITs on slow nodes so only fast nodes order the next request.
    with delay_rules(nodes_stashers, cDelay()):
        # send one request
        requests = sdk_send_random_requests(looper, sdk_pool_handle,
                                            sdk_wallet_client, 1)
        last_ordered_for_slow = slow_nodes[0].master_replica.last_ordered_3pc
        old_view_no = txnPoolNodeSet[0].viewNo
        looper.run(
            eventually(check_last_ordered, fast_nodes, master_instance,
                       (old_view_no, old_last_ordered[1] + 1)))

        # trigger view change on all nodes
        ensure_view_change(looper, txnPoolNodeSet)
        # wait for view change done on all nodes
        ensureElectionsDone(looper, txnPoolNodeSet)

    replies = sdk_get_replies(looper, requests)
    for reply in replies:
        sdk_check_reply(reply)

    # Slow nodes caught up after delays were lifted.
    check_last_ordered(slow_nodes, master_instance,
                       (old_view_no, last_ordered_for_slow[1] + 1))
    # No node should have had to request PROPAGATEs.
    assert all(0 == node.spylog.count(node.request_propagates)
               for node in txnPoolNodeSet)
def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client):
    """With f+2 nodes (including the primary) down the pool loses write
    quorum; restarting nodes one by one restores it."""
    timeout = sdk_eval_timeout(1, len(txnPoolNodeSet))
    nodes = txnPoolNodeSet

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop the primary: pool view-changes and keeps working.
    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=1)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop a second node: quorum is lost, writes must time out.
    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[2:], expectedViewNo=1)

    sdk_reqs3 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        req_res = sdk_get_replies(looper, sdk_reqs3, timeout=timeout)
        sdk_check_reply(req_res[0])

    # Stop a third node: still no quorum.
    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    sdk_reqs4 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        req_res = sdk_get_replies(looper, sdk_reqs4, timeout=timeout)
        sdk_check_reply(req_res[0])

    # Restart one node: still one short of quorum, writes still time out.
    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    sdk_reqs5 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        req_res = sdk_get_replies(looper, sdk_reqs5, timeout=timeout)
        sdk_check_reply(req_res[0])

    # Restart a second node: quorum restored, writes succeed again.
    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes[1:], expectedViewNo=1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Restart the old primary: full pool, still functional.
    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes, expectedViewNo=1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo)
    and does not remove messages from node's queues that have higher
    viewNo than last ordered one even if their ppSeqNo are less or equal
    """
    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    # node pool working with two batches
    # -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    # (last_ordered_3pc here and further is tracked
    # for master instances only cause non-master ones have
    # specific logic of its management which we don't care in
    # the test, see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    batches_count = get_pp_seq_no(txnPoolNodeSet)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    # -> GC should remove it from nodes' queues
    # -> viewNo = +1
    # NOTE(review): the view change itself appears to produce one more
    # ordered batch, hence the batches_count bump — confirm against
    # ensure_view_change_complete semantics.
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    looper.run(
        eventually(check_nodes_last_ordered_3pc, txnPoolNodeSet,
                   (viewNo, batches_count)))
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow processing 3PC messages for all nodes (all replica instances)
    # randomly and send one more message
    # -> not ordered (last_ordered_3pc still equal (+0, 2)) but primaries
    # should at least send PRE-PREPAREs
    # TODO could it be not enough for waiting that at least primary
    # has sent PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet, 0, delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle,
                                      sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        # Every primary replica must have sent its PRE-PREPARE even though
        # the rest of the 3PC exchange is delayed and nothing is ordered.
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica._ordering_service.sent_preprepares)

    looper.run(
        eventually(checkPrePrepareSentAtLeastByPrimary, retryWait=0.1,
                   timeout=propagationTimeout))

    # 4 do view change
    # -> GC shouldn't remove anything because
    # last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    # -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1
    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count - 1))
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    # -> new primaries should send new 3pc for last message
    # with 3pc key (+2, 1)
    # -> they should be ordered
    # -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])
    batches_count += 1
    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    # -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count))
    check_nodes_requests_size(txnPoolNodeSet, 0)
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo)
    and does not remove messages from node's queues that have higher
    viewNo than last ordered one even if their ppSeqNo are less or equal
    """
    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    # node pool working with two batches
    # -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    # (last_ordered_3pc here and further is tracked
    # for master instances only cause non-master ones have
    # specific logic of its management which we don't care in
    # the test, see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    last_ordered_3pc = (viewNo, 2)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    # -> GC should remove it from nodes' queues
    # -> viewNo = +1
    ensure_view_change_complete(looper, txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow processing 3PC messages for all nodes (all replica instances)
    # randomly and send one more message
    # -> not ordered (last_ordered_3pc still equal (+0, 2)) but primaries
    # should at least send PRE-PREPAREs
    # TODO could it be not enough for waiting that at least primary
    # has sent PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet, 0, delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle,
                                      sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        # Every primary replica must have sent its PRE-PREPARE even though
        # the rest of the 3PC exchange is delayed and nothing is ordered.
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica.sentPrePrepares)

    looper.run(eventually(checkPrePrepareSentAtLeastByPrimary,
                          retryWait=0.1,
                          timeout=propagationTimeout))

    # 4 do view change
    # -> GC shouldn't remove anything because
    # last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    # -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)
    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    # -> new primaries should send new 3pc for last message
    # with 3pc key (+2, 1)
    # -> they should be ordered
    # -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])
    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    # -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)
def test_already_processed_requests(looper, txnPoolNodeSet,
                                    sdk_pool_handle, sdk_wallet_client):
    """
    Client re-sending request and checking that nodes picked the reply
    from ledger and did not process the request again
    """

    def get_method_call_count(method):
        # The spy counter for `method` must agree across all nodes;
        # return the common value.
        counts = set()
        for node in txnPoolNodeSet:
            c = node.spylog.count(method)
            counts.add(c)
        assert len(counts) == 1
        return counts.pop()

    def get_getReplyFromLedgerForRequest_call_count():
        return get_method_call_count(
            next(iter(txnPoolNodeSet)).getReplyFromLedgerForRequest)

    def get_recordAndPropagate_call_count():
        return get_method_call_count(
            next(iter(txnPoolNodeSet)).recordAndPropagate)

    def get_last_returned_val():
        # Latest value returned by getReplyFromLedgerForRequest; must be
        # identical on every node.
        rvs = []
        for node in txnPoolNodeSet:
            rv = getAllReturnVals(node, node.getReplyFromLedgerForRequest)
            rvs.append(rv[0])
        # All items are same in the list
        assert rvs.count(rvs[0]) == len(txnPoolNodeSet)
        return rvs[0]

    rlc1 = get_getReplyFromLedgerForRequest_call_count()
    rpc1 = get_recordAndPropagate_call_count()

    # Request which will be send twice
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)

    # Send, check and getting reply from first request
    sdk_reqs = sdk_send_signed_requests(sdk_pool_handle, reqs)
    total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet))
    request1 = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)
    for req_res in request1:
        sdk_check_reply(req_res)
    first_req_id = request1[0][0]['reqId']
    rlc2 = get_getReplyFromLedgerForRequest_call_count()
    rpc2 = get_recordAndPropagate_call_count()
    assert rlc2 - rlc1 == 1  # getReplyFromLedgerForRequest was called
    assert rpc2 - rpc1 == 1  # recordAndPropagate was called
    r1 = get_last_returned_val()
    # getReplyFromLedgerForRequest returned None since had not seen request
    assert r1 is None

    # Request which we will send only once
    request2 = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client, 1)
    second_req_id = request2[0][0]['reqId']
    assert second_req_id != first_req_id
    rlc3 = get_getReplyFromLedgerForRequest_call_count()
    rpc3 = get_recordAndPropagate_call_count()
    assert rlc3 - rlc2 == 1  # getReplyFromLedgerForRequest was called again
    assert rpc3 - rpc2 == 1  # recordAndPropagate was called again
    r2 = get_last_returned_val()
    # getReplyFromLedgerForRequest returned None since had not seen request
    assert r2 is None

    # Reply for the first request, which is going to be sent again
    rep1 = request1[0][1]['result']

    # Client re-sending first request
    request3 = sdk_send_signed_requests(sdk_pool_handle, reqs)
    total_timeout = sdk_eval_timeout(len(request3), len(txnPoolNodeSet))
    request3 = sdk_get_replies(looper, request3, timeout=total_timeout)
    third_req_id = request3[0][0]['reqId']
    assert third_req_id == first_req_id
    rlc4 = get_getReplyFromLedgerForRequest_call_count()
    rpc4 = get_recordAndPropagate_call_count()
    assert rlc4 - rlc3 == 1  # getReplyFromLedgerForRequest was called again
    assert rpc4 - rpc3 == 0  # recordAndPropagate was not called
    r3 = get_last_returned_val()
    # getReplyFromLedgerForRequest did not return None this time since had seen request
    assert r3 is not None
    rep3 = request3[0][1]['result']

    # Since txnTime is not stored in ledger and reading from ledger return
    # all possible fields from transactions
    rep3 = {k: v for k, v in rep3.items() if v is not None}
    rep1 = {k: v for k, v in rep1.items() if k in rep3}
    assert rep3 == rep1  # The reply client got is same as the previous one
def test_sdk_steward_send_many(looper, sdk_pool_handle, sdk_wallet_steward):
    """A steward submits a batch of 30 random requests; every reply must
    carry a non-empty 'result'."""
    pending = sdk_send_random_requests(looper, sdk_pool_handle,
                                       sdk_wallet_steward, 30)
    replies = sdk_get_replies(looper, pending)
    assert all(reply['result'] for _, reply in replies)
def test_unordered_state_reverted_before_catchup(
        tconf, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """
    Check that unordered state is reverted before starting catchup:
    - save the initial state on a node
    - slow down processing of COMMITs
    - send requests
    - wait until other nodes come to consensus
    - call start of catch-up
    - check that the state of the slow node is reverted and equal
      to the initial one.
    """
    # CONFIG
    ledger_id = DOMAIN_LEDGER_ID
    non_primary_node = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0].node
    non_primary_ledger = non_primary_node.getLedger(ledger_id)
    non_primary_state = non_primary_node.getState(ledger_id)

    # send reqs and make sure we are at the same state
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 10)
    sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # the state of the node before
    committed_ledger_before = non_primary_ledger.tree.root_hash
    uncommitted_ledger_before = non_primary_ledger.uncommittedRootHash
    committed_state_before = non_primary_state.committedHeadHash
    uncommitted_state_before = non_primary_state.headHash

    # EXECUTE
    # Delay commit requests on the node
    delay_c = 60
    non_primary_node.nodeIbStasher.delay(cDelay(delay_c))

    # send requests; the rest of the pool orders them, so the client still
    # gets replies while the slow node's COMMITs stay queued
    reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, tconf.Max3PCBatchSize)
    sdk_get_replies(looper, reqs, timeout=40)

    # Snapshot on the slow node: the batch is applied (uncommitted roots
    # advanced) but not committed, as verified by the asserts below.
    committed_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).tree.root_hash
    uncommitted_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).uncommittedRootHash
    committed_state_during_3pc = non_primary_node.getState(
        ledger_id).committedHeadHash
    uncommitted_state_during_3pc = non_primary_node.getState(
        ledger_id).headHash

    # start catchup
    non_primary_node.ledgerManager.preCatchupClbk(ledger_id)
    committed_ledger_reverted = non_primary_ledger.tree.root_hash
    uncommitted_ledger_reverted = non_primary_ledger.uncommittedRootHash
    committed_state_reverted = non_primary_state.committedHeadHash
    uncommitted_state_reverted = non_primary_state.headHash

    # CHECK
    # check that initial uncommitted state differs from the state during 3PC
    # but committed does not
    assert committed_ledger_before == committed_ledger_during_3pc
    assert uncommitted_ledger_before != uncommitted_ledger_during_3pc
    assert committed_state_before == committed_state_during_3pc
    assert uncommitted_state_before != uncommitted_state_during_3pc

    # after the pre-catchup callback the uncommitted state must be rolled
    # back to the committed (initial) one
    assert committed_ledger_before == committed_ledger_reverted
    assert uncommitted_ledger_before == uncommitted_ledger_reverted
    assert committed_state_before == committed_state_reverted
    assert uncommitted_state_before == uncommitted_state_reverted
def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client):
    """
    Stop f + 2 non-primary nodes one by one and check that the pool stops
    ordering (requests time out) while the view number never changes, then
    restart the nodes one by one and check that ordering is restored.
    """
    nodes = txnPoolNodeSet

    # Sanity check: a healthy pool orders a request.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop one non-primary node: the view stays 0 and the pool still orders.
    stop_node(nodes[4], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:4], expectedViewNo=0)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop a second non-primary: write consensus is lost, a request
    # times out.
    stop_node(nodes[3], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:3], expectedViewNo=0)
    sdk_reqs3 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs3)
        sdk_check_reply(req_res[0])

    # Stop a third non-primary (f + 2 down in total): still no consensus.
    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2], expectedViewNo=0)
    sdk_reqs4 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs4)
        sdk_check_reply(req_res[0])

    # Restart one node: n - 2 alive, still not enough for consensus.
    nodes[4] = start_stopped_node(nodes[4], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2] + nodes[4:], expectedViewNo=0)
    sdk_reqs5 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs5)
        sdk_check_reply(req_res[0])

    # Restart a second node: ordering works again — a fresh request gets
    # its replies.
    nodes[3] = start_stopped_node(nodes[3], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes[:2] + nodes[3:],
                        instances_list=range(getRequiredInstances(nodeCount)))
    checkViewNoForNodes(nodes[:2] + nodes[3:], expectedViewNo=0)
    sdk_reqs6 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    sdk_get_replies(looper, sdk_reqs6)

    # Restart the last node: the full pool is back, view never changed.
    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        instances_list=range(getRequiredInstances(nodeCount)))
    checkViewNoForNodes(nodes, expectedViewNo=0)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client):
    """
    Stop f + 2 non-primary nodes one by one and check that the pool stops
    ordering (requests time out) while the view number never changes, then
    restart the nodes one by one and check that ordering resumes and the
    previously stuck requests finally get replies.
    """
    nodes = txnPoolNodeSet

    # Sanity check: a healthy pool orders a request.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop one non-primary node: the view stays 0 and the pool still orders.
    stop_node(nodes[4], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:4], expectedViewNo=0)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop a second non-primary: write consensus is lost, a request
    # times out.
    stop_node(nodes[3], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:3], expectedViewNo=0)
    sdk_reqs3 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs3)
        sdk_check_reply(req_res[0])

    # Stop a third non-primary (f + 2 down in total): still no consensus.
    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2], expectedViewNo=0)
    sdk_reqs4 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs4)
        sdk_check_reply(req_res[0])

    # Restart one node: n - 2 alive, still not enough for consensus.
    nodes[4] = start_stopped_node(nodes[4], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2] + nodes[4:], expectedViewNo=0)
    sdk_reqs5 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs5)
        sdk_check_reply(req_res[0])

    # Restart a second node: ordering resumes; all of the previously stuck
    # requests plus a fresh one now get replies.
    nodes[3] = start_stopped_node(nodes[3], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes[:2] + nodes[3:],
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes[:2] + nodes[3:], expectedViewNo=0)
    sdk_reqs6 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    sdk_get_replies(looper, [sdk_reqs3[0], sdk_reqs4[0],
                             sdk_reqs5[0], sdk_reqs6[0]])

    # Restart the last node: the full pool is back, view never changed.
    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes, expectedViewNo=0)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client):
    """
    Stop f + 2 nodes one by one, starting with the master primary, checking
    that the pool stops ordering (write requests time out) once too few
    nodes remain alive.  Then restart the stopped nodes one by one and check
    that write consensus is restored and the view number stays at 1.
    """
    timeout = sdk_eval_timeout(1, len(txnPoolNodeSet))
    nodes = txnPoolNodeSet

    # Sanity check: a healthy pool orders a request.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop the primary: the remaining nodes perform a view change
    # (viewNo -> 1) and, with n - 1 nodes alive, still order requests.
    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=1)
    ensureElectionsDone(looper, nodes[1:],
                        instances_list=range(getRequiredInstances(nodeCount)))
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Stop a second node: write consensus is lost and a request times out.
    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[2:], expectedViewNo=1)
    sdk_reqs3 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs3, timeout=timeout)
        sdk_check_reply(req_res[0])

    # Stop a third node (f + 2 down in total): still no write consensus.
    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)
    sdk_reqs4 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs4, timeout=timeout)
        sdk_check_reply(req_res[0])

    # Restart one node: n - 2 alive, still not enough, requests keep
    # timing out.
    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)
    sdk_reqs5 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs5, timeout=timeout)
        sdk_check_reply(req_res[0])

    # Restart a second node: write consensus is regained.
    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes[1:],
                        instances_list=range(getRequiredInstances(nodeCount)),
                        customTimeout=60)
    checkViewNoForNodes(nodes[1:], expectedViewNo=1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Restart the old primary as well: the full pool is back and stays in
    # the same view.
    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        instances_list=range(getRequiredInstances(nodeCount)),
                        customTimeout=60)
    checkViewNoForNodes(nodes, expectedViewNo=1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
def test_unordered_state_reverted_before_catchup(tconf, looper,
                                                 txnPoolNodeSet,
                                                 sdk_wallet_client,
                                                 sdk_pool_handle):
    """
    Check that unordered state is reverted before starting catchup:
    - save the initial state on a node
    - slow down processing of COMMITs
    - send requests
    - wait until other nodes come to consensus
    - call start of catch-up
    - check that the state of the slow node is reverted and equal
      to the initial one.
    """
    # CONFIG
    ledger_id = DOMAIN_LEDGER_ID
    non_primary_node = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0].node
    non_primary_ledger = non_primary_node.getLedger(ledger_id)
    non_primary_state = non_primary_node.getState(ledger_id)

    # send reqs and make sure we are at the same state
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 10)
    sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # the state of the node before
    committed_ledger_before = non_primary_ledger.tree.root_hash
    uncommitted_ledger_before = non_primary_ledger.uncommittedRootHash
    committed_state_before = non_primary_state.committedHeadHash
    uncommitted_state_before = non_primary_state.headHash

    # EXECUTE
    # Delay commit requests on the node
    non_primary_node.nodeIbStasher.delay(cDelay())
    # Delay Consistency proofs to not finish catchup
    non_primary_node.nodeIbStasher.delay(cpDelay())

    # send requests; the rest of the pool orders them, so the client still
    # gets replies while the slow node's COMMITs stay queued
    reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, tconf.Max3PCBatchSize)
    sdk_get_replies(looper, reqs, timeout=40)

    # Snapshot on the slow node: the batch is applied (uncommitted roots
    # advanced) but not committed, as verified by the asserts below.
    committed_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).tree.root_hash
    uncommitted_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).uncommittedRootHash
    committed_state_during_3pc = non_primary_node.getState(
        ledger_id).committedHeadHash
    uncommitted_state_during_3pc = non_primary_node.getState(
        ledger_id).headHash

    # start catchup
    non_primary_node.start_catchup()
    committed_ledger_reverted = non_primary_ledger.tree.root_hash
    uncommitted_ledger_reverted = non_primary_ledger.uncommittedRootHash
    committed_state_reverted = non_primary_state.committedHeadHash
    uncommitted_state_reverted = non_primary_state.headHash

    # CHECK
    # check that initial uncommitted state differs from the state during 3PC
    # but committed does not
    assert committed_ledger_before == committed_ledger_during_3pc
    assert uncommitted_ledger_before != uncommitted_ledger_during_3pc
    assert committed_state_before == committed_state_during_3pc
    assert uncommitted_state_before != uncommitted_state_during_3pc

    # after starting catchup the uncommitted state must be rolled back
    # to the committed (initial) one
    assert committed_ledger_before == committed_ledger_reverted
    assert uncommitted_ledger_before == uncommitted_ledger_reverted
    assert committed_state_before == committed_state_reverted
    assert uncommitted_state_before == uncommitted_state_reverted
def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"
    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    primaryRepl = getPrimaryReplica(txnPoolNodeSet, 1)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, 1)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))

    # Delay COMMITs so request is not ordered and checks can be made
    c_delay = 10
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1))

    # Send one request and wait until every non-primary has sent a PREPARE
    # for it (the request reached the PRE-PREPARE/PREPARE stage).
    req1 = sdk_send_random_requests(looper,
                                    sdk_pool_handle,
                                    sdk_wallet_client,
                                    1)[0]
    request1 = sdk_json_to_request_object(req1[0])
    for npr in nonPrimaryReplicas:
        looper.run(eventually(checkPrepareReqSent,
                              npr,
                              request1.key,
                              primaryRepl.viewNo,
                              retryWait=1))
    prePrepareReq = primaryRepl.sentPrePrepares[primaryRepl.viewNo,
                                                primaryRepl.lastPrePrepareSeqNo]
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1))

    # logger.debug("Patching the primary replica's pre-prepare sending method ")
    # orig_method = primaryRepl.sendPrePrepare

    # def patched(self, ppReq):
    #     self.sentPrePrepares[ppReq.viewNo, ppReq.ppSeqNo] = ppReq
    #     ppReq = updateNamedTuple(ppReq, **{f.PP_SEQ_NO.nm: 1})
    #     self.send(ppReq, TPCStat.PrePrepareSent)
    #
    # primaryRepl.sendPrePrepare = types.MethodType(patched, primaryRepl)
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    # Make the primary reuse an already-used ppSeqNo for the next request —
    # this is the "malicious" behavior under test.
    primaryRepl._lastPrePrepareSeqNo -= 1
    view_no = primaryRepl.viewNo
    request2 = sdk_json_to_request_object(
        sdk_send_random_requests(looper,
                                 sdk_pool_handle,
                                 sdk_wallet_client,
                                 1)[0][0])
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2,
                          retryWait=1, timeout=timeout))

    # Since the node is malicious, it will not be able to process requests due
    # to conflicts in PRE-PREPARE
    primaryRepl.node.stop()
    looper.removeProdable(primaryRepl.node)

    # Re-build the duplicate PRE-PREPARE the primary would have sent for the
    # second request (same view, same ppSeqNo as the first one).
    reqIdr = [request2.digest]
    prePrepareReq = PrePrepare(
        primaryRepl.instId,
        view_no,
        primaryRepl.lastPrePrepareSeqNo,
        get_utc_epoch(),
        reqIdr,
        init_discarded(),
        primaryRepl.batchDigest([request2]),
        DOMAIN_LEDGER_ID,
        primaryRepl.stateRootHash(DOMAIN_LEDGER_ID),
        primaryRepl.txnRootHash(DOMAIN_LEDGER_ID),
        0,
        True
    )

    logger.debug("""Checking whether all the non primary replicas have received the pre-prepare request with same sequence number""")
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1,
                          timeout=timeout))
    logger.debug("""Check that none of the non primary replicas didn't send any prepare message " in response to the pre-prepare message""")
    timeout = waits.expectedPrepareTime(len(txnPoolNodeSet))
    looper.runFor(timeout)  # expect prepare processing timeout

    # check if prepares have not been sent
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(eventually(checkPrepareReqSent,
                                  npr,
                                  request2.key,
                                  view_no,
                                  retryWait=1,
                                  timeout=timeout))

    # The first request must still get ordered once the COMMIT delay expires.
    timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) + c_delay
    result1 = sdk_get_replies(looper, [req1])[0][1]
    logger.debug("request {} gives result {}".format(request1, result1))
def test_already_processed_requests(looper, txnPoolNodeSet, sdk_pool_handle,
                                    sdk_wallet_client):
    """
    A client re-sends a request that was already ordered; the nodes must
    serve the reply straight from the ledger instead of processing the
    request a second time.
    """

    def _common_spy_count(method):
        # Every node must report the same spy count for the method.
        distinct = {node.spylog.count(method) for node in txnPoolNodeSet}
        assert len(distinct) == 1
        return distinct.pop()

    def _ledger_reply_calls():
        return _common_spy_count(
            next(iter(txnPoolNodeSet)).getReplyFromLedger)

    def _propagate_calls():
        return _common_spy_count(
            next(iter(txnPoolNodeSet)).recordAndPropagate)

    def _latest_ledger_reply():
        # Most recent getReplyFromLedger return value; must be identical
        # on every node.
        values = [getAllReturnVals(node, node.getReplyFromLedger)[0]
                  for node in txnPoolNodeSet]
        assert values.count(values[0]) == len(txnPoolNodeSet)
        return values[0]

    ledger_calls_0 = _ledger_reply_calls()
    propagate_calls_0 = _propagate_calls()

    # This signed request will be submitted twice.
    signed_reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)

    # First submission: the request is ordered normally.
    sent = sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
    total_timeout = sdk_eval_timeout(len(sent), len(txnPoolNodeSet))
    request1 = sdk_get_replies(looper, sent, timeout=total_timeout)
    for pair in request1:
        sdk_check_reply(pair)
    first_req_id = request1[0][0]['reqId']
    ledger_calls_1 = _ledger_reply_calls()
    propagate_calls_1 = _propagate_calls()
    # Ledger lookup happened once and the request was propagated once.
    assert ledger_calls_1 - ledger_calls_0 == 1
    assert propagate_calls_1 - propagate_calls_0 == 1
    # The ledger had not seen the request yet, so the lookup returned None.
    assert _latest_ledger_reply() is None

    # An unrelated request, submitted only once.
    request2 = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client, 1)
    second_req_id = request2[0][0]['reqId']
    assert second_req_id != first_req_id
    ledger_calls_2 = _ledger_reply_calls()
    propagate_calls_2 = _propagate_calls()
    # Again one lookup and one propagation; still no ledger hit.
    assert ledger_calls_2 - ledger_calls_1 == 1
    assert propagate_calls_2 - propagate_calls_1 == 1
    assert _latest_ledger_reply() is None

    # Reply of the first request, which is about to be re-sent.
    rep1 = request1[0][1]['result']

    # Second submission of the very same signed request.
    request3 = sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
    total_timeout = sdk_eval_timeout(len(request3), len(txnPoolNodeSet))
    request3 = sdk_get_replies(looper, request3, timeout=total_timeout)
    third_req_id = request3[0][0]['reqId']
    assert third_req_id == first_req_id
    ledger_calls_3 = _ledger_reply_calls()
    propagate_calls_3 = _propagate_calls()
    # The ledger lookup ran again, but the request was NOT propagated again.
    assert ledger_calls_3 - ledger_calls_2 == 1
    assert propagate_calls_3 - propagate_calls_2 == 0
    # This time the ledger already held the reply.
    assert _latest_ledger_reply() is not None

    rep3 = request3[0][1]['result']
    # txnTime is not stored in the ledger, while a ledger read returns every
    # possible transaction field — so compare only the fields present in both.
    rep3 = {k: v for k, v in rep3.items() if v is not None}
    rep1 = {k: v for k, v in rep1.items() if k in rep3}
    assert rep3 == rep1  # the re-sent request got the same reply as before
def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client):
    """
    Replicas should not accept a PRE-PREPARE for view "v" and prepare
    sequence number "n" if they have already accepted a request with view
    number "v" and sequence number "n".

    The primary of backup instance 1 is made malicious by decrementing its
    pre-prepare sequence counter, so its next PRE-PREPARE reuses an already
    accepted (view_no, pp_seq_no) pair; non-primary replicas must receive it
    but send no PREPARE for it.
    """
    primaryRepl = getPrimaryReplica(txnPoolNodeSet, 1)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, 1)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))

    # Delay COMMITs so the request is not ordered and checks can be made
    # while the 3PC round is still in flight.
    c_delay = 10
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1))

    # First request: runs the normal path and fixes (view_no, pp_seq_no).
    req1 = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, 1)[0]
    request1 = sdk_json_to_request_object(req1[0])
    for npr in nonPrimaryReplicas:
        looper.run(eventually(checkPrepareReqSent,
                              npr,
                              request1.key,
                              primaryRepl.viewNo,
                              retryWait=1))
    prePrepareReq = primaryRepl._ordering_service.sent_preprepares[
        primaryRepl.viewNo, primaryRepl.lastPrePrepareSeqNo]
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1))

    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    # Malicious step: the next PRE-PREPARE will reuse the previous seq no.
    primaryRepl._ordering_service._lastPrePrepareSeqNo -= 1
    view_no = primaryRepl.viewNo
    request2 = sdk_json_to_request_object(
        sdk_send_random_requests(looper, sdk_pool_handle,
                                 sdk_wallet_client, 1)[0][0])
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2,
                          retryWait=1, timeout=timeout))

    # Since the node is malicious, it will not be able to process requests
    # due to conflicts in PRE-PREPARE — take it out of the pool.
    primaryRepl.node.stop()
    looper.removeProdable(primaryRepl.node)

    # Reconstruct the conflicting PRE-PREPARE the malicious primary sent,
    # to check that the non-primaries did receive it.
    reqIdr = [request2.digest]
    prePrepareReq = PrePrepare(
        primaryRepl.instId,
        view_no,
        primaryRepl.lastPrePrepareSeqNo,
        get_utc_epoch(),
        reqIdr,
        init_discarded(),
        primaryRepl.batchDigest([request2]),
        DOMAIN_LEDGER_ID,
        primaryRepl._ordering_service.get_state_root_hash(DOMAIN_LEDGER_ID),
        primaryRepl._ordering_service.get_txn_root_hash(DOMAIN_LEDGER_ID),
        0,
        True,
        [])

    logger.debug("Checking whether all the non primary replicas have "
                 "received the pre-prepare request with same sequence number")
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1,
                          timeout=timeout))
    logger.debug("Checking that none of the non primary replicas sent a "
                 "prepare message in response to the pre-prepare message")
    timeout = waits.expectedPrepareTime(len(txnPoolNodeSet))
    looper.runFor(timeout)  # expect prepare processing timeout

    # Check that prepares have NOT been sent for the conflicting request:
    # checkPrepareReqSent must keep failing until eventually() gives up.
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(eventually(checkPrepareReqSent,
                                  npr,
                                  request2.key,
                                  view_no,
                                  retryWait=1,
                                  timeout=timeout))

    # The first (legitimate) request must still get ordered once the
    # delayed COMMITs are processed.
    timeout = waits.expectedTransactionExecutionTime(
        len(txnPoolNodeSet)) + c_delay
    result1 = sdk_get_replies(looper, [req1])[0][1]
    logger.debug("request {} gives result {}".format(request1, result1))
def test_seller_xfer_double_spend_attempt(looper, sdk_pool_handle,  # noqa
                                          nodeSetWithIntegratedTokenPlugin,
                                          public_minting, sdk_wallet_client,
                                          seller_address, seller_token_wallet,
                                          user1_address, user2_address):
    """
    Two XFER transactions both try to spend the seller's first UTXO. Even
    though the UTXO holds more tokens than the combined outputs of both
    requests, a UTXO may be consumed only once, so exactly one of the two
    requests must be rejected.
    """
    # -------------------------------------------------------------------
    # Helper functions.
    # -------------------------------------------------------------------
    def succeeded(req_resp):
        # True iff the reply passes sdk_check_reply without raising.
        try:
            sdk_check_reply(req_resp)
            return True
        except Exception:
            return False

    def check_output_val(address, amount):
        return check_output_val_on_all_nodes(
            nodeSetWithIntegratedTokenPlugin, address, amount)

    def check_no_output_val(address, amount):
        with pytest.raises(AssertionError):
            check_output_val(address, amount)

    def get_seq_no_first_utxo(address):
        get_utxo_resp = send_get_utxo(looper, address, sdk_wallet_client,
                                      sdk_pool_handle)
        return get_utxo_resp[OUTPUTS][0]["seqNo"]

    # -------------------------------------------------------------------
    # Build two xfer requests that spend the SAME UTXO.
    # NOTE(review): `seller_gets` is not defined in this function — it is
    # presumably a module-level constant for the seller's minted amount;
    # verify against the rest of the file.
    # -------------------------------------------------------------------
    user1_gets = 3
    user2_gets = 5
    seq_no = get_seq_no_first_utxo(seller_address)
    inputs = [[seller_token_wallet, seller_address, seq_no]]
    outputs1 = [{"address": user1_address, "amount": user1_gets},
                {"address": seller_address,
                 "amount": seller_gets - user1_gets}]
    outputs2 = [{"address": user2_address, "amount": user2_gets},
                {"address": seller_address,
                 "amount": seller_gets - user2_gets}]
    requests = [json.dumps(req.as_dict)
                for req in (xfer_request(inputs, outputs1),
                            xfer_request(inputs, outputs2))]

    # -------------------------------------------------------------------
    # Submit both requests; exactly one must succeed.
    # -------------------------------------------------------------------
    replies = sdk_get_replies(
        looper, sdk_send_signed_requests(sdk_pool_handle, requests))
    success1 = succeeded(replies[0])
    success2 = succeeded(replies[1])
    assert lxor(success1, success2)

    # -------------------------------------------------------------------
    # Check the resulting balances of seller, user1 and user2.
    # -------------------------------------------------------------------
    if success1:
        check_output_val(seller_address, seller_gets - user1_gets)
        check_output_val(user1_address, user1_gets)
        check_no_output_val(user2_address, 0)
    else:
        check_output_val(seller_address, seller_gets - user2_gets)
        check_output_val(user2_address, user2_gets)
        check_no_output_val(user1_address, 0)