def test_all_replicas_hold_request_keys(
        perf_chk_patched,
        looper,
        txnPoolNodeSet,
        sdk_wallet_client,
        sdk_pool_handle):
    """
    All replicas, whether primary or non-primary, hold request keys of
    forwarded requests. Once requests are ordered, their request keys are
    removed from the replica.
    """
    tconf = perf_chk_patched
    # Slow down 3PC messages on the first two instances so request keys
    # stay queued long enough to be observed.
    delay_3pc = 2
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r.requestQueues[DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    # Primary queues drain immediately: keys are consumed
                    # when the primary builds a batch.
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper, req_resps,
                    timeout=sdk_eval_timeout(tconf.Max3PCBatchSize - 1,
                                             len(txnPoolNodeSet),
                                             add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it.

    # Second phase: repeat the check across a view change. Nominations are
    # delayed so the election takes a predictable extra amount of time.
    delay = 1
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(nom_delay(delay))

    ensure_view_change(looper, txnPoolNodeSet)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      2 * tconf.Max3PCBatchSize)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    looper.run(eventually(chk, 2 * tconf.Max3PCBatchSize))

    # Since each nomination is delayed and there will be multiple nominations
    # so adding some extra time
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
        len(txnPoolNodeSet) * delay
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    sdk_get_replies(looper, req_resps, timeout=timeout)
    looper.run(eventually(chk, 0))
def test_all_replicas_hold_request_keys(perf_chk_patched,
                                        looper,
                                        txnPoolNodeSet,
                                        sdk_wallet_client,
                                        sdk_pool_handle):
    """
    All replicas, whether primary or non-primary, hold request keys of
    forwarded requests. Once requests are ordered, their request keys are
    removed from the replica.
    """
    tconf = perf_chk_patched
    # Delay 3PC traffic on the first two instances so the request queues
    # remain observable before ordering completes.
    delay_3pc = 2
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r.requestQueues[DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    # Primaries consume keys as soon as they batch them.
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper, req_resps,
                    timeout=sdk_eval_timeout(tconf.Max3PCBatchSize - 1,
                                             len(txnPoolNodeSet),
                                             add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it.

    # Repeat the scenario across a view change; nominations are delayed to
    # make the election duration predictable.
    delay = 1
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(nom_delay(delay))

    ensure_view_change(looper, txnPoolNodeSet)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      2 * tconf.Max3PCBatchSize)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    looper.run(eventually(chk, 2 * tconf.Max3PCBatchSize))

    # Since each nomination is delayed and there will be multiple nominations
    # so adding some extra time
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
        len(txnPoolNodeSet) * delay
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    sdk_get_replies(looper, req_resps, timeout=timeout)
    looper.run(eventually(chk, 0))
def test_belated_request_not_processed_if_already_in_3pc_process(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """A request arriving late at one node is not executed a second time.

    Delta only sees the client request after the rest of the pool has already
    run the full 3PC round for it; the domain ledger must grow by exactly one
    transaction on every node.
    """
    slow_node = txnPoolNodeSet[3]
    size_before = slow_node.domainLedger.size

    # Hide the client request from Delta and stall COMMITs pool-wide.
    slow_node.clientIbStasher.delay(req_delay(300))
    for n in txnPoolNodeSet:
        n.nodeIbStasher.delay(cDelay(300))

    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_signed_requests(sdk_pool_handle, signed)

    pool_size = len(txnPoolNodeSet)
    # Time for one full propagate -> pre-prepare -> prepare -> commit round.
    full_3pc_wait = (waits.expectedPropagateTime(pool_size) +
                     waits.expectedPrePrepareTime(pool_size) +
                     waits.expectedPrepareTime(pool_size) +
                     waits.expectedCommittedTime(pool_size))
    looper.runFor(full_3pc_wait)

    # Let Delta finally receive the client request while COMMITs are held.
    slow_node.clientIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(full_3pc_wait)

    # Release the COMMITs everywhere and let ordering finish.
    for n in txnPoolNodeSet:
        n.nodeIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedOrderingTime(slow_node.replicas.num_replicas))

    for n in txnPoolNodeSet:
        assert n.domainLedger.size - size_before == 1
def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply,
                        reply_count):
    '''
    Add txn_count transactions to node's ledger and return ConsistencyProof for
    all new transactions and list of CatchupReplies

    :param node: node whose ledger receives the transactions
    :param num_txns_in_reply: how many txns to pack into each CatchupRep
    :param reply_count: how many CatchupRep messages to build
    :return: ConsistencyProof, list of CatchupReplies
    '''
    txn_count = num_txns_in_reply * reply_count
    ledger_manager = node.ledgerManager
    # NOTE(review): `ledger_id` is a module-level constant defined elsewhere
    # in this file — confirm against the full module.
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    catchup_rep_service = ledger_manager._node_leecher._leechers[ledger_id]._catchup_rep_service
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count)
    # add transactions to ledger
    for req in reqs:
        txn = append_txn_metadata(reqToTxn(req), txn_time=12345678)
        catchup_rep_service._add_txn(txn)
    # generate CatchupReps, one per `num_txns_in_reply`-sized interval
    replies = []
    for i in range(ledger.seqNo - txn_count + 1, ledger.seqNo + 1,
                   num_txns_in_reply):
        start = i
        end = i + num_txns_in_reply - 1
        cons_proof = ledger_manager._node_seeder._make_consistency_proof(
            ledger, end, ledger.size)
        txns = {}
        for seq_no, txn in ledger.getAllTxn(start, end):
            txns[str(seq_no)] = ledger_manager.owner.update_txn_with_extra_data(txn)
        replies.append(CatchupRep(ledger_id, SortedDict(txns), cons_proof))
    # Resolve the 3PC key for the last txn; fall back to (0, 0) when unknown.
    three_pc_key = node.three_phase_key_for_txn_seq_no(ledger_id, ledger.seqNo)
    view_no, pp_seq_no = three_pc_key if three_pc_key else (0, 0)
    return CatchupTill(start_size=ledger.seqNo - txn_count,
                       final_size=ledger.seqNo,
                       final_hash=Ledger.hashToStr(
                           ledger.tree.merkle_tree_hash(0, ledger.seqNo)),
                       view_no=view_no,
                       pp_seq_no=pp_seq_no), replies
def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply,
                        reply_count):
    '''
    Add txn_count transactions to node's ledger and return ConsistencyProof for
    all new transactions and list of CatchupReplies

    :param node: node whose ledger receives the transactions
    :param num_txns_in_reply: how many txns to pack into each CatchupRep
    :param reply_count: how many CatchupRep messages to build
    :return: ConsistencyProof, list of CatchupReplies
    '''
    txn_count = num_txns_in_reply * reply_count
    ledger_manager = node.ledgerManager
    # NOTE(review): `ledger_id` is a module-level constant defined elsewhere
    # in this file — confirm against the full module.
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    ledger_info = ledger_manager.getLedgerInfoByType(ledger_id)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count)
    # add transactions to ledger
    for req in reqs:
        txn = append_txn_metadata(reqToTxn(req), txn_time=12345678)
        ledger_manager._add_txn(ledger_id, ledger, ledger_info, txn)
    # generate CatchupReps, one per `num_txns_in_reply`-sized interval
    replies = []
    for i in range(ledger.seqNo - txn_count + 1, ledger.seqNo + 1,
                   num_txns_in_reply):
        start = i
        end = i + num_txns_in_reply - 1
        cons_proof = ledger_manager._make_consistency_proof(
            ledger, end, ledger.size)
        txns = {}
        for seq_no, txn in ledger.getAllTxn(start, end):
            txns[str(
                seq_no)] = ledger_manager.owner.update_txn_with_extra_data(txn)
        replies.append(CatchupRep(ledger_id, SortedDict(txns), cons_proof))
    return ledger_manager._buildConsistencyProof(ledger_id,
                                                 ledger.seqNo - txn_count,
                                                 ledger.seqNo), replies
def test_hook_pre_send_reply(looper, txnPoolNodeSet, sdk_pool_handle,
                             sdk_wallet_client):
    """A PRE_SEND_REPLY hook field shows up in fresh, repeated and GET_TXN replies."""

    def hook_add_field(*args, **kwargs):
        kwargs['committed_txns'][0][foo] = foo

    register_hook(txnPoolNodeSet, NodeHooks.PRE_SEND_REPLY, hook_add_field)

    def assert_hooked(result):
        # The hook-injected field must be present with the expected value.
        assert foo in result
        assert result[foo] == foo

    # Reply on a fresh request
    signed_reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sent = sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
    reply = sdk_get_and_check_replies(looper, sent)[0]
    assert_hooked(reply[1][f.RESULT.nm])

    # Reply on the very same request sent again (served from the ledger)
    sent = sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
    reply = sdk_get_and_check_replies(looper, sent)[0]
    assert_hooked(reply[1][f.RESULT.nm])

    # Reply on a GET_TXN request for the ordered transaction
    _, did = sdk_wallet_client
    seq_no = reply[1][f.RESULT.nm][TXN_METADATA][f.SEQ_NO.nm]
    get_txn_req = sdk_build_get_txn_request(looper, did, seq_no)
    req_couple = sdk_sign_and_send_prepared_request(
        looper, sdk_wallet_client, sdk_pool_handle, get_txn_req)
    reply = sdk_get_and_check_replies(looper, [req_couple])[0]
    assert_hooked(reply[1][f.RESULT.nm]['data'])
def test_hook_pre_send_reply(looper, txnPoolNodeSet, sdk_pool_handle,
                             sdk_wallet_client):
    """Field injected by a PRE_SEND_REPLY hook appears in every kind of reply."""

    def hook_add_field(*args, **kwargs):
        kwargs['committed_txns'][0][foo] = foo

    register_hook(txnPoolNodeSet, NodeHooks.PRE_SEND_REPLY, hook_add_field)

    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)

    # The same request is sent twice: once to be ordered, once to be answered
    # straight from the ledger; the hook must fire both times.
    for _ in range(2):
        handles = sdk_send_signed_requests(sdk_pool_handle, signed)
        reply = sdk_get_and_check_replies(looper, handles)[0]
        result = reply[1][f.RESULT.nm]
        assert foo in result
        assert result[foo] == foo

    # A GET_TXN for the ordered transaction also passes through the hook.
    _, did = sdk_wallet_client
    request = sdk_build_get_txn_request(
        looper, did, reply[1][f.RESULT.nm][TXN_METADATA][f.SEQ_NO.nm])
    request_couple = sdk_sign_and_send_prepared_request(
        looper, sdk_wallet_client, sdk_pool_handle, request)
    reply = sdk_get_and_check_replies(looper, [request_couple])[0]
    data = reply[1][f.RESULT.nm]['data']
    assert foo in data
    assert data[foo] == foo
def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply,
                        reply_count):
    '''
    Add txn_count transactions to node's ledger and return ConsistencyProof for
    all new transactions and list of CatchupReplies

    :param node: node whose ledger receives the transactions
    :param num_txns_in_reply: how many txns to pack into each CatchupRep
    :param reply_count: how many CatchupRep messages to build
    :return: ConsistencyProof, list of CatchupReplies
    '''
    txn_count = num_txns_in_reply * reply_count
    ledger_manager = node.ledgerManager
    # NOTE(review): `ledger_id` is a module-level constant defined elsewhere
    # in this file — confirm against the full module.
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    ledger_info = ledger_manager.getLedgerInfoByType(ledger_id)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count)
    # add transactions to ledger
    for req in reqs:
        txn = append_txn_metadata(reqToTxn(req), txn_time=12345678)
        ledger_manager._add_txn(ledger_id, ledger, ledger_info, txn)
    # generate CatchupReps, one per `num_txns_in_reply`-sized interval
    replies = []
    for i in range(ledger.seqNo - txn_count + 1, ledger.seqNo + 1,
                   num_txns_in_reply):
        start = i
        end = i + num_txns_in_reply - 1
        cons_proof = ledger_manager._make_consistency_proof(ledger, end,
                                                           ledger.size)
        txns = {}
        for seq_no, txn in ledger.getAllTxn(start, end):
            txns[str(seq_no)] = ledger_manager.owner.update_txn_with_extra_data(txn)
        replies.append(CatchupRep(ledger_id, SortedDict(txns), cons_proof))
    return ledger_manager._buildConsistencyProof(ledger_id,
                                                 ledger.seqNo - txn_count,
                                                 ledger.seqNo), replies
def test_6_nodes_pool_cannot_reach_quorum_with_2_disconnected(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Consensus must be unreachable once more than n-f nodes remain:
    disconnect 2 of 6 nodes and expect the pool to time out.
    """
    faulties = nodes_by_rank(txnPoolNodeSet)[-faultyNodes:]
    remaining = set(txnPoolNodeSet)

    for faulty in faulties:
        # Only non-primary nodes get taken down.
        assert all(not replica.isPrimary
                   for replica in faulty.replicas.values())
        disconnect_node_and_ensure_disconnected(
            looper, remaining, faulty, stopNode=False)
        remaining.discard(faulty)

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    check_request_is_not_returned_to_nodes(
        txnPoolNodeSet, sdk_json_to_request_object(json.loads(reqs[0])))

    # Reconnect the nodes so the pytest process can shut down cleanly.
    for faulty in faulties:
        remaining.add(faulty)
        reconnect_node_and_ensure_connected(looper, remaining, faulty)
def test_6_nodes_pool_cannot_reach_quorum_with_2_disconnected(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    '''
    Check that we can not reach consensus when more than n-f nodes
    are disconnected: disconnect 2 of 6 nodes
    '''
    faulties = nodes_by_rank(txnPoolNodeSet)[-faultyNodes:]
    current_node_set = set(txnPoolNodeSet)
    for node in faulties:
        # Iterate replica objects via .values(): the replicas container is
        # dict-like, so iterating it directly yields instance ids, not
        # Replica objects (matches the sibling copy of this test).
        for r in node.replicas.values():
            assert not r.isPrimary
        disconnect_node_and_ensure_disconnected(
            looper, current_node_set, node, stopNode=False)
        current_node_set.remove(node)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    check_request_is_not_returned_to_nodes(
        txnPoolNodeSet, sdk_json_to_request_object(json.loads(reqs[0])))
    # The following reconnection of nodes is needed in this test to avoid
    # pytest process hangup
    for node in faulties:
        current_node_set.add(node)
        reconnect_node_and_ensure_connected(looper, current_node_set, node)
def test_belated_request_not_processed_if_already_in_3pc_process(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    # Delta only receives the client request after the rest of the pool has
    # already run 3PC for it; the ledger must grow by exactly one txn
    # on every node (i.e. the late delivery is not re-executed).
    delta = txnPoolNodeSet[3]
    initial_ledger_size = delta.domainLedger.size
    # Hide the client request from Delta and stall COMMITs on all nodes.
    delta.clientIbStasher.delay(req_delay(300))
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(300))
    one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_signed_requests(sdk_pool_handle, one_req)
    # Wait one full propagate/pre-prepare/prepare/commit round.
    looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) +
                  waits.expectedPrePrepareTime(len(txnPoolNodeSet)) +
                  waits.expectedPrepareTime(len(txnPoolNodeSet)) +
                  waits.expectedCommittedTime(len(txnPoolNodeSet)))
    # Deliver the stashed client request to Delta while COMMITs are held.
    delta.clientIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedPropagateTime(len(txnPoolNodeSet)) +
                  waits.expectedPrePrepareTime(len(txnPoolNodeSet)) +
                  waits.expectedPrepareTime(len(txnPoolNodeSet)) +
                  waits.expectedCommittedTime(len(txnPoolNodeSet)))
    # Release the COMMITs and let ordering finish.
    for node in txnPoolNodeSet:
        node.nodeIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedOrderingTime(delta.replicas.num_replicas))
    for node in txnPoolNodeSet:
        assert node.domainLedger.size - initial_ledger_size == 1
def test_process_invalid_catchup_reply(txnPoolNodeSet, looper,
                                       sdk_wallet_client):
    '''
    Test correct work of method processCatchupRep and that sending replies
    in reverse order will call a few iterations of cycle in _processCatchupRep
    '''
    ledger_manager = txnPoolNodeSet[0].ledgerManager
    # NOTE(review): `ledger_id` is a module-level constant defined elsewhere
    # in this file.
    catchup_rep_service = ledger_manager._leechers[
        ledger_id].catchup_rep_service
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    ledger_size = ledger.size
    num_txns_in_reply = 3
    reply_count = 2
    # Put the txns on a DIFFERENT node's ledger and pretend this node is
    # catching up to them.
    cons_proof, catchup_reps = _add_txns_to_ledger(txnPoolNodeSet[1],
                                                   looper,
                                                   sdk_wallet_client,
                                                   num_txns_in_reply,
                                                   reply_count)
    catchup_rep_service._catchup_till = cons_proof
    catchup_rep_service._is_working = True

    # make invalid catchup reply by dint of adding new transaction in it
    reply2 = catchup_reps[1]
    txns = OrderedDict(getattr(reply2, f.TXNS.nm))
    req = sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0]
    txns[str(ledger_size + 4)] = append_txn_metadata(reqToTxn(req),
                                                     txn_time=12345678)
    invalid_reply2 = CatchupRep(ledger_id, txns,
                                getattr(reply2, f.CONS_PROOF.nm))

    # process 2nd interval with invalid catchup reply
    ledger_manager.processCatchupRep(invalid_reply2, sdk_wallet_client[1])
    # check that invalid transaction was not added to ledger, but added to
    # the received catchup txns
    check_reply_not_applied(ledger_size, ledger, catchup_rep_service,
                            sdk_wallet_client[1], invalid_reply2)

    # process valid reply from 1st interval
    reply1 = catchup_reps[0]
    ledger_manager.processCatchupRep(reply1, sdk_wallet_client[1])
    # check that only valid reply added to ledger
    ledger_size = check_replies_applied(ledger_size, ledger,
                                        catchup_rep_service,
                                        sdk_wallet_client[1], [reply1])
    # check that invalid reply was removed from the received catchup txns
    received_replies = {
        str(seq_no) for seq_no, _ in catchup_rep_service._received_catchup_txns
    }
    assert not set(reply2.txns.keys()).issubset(received_replies)
    assert not catchup_rep_service._received_catchup_replies_from[
        sdk_wallet_client[1]]

    # check that valid reply for 2nd interval was added to ledger
    reply2 = catchup_reps[1]
    ledger_manager.processCatchupRep(reply2, sdk_wallet_client[1])
    ledger_size = check_replies_applied(ledger_size, ledger,
                                        catchup_rep_service,
                                        sdk_wallet_client[1], [reply2])
    # All bookkeeping must be drained once catchup is complete.
    assert not catchup_rep_service._received_catchup_replies_from
    assert not catchup_rep_service._received_catchup_txns
def test_6_nodes_pool_cannot_reach_quorum_with_2_faulty(
        afterElection, looper, txnPoolNodeSet, prepared1,
        sdk_wallet_client, sdk_pool_handle):
    """With 2 of 6 nodes faulty the pool cannot order: the send must time out."""
    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        sdk_send_and_check(signed, looper, txnPoolNodeSet, sdk_pool_handle)
    # No node may have produced a reply for the request.
    req_obj = sdk_json_to_request_object(json.loads(signed[0]))
    check_request_is_not_returned_to_nodes(txnPoolNodeSet, req_obj)
def test_6_nodes_pool_cannot_reach_quorum_with_2_faulty(
        afterElection, looper, txnPoolNodeSet, prepared1,
        sdk_wallet_client, sdk_pool_handle):
    """With 2 of 6 nodes faulty the pool cannot order: the send must time out."""
    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    with pytest.raises(PoolLedgerTimeoutException):
        sdk_send_and_check(signed, looper, txnPoolNodeSet, sdk_pool_handle)
    # No node may have produced a reply for the request.
    req_obj = sdk_json_to_request_object(json.loads(signed[0]))
    check_request_is_not_returned_to_nodes(txnPoolNodeSet, req_obj)
def test_catchup_during_3pc_continue_sending(tconf, looper, txnPoolNodeSet,
                                             sdk_wallet_client,
                                             sdk_pool_handle):
    """The pool keeps ordering after txns were caught up mid-3PC on a replica."""
    batch = tconf.Max3PCBatchSize
    first_reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                            batch + 2)
    lagging_replica = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0]

    # Simulate catch-up: add the first batch of txns to the ledger after
    # enough COMMITs were collected to order, but before actual ordering.
    caught_up = [json.loads(req) for req in first_reqs[:batch]]
    add_txns_to_ledger_before_order(lagging_replica, caught_up)

    sdk_send_and_check(first_reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # The pool must continue processing subsequent requests normally.
    more_reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                           2 * batch - 2)
    sdk_send_and_check(more_reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)
def test_process_invalid_catchup_reply(txnPoolNodeSet, looper,
                                       sdk_wallet_client):
    '''
    Test correct work of method processCatchupRep and that sending replies
    in reverse order will call a few iterations of cycle in _processCatchupRep
    '''
    ledger_manager = txnPoolNodeSet[0].ledgerManager
    # NOTE(review): `ledger_id` is a module-level constant defined elsewhere
    # in this file.
    ledger_info = ledger_manager.getLedgerInfoByType(ledger_id)
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    ledger_size = ledger.size
    num_txns_in_reply = 3
    reply_count = 2
    # Put the txns on a DIFFERENT node's ledger and pretend this node is
    # syncing to them.
    cons_proof, catchup_reps = _add_txns_to_ledger(txnPoolNodeSet[1],
                                                   looper,
                                                   sdk_wallet_client,
                                                   num_txns_in_reply,
                                                   reply_count)
    ledger_info.catchUpTill = cons_proof
    ledger_info.state = LedgerState.syncing

    # make invalid catchup reply by dint of adding new transaction in it
    reply2 = catchup_reps[1]
    txns = OrderedDict(getattr(reply2, f.TXNS.nm))
    req = sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0]
    txns[str(ledger_size + 4)] = append_txn_metadata(reqToTxn(req),
                                                     txn_time=12345678)
    invalid_reply2 = CatchupRep(ledger_id, txns,
                                getattr(reply2, f.CONS_PROOF.nm))

    # process 2nd interval with invalid catchup reply
    ledger_manager.processCatchupRep(invalid_reply2, sdk_wallet_client[1])
    # check that invalid transaction was not added to ledger, but added to
    # ledger_info.receivedCatchUpReplies
    check_reply_not_applied(ledger_size, ledger, ledger_info,
                            sdk_wallet_client[1], invalid_reply2)

    # process valid reply from 1st interval
    reply1 = catchup_reps[0]
    ledger_manager.processCatchupRep(reply1, sdk_wallet_client[1])
    # check that only valid reply added to ledger
    ledger_size = check_replies_applied(ledger_size, ledger, ledger_info,
                                        sdk_wallet_client[1], [reply1])
    # check that invalid reply was removed from ledger_info.receivedCatchUpReplies
    received_replies = {str(seq_no)
                        for seq_no, _ in ledger_info.receivedCatchUpReplies}
    assert not set(getattr(reply2, f.TXNS.nm).keys()).issubset(received_replies)
    assert not ledger_info.recvdCatchupRepliesFrm[sdk_wallet_client[1]]

    # check that valid reply for 2nd interval was added to ledger
    reply2 = catchup_reps[1]
    ledger_manager.processCatchupRep(reply2, sdk_wallet_client[1])
    ledger_size = check_replies_applied(ledger_size, ledger, ledger_info,
                                        sdk_wallet_client[1], [reply2])
    # All bookkeeping must be drained once catchup is complete.
    assert not ledger_info.receivedCatchUpReplies
    assert not ledger_info.recvdCatchupRepliesFrm
def test_old_txn_metadata_digest_fallback(looper, sdk_wallet_client):
    """Legacy txn metadata still yields the payload digest; full digest is absent.

    Builds a signed request, converts it to the legacy txn format and checks
    that `get_payload_digest` recovers the original payload digest while
    `get_digest` returns None (the legacy format never stored it).
    """
    # Create signed request and convert to legacy txn
    req_str = sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0]
    req = deserialize_req(req_str)
    txn = req_to_legacy_txn(req_str)

    # Check that digests still can be extracted correctly.
    # PEP 8: compare to None with `is`, not `==`.
    assert get_payload_digest(txn) == req.payload_digest
    assert get_digest(txn) is None
def test_make_proof_bls_disabled(looper, txnPoolNodeSet, sdk_wallet_client):
    """With BLS disabled a state lookup must not produce a proof on any node."""
    req = json.loads(
        sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0])
    for node in txnPoolNodeSet:
        handler = node.read_manager.request_handlers[GET_BUY]
        buy_key = BuyHandler.prepare_buy_key(req['identifier'], req['reqId'])
        # lookup returns a 4-tuple; the proof is the last element.
        proof = handler.lookup(buy_key, with_proof=True)[3]
        assert not proof
def test_make_proof_bls_disabled(looper, txnPoolNodeSet, sdk_wallet_client):
    """With BLS disabled reading from state must not return a proof."""
    req = json.loads(
        sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0])
    for node in txnPoolNodeSet:
        handler = node.get_req_handler(DOMAIN_LEDGER_ID)
        buy_key = handler.prepare_buy_key(req['identifier'], req['reqId'])
        _, proof = handler.get_value_from_state(buy_key, with_proof=True)
        assert not proof
def test_make_result_bls_disabled(looper, txnPoolNodeSet, sdk_wallet_client):
    """With BLS disabled make_result must not attach a state proof."""
    req = json.loads(
        sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0])
    for node in txnPoolNodeSet:
        handler = node.get_req_handler(DOMAIN_LEDGER_ID)
        buy_key = handler.prepare_buy_key(req['identifier'], req['reqId'])
        _, proof = handler.get_value_from_state(buy_key, with_proof=True)
        result = handler.make_result(sdk_json_to_request_object(req),
                                     {TXN_TYPE: "buy"},
                                     2,
                                     get_utc_epoch(),
                                     proof)
        assert STATE_PROOF not in result
def test_6_nodes_pool_cannot_reach_quorum_with_2_disconnected(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Consensus must be unreachable when more than n-f nodes are unavailable:
    the nodes are stopped and the send must fail.
    """
    stop_nodes(looper, txnPoolNodeSet)
    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    with pytest.raises(AssertionError):
        sdk_send_and_check(signed, looper, txnPoolNodeSet, sdk_pool_handle)
    # No node may have produced a reply for the request.
    req_obj = sdk_json_to_request_object(json.loads(signed[0]))
    check_request_is_not_returned_to_nodes(txnPoolNodeSet, req_obj)
def test_make_result_bls_disabled(looper, txnPoolNodeSet, sdk_wallet_client):
    """With BLS disabled make_result must not attach a state proof."""
    req = json.loads(
        sdk_signed_random_requests(looper, sdk_wallet_client, 1)[0])
    for node in txnPoolNodeSet:
        handler = node.read_manager.request_handlers[GET_BUY]
        buy_key = BuyHandler.prepare_buy_key(req['identifier'], req['reqId'])
        # lookup returns a 4-tuple; the proof is the last element.
        proof = handler.lookup(buy_key, with_proof=True)[3]
        result = handler.make_result(sdk_json_to_request_object(req),
                                     {TXN_TYPE: "buy"},
                                     2,
                                     get_utc_epoch(),
                                     proof)
        assert STATE_PROOF not in result
def test_repeated_request_not_processed_if_already_ordered(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """Re-sending an already ordered request must not grow the ledger again."""
    observed_node = txnPoolNodeSet[3]
    size_before = observed_node.domainLedger.size

    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_and_check(signed, looper, txnPoolNodeSet, sdk_pool_handle)

    # Send the very same request again and give the pool time to (not)
    # process it a second time.
    sdk_send_signed_requests(sdk_pool_handle, signed)
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    for node in txnPoolNodeSet:
        assert node.domainLedger.size - size_before == 1
def test_belated_propagate_not_processed_if_already_ordered(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """A PROPAGATE arriving after ordering must not be processed again."""
    delta = txnPoolNodeSet[3]
    size_before = delta.domainLedger.size

    # Hold Gamma's PROPAGATE to Delta until after the request is ordered.
    delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma'))
    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_and_check(signed, looper, txnPoolNodeSet, sdk_pool_handle)

    # Release the stashed PROPAGATE and wait; the ledger must not grow.
    delta.nodeIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    for node in txnPoolNodeSet:
        assert node.domainLedger.size - size_before == 1
def test_belated_request_not_processed_after_view_change(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """A request replayed to one node after a view change is not re-executed."""
    delta = txnPoolNodeSet[3]
    size_before = delta.domainLedger.size

    # Delta never sees the client request while it is being ordered.
    delta.clientIbStasher.delay(req_delay(300))
    signed = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_and_check(signed, looper, txnPoolNodeSet, sdk_pool_handle)

    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)

    # Deliver the stashed request only after the view change completed.
    delta.clientIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    for node in txnPoolNodeSet:
        assert node.domainLedger.size - size_before == 1
def test_all_replicas_hold_request_keys(perf_chk_patched,
                                        looper,
                                        txnPoolNodeSet,
                                        sdk_wallet_client,
                                        sdk_pool_handle):
    """
    All replicas, whether primary or non-primary, hold request keys of
    forwarded requests. Once requests are ordered, their request keys are
    removed from the replica.
    """
    # NOTE(review): this copy of the test appears truncated here — the
    # sibling copies continue with a view-change phase after the last line.
    tconf = perf_chk_patched
    delay_3pc = 2
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r._ordering_service.
                               requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r._ordering_service.requestQueues[
                            DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    # Primaries consume keys as soon as they batch them.
                    assert len(r._ordering_service.
                               requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper, req_resps,
                    timeout=sdk_eval_timeout(tconf.Max3PCBatchSize - 1,
                                             len(txnPoolNodeSet),
                                             add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
def testSendRequestWithoutSignatureFails(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): """ A client request sent without a signature fails with an EmptySignature exception """ # remove the client's ability to sign requests = sdk_signed_random_requests(looper, sdk_wallet_client, 1) json_req = json.loads(requests[0]) json_req['signature'] = None request = json.dumps(json_req) res = sdk_send_signed_requests(sdk_pool_handle, [request]) obj_req = sdk_json_to_request_object(res[0][0]) timeout = waits.expectedClientRequestPropagationTime(nodeCount) with pytest.raises(AssertionError): for node in txnPoolNodeSet: looper.loop.run_until_complete( eventually(checkLastClientReqForNode, node, obj_req, retryWait=1, timeout=timeout)) for n in txnPoolNodeSet: params = n.spylog.getLastParams(Node.handleInvalidClientMsg) ex = params['ex'] msg, _ = params['wrappedMsg'] assert isinstance(ex, MissingSignature) assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier params = n.spylog.getLastParams(Node.discard) reason = params["reason"] (msg, frm) = params["msg"] assert msg == json_req assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier assert "MissingSignature" in reason
def testSendRequestWithoutSignatureFails(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): """ A client request sent without a signature fails with an EmptySignature exception """ # remove the client's ability to sign requests = sdk_signed_random_requests(looper, sdk_wallet_client, 1) json_req = json.loads(requests[0]) json_req['signature'] = None request = json.dumps(json_req) res = sdk_send_signed_requests(sdk_pool_handle, [request]) obj_req = sdk_json_to_request_object(res[0][0]) timeout = waits.expectedClientRequestPropagationTime(nodeCount) with pytest.raises(AssertionError): for node in txnPoolNodeSet: looper.loop.run_until_complete(eventually( checkLastClientReqForNode, node, obj_req, retryWait=1, timeout=timeout)) for n in txnPoolNodeSet: params = n.spylog.getLastParams(Node.handleInvalidClientMsg) ex = params['ex'] msg, _ = params['wrappedMsg'] assert isinstance(ex, MissingSignature) assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier params = n.spylog.getLastParams(Node.discard) reason = params["reason"] (msg, frm) = params["msg"] assert msg == json_req assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier assert "MissingSignature" in reason
def test_already_processed_requests(looper, txnPoolNodeSet, sdk_pool_handle,
                                    sdk_wallet_client):
    """
    Client re-sending request and checking that nodes picked the reply from
    ledger and did not process the request again
    """
    def get_method_call_count(method):
        # Spy-log call count for `method`, asserting it is identical on
        # every node.
        counts = set()
        for node in txnPoolNodeSet:
            c = node.spylog.count(method)
            counts.add(c)
        assert len(counts) == 1
        return counts.pop()

    def get_getReplyFromLedgerForRequest_call_count():
        return get_method_call_count(
            next(iter(txnPoolNodeSet)).getReplyFromLedgerForRequest)

    def get_recordAndPropagate_call_count():
        return get_method_call_count(
            next(iter(txnPoolNodeSet)).recordAndPropagate)

    def get_last_returned_val():
        # Last return value of getReplyFromLedgerForRequest, asserting it is
        # the same on every node.
        rvs = []
        for node in txnPoolNodeSet:
            rv = getAllReturnVals(node, node.getReplyFromLedgerForRequest)
            rvs.append(rv[0])
        # All items are same in the list
        assert rvs.count(rvs[0]) == len(txnPoolNodeSet)
        return rvs[0]

    rlc1 = get_getReplyFromLedgerForRequest_call_count()
    rpc1 = get_recordAndPropagate_call_count()
    # Request which will be sent twice
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    # Send, check and getting reply from first request
    sdk_reqs = sdk_send_signed_requests(sdk_pool_handle, reqs)
    total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet))
    request1 = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)
    for req_res in request1:
        sdk_check_reply(req_res)
    first_req_id = request1[0][0]['reqId']
    rlc2 = get_getReplyFromLedgerForRequest_call_count()
    rpc2 = get_recordAndPropagate_call_count()
    assert rlc2 - rlc1 == 1  # getReplyFromLedgerForRequest was called
    assert rpc2 - rpc1 == 1  # recordAndPropagate was called
    r1 = get_last_returned_val()
    # getReplyFromLedgerForRequest returned None since had not seen request
    assert r1 is None
    # Request which we will send only once
    request2 = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client, 1)
    second_req_id = request2[0][0]['reqId']
    assert second_req_id != first_req_id
    rlc3 = get_getReplyFromLedgerForRequest_call_count()
    rpc3 = get_recordAndPropagate_call_count()
    assert rlc3 - rlc2 == 1  # getReplyFromLedgerForRequest was called again
    assert rpc3 - rpc2 == 1  # recordAndPropagate was called again
    r2 = get_last_returned_val()
    # getReplyFromLedgerForRequest returned None since had not seen request
    assert r2 is None
    # Reply for the first request, which is going to be sent again
    rep1 = request1[0][1]['result']
    # Client re-sending first request
    request3 = sdk_send_signed_requests(sdk_pool_handle, reqs)
    total_timeout = sdk_eval_timeout(len(request3), len(txnPoolNodeSet))
    request3 = sdk_get_replies(looper, request3, timeout=total_timeout)
    third_req_id = request3[0][0]['reqId']
    assert third_req_id == first_req_id
    rlc4 = get_getReplyFromLedgerForRequest_call_count()
    rpc4 = get_recordAndPropagate_call_count()
    assert rlc4 - rlc3 == 1  # getReplyFromLedgerForRequest was called again
    assert rpc4 - rpc3 == 0  # recordAndPropagate was not called
    r3 = get_last_returned_val()
    # getReplyFromLedgerForRequest did not return None this time since had
    # seen request
    assert r3 is not None
    rep3 = request3[0][1]['result']
    # Since txnTime is not stored in ledger and reading from ledger return
    # all possible fields from transactions
    rep3 = {k: v for k, v in rep3.items() if v is not None}
    rep1 = {k: v for k, v in rep1.items() if k in rep3}
    assert rep3 == rep1  # The reply client got is same as the previous one
def test_unordered_state_reverted_before_catchup(
        tconf, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """
    Check that unordered state is reverted before starting catchup:
    - save the initial state on a node
    - slow down processing of COMMITs
    - send requests
    - wait until other nodes come to consensus
    - call start of catch-up
    - check that the state of the slow node is reverted and equal to the
    initial one.
    """
    # CONFIG
    ledger_id = DOMAIN_LEDGER_ID
    # Pick a node that is not primary of instance 0; its ledger/state
    # objects are captured once and read again after the delays below.
    non_primary_node = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0].node
    non_primary_ledger = non_primary_node.getLedger(ledger_id)
    non_primary_state = non_primary_node.getState(ledger_id)

    # send reqs and make sure we are at the same state
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 10)
    sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # the state of the node before: both committed and uncommitted roots of
    # the ledger and of the state trie
    committed_ledger_before = non_primary_ledger.tree.root_hash
    uncommitted_ledger_before = non_primary_ledger.uncommittedRootHash
    committed_state_before = non_primary_state.committedHeadHash
    uncommitted_state_before = non_primary_state.headHash

    # EXECUTE
    # Delay commit requests on the node, so the batch is applied to the
    # uncommitted state but never ordered on this node
    delay_c = 60
    non_primary_node.nodeIbStasher.delay(cDelay(delay_c))
    # send requests; replies arrive because the rest of the pool still
    # reaches consensus without the slow node's COMMITs
    reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, tconf.Max3PCBatchSize)
    sdk_get_replies(looper, reqs, timeout=40)

    # Snapshot the slow node's roots while the 3PC batch is still unordered
    committed_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).tree.root_hash
    uncommitted_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).uncommittedRootHash
    committed_state_during_3pc = non_primary_node.getState(
        ledger_id).committedHeadHash
    uncommitted_state_during_3pc = non_primary_node.getState(
        ledger_id).headHash

    # start catchup: invoke the pre-catchup callback directly, which is
    # expected to revert the unordered (uncommitted) changes
    non_primary_node.ledgerManager.preCatchupClbk(ledger_id)

    committed_ledger_reverted = non_primary_ledger.tree.root_hash
    uncommitted_ledger_reverted = non_primary_ledger.uncommittedRootHash
    committed_state_reverted = non_primary_state.committedHeadHash
    uncommitted_state_reverted = non_primary_state.headHash

    # CHECK
    # check that initial uncommitted state differs from the state during 3PC
    # but committed does not
    assert committed_ledger_before == committed_ledger_during_3pc
    assert uncommitted_ledger_before != uncommitted_ledger_during_3pc
    assert committed_state_before == committed_state_during_3pc
    assert uncommitted_state_before != uncommitted_state_during_3pc

    # After the pre-catchup revert, all four roots match the initial ones
    assert committed_ledger_before == committed_ledger_reverted
    assert uncommitted_ledger_before == uncommitted_ledger_reverted
    assert committed_state_before == committed_state_reverted
    assert uncommitted_state_before == uncommitted_state_reverted
def test_already_processed_requests(looper, txnPoolNodeSet, sdk_pool_handle,
                                    sdk_wallet_client):
    """
    Client re-sending request and checking that nodes picked the reply from
    ledger and did not process the request again
    """

    def get_method_call_count(method):
        # Spy-log invocation count of `method`; must be identical on every
        # node in the pool.
        counts = set()
        for node in txnPoolNodeSet:
            c = node.spylog.count(method)
            counts.add(c)
        assert len(counts) == 1
        return counts.pop()

    def get_getReplyFromLedger_call_count():
        return get_method_call_count(
            next(iter(txnPoolNodeSet)).getReplyFromLedger)

    def get_recordAndPropagate_call_count():
        return get_method_call_count(
            next(iter(txnPoolNodeSet)).recordAndPropagate)

    def get_last_returned_val():
        # Latest return value of getReplyFromLedger on each node; all nodes
        # must agree on it.
        rvs = []
        for node in txnPoolNodeSet:
            rv = getAllReturnVals(node, node.getReplyFromLedger)
            rvs.append(rv[0])
        # All items are same in the list
        assert rvs.count(rvs[0]) == len(txnPoolNodeSet)
        return rvs[0]

    # Baseline counters before any request is sent
    rlc1 = get_getReplyFromLedger_call_count()
    rpc1 = get_recordAndPropagate_call_count()

    # Request which will be send twice
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)

    # Send, check and getting reply from first request
    sdk_reqs = sdk_send_signed_requests(sdk_pool_handle, reqs)
    total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet))
    request1 = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)
    for req_res in request1:
        sdk_check_reply(req_res)
    first_req_id = request1[0][0]['reqId']
    rlc2 = get_getReplyFromLedger_call_count()
    rpc2 = get_recordAndPropagate_call_count()
    assert rlc2 - rlc1 == 1  # getReplyFromLedger was called
    assert rpc2 - rpc1 == 1  # recordAndPropagate was called
    r1 = get_last_returned_val()
    # getReplyFromLedger returned None since had not seen request
    assert r1 is None

    # Request which we will send only once
    request2 = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client,
                                         1)
    second_req_id = request2[0][0]['reqId']
    assert second_req_id != first_req_id
    rlc3 = get_getReplyFromLedger_call_count()
    rpc3 = get_recordAndPropagate_call_count()
    assert rlc3 - rlc2 == 1  # getReplyFromLedger was called again
    assert rpc3 - rpc2 == 1  # recordAndPropagate was called again
    r2 = get_last_returned_val()
    # getReplyFromLedger returned None since had not seen request
    assert r2 is None

    # Reply for the first request, which is going to be sent again
    rep1 = request1[0][1]['result']

    # Client re-sending first request
    request3 = sdk_send_signed_requests(sdk_pool_handle, reqs)
    total_timeout = sdk_eval_timeout(len(request3), len(txnPoolNodeSet))
    request3 = sdk_get_replies(looper, request3, timeout=total_timeout)
    third_req_id = request3[0][0]['reqId']
    assert third_req_id == first_req_id
    rlc4 = get_getReplyFromLedger_call_count()
    rpc4 = get_recordAndPropagate_call_count()
    assert rlc4 - rlc3 == 1  # getReplyFromLedger was called again
    assert rpc4 - rpc3 == 0  # recordAndPropagate was not called
    r3 = get_last_returned_val()
    # getReplyFromLedger did not return None this time since had seen request
    assert r3 is not None
    rep3 = request3[0][1]['result']

    # Since txnTime is not stored in ledger and reading from ledger return
    # all possible fields from transactions
    rep3 = {k: v for k, v in rep3.items() if v is not None}
    rep1 = {k: v for k, v in rep1.items() if k in rep3}
    assert rep3 == rep1  # The reply client got is same as the previous one
def test_unordered_state_reverted_before_catchup(tconf, looper, txnPoolNodeSet,
                                                 sdk_wallet_client,
                                                 sdk_pool_handle):
    """
    Check that unordered state is reverted before starting catchup:
    - save the initial state on a node
    - slow down processing of COMMITs
    - send requests
    - wait until other nodes come to consensus
    - call start of catch-up
    - check that the state of the slow node is reverted and equal to the
    initial one.
    """
    ledger_id = DOMAIN_LEDGER_ID
    # A node that is not primary of instance 0 will be slowed down
    slow_node = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0].node

    def snapshot():
        # (committed ledger root, uncommitted ledger root,
        #  committed state head, uncommitted state head) of the slow node
        ledger = slow_node.getLedger(ledger_id)
        state = slow_node.getState(ledger_id)
        return (ledger.tree.root_hash, ledger.uncommittedRootHash,
                state.committedHeadHash, state.headHash)

    # Bring the whole pool to one common state first
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 10)
    sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    before = snapshot()

    # Hold back COMMITs so the batch stays unordered on the slow node ...
    slow_node.nodeIbStasher.delay(cDelay())
    # ... and consistency proofs, so a triggered catch-up cannot finish
    slow_node.nodeIbStasher.delay(cpDelay())

    # The rest of the pool still orders the batch and answers the client
    reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, tconf.Max3PCBatchSize)
    sdk_get_replies(looper, reqs, timeout=40)

    during_3pc = snapshot()

    # Trigger catch-up; its start is expected to revert unordered changes
    slow_node.start_catchup()

    reverted = snapshot()

    # While 3PC was in flight only the uncommitted roots moved
    assert before[0] == during_3pc[0]
    assert before[1] != during_3pc[1]
    assert before[2] == during_3pc[2]
    assert before[3] != during_3pc[3]

    # Starting catch-up reverted every root back to the initial snapshot
    assert before[0] == reverted[0]
    assert before[1] == reverted[1]
    assert before[2] == reverted[2]
    assert before[3] == reverted[3]
def create_txns(looper, sdk_wallet_client, count=TXNS_IN_BATCH):
    """
    Build signed random requests and convert them to transactions.

    Generalized: `count` (default TXNS_IN_BATCH) controls how many
    transactions are produced, so callers are no longer tied to the
    module-level batch size; existing callers are unaffected.

    NOTE(review): a later definition of `create_txns` in this file shadows
    this one — consider removing the duplicate.

    :param looper: event loop used by the SDK request helpers
    :param sdk_wallet_client: wallet used to sign the requests
    :param count: number of transactions to create
    :return: list of transactions derived from the signed requests
    """
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, count)
    return [reqToTxn(req) for req in reqs]
def create_txns(looper, sdk_wallet_client, count=TXNS_IN_BATCH):
    """Return `count` transactions built from freshly signed random requests."""
    signed_reqs = sdk_signed_random_requests(looper, sdk_wallet_client, count)
    return list(map(reqToTxn, signed_reqs))