def test_audit_ledger_updated_after_freshness_updated(looper, tconf, txnPoolNodeSet,
                                                      initial_domain_size, initial_pool_size,
                                                      initial_config_size):
    """
    Check that the freshness-check mechanism appends one audit txn per refreshed
    ledger (pool, domain, config) and that each audit txn carries the expected
    3PC coordinates, roots, ledger sizes and back-references.

    Flow: wait for one full freshness round, snapshot audit-ledger sizes and the
    last ordered (view_no, pp_seq_no), then wait for the next round (detected by
    the BLS multi-sigs changing) and verify the three audit txns it produced.
    """
    # 1. Wait for the first freshness update
    looper.run(
        eventually(check_freshness_updated_for_all, txnPoolNodeSet,
                   timeout=2 * FRESHNESS_TIMEOUT))
    audit_size_initial = [node.auditLedger.size for node in txnPoolNodeSet]
    # Snapshot the last ordered 3PC key; the freshness batches below are
    # expected to be ordered at pp_seq_no + 1, + 2, + 3 in the same view.
    view_no = txnPoolNodeSet[0].master_replica.last_ordered_3pc[0]
    pp_seq_no = txnPoolNodeSet[0].master_replica.last_ordered_3pc[1]
    initial_seq_no = txnPoolNodeSet[0].auditLedger.size

    # 2. Wait for the second freshness update
    # A change in the BLS multi-signature values is the observable signal that
    # another freshness round has been committed.
    bls_multi_sigs_after_first_update = get_all_multi_sig_values_for_all_nodes(txnPoolNodeSet)
    looper.run(
        eventually(check_updated_bls_multi_sig_for_all_ledgers,
                   txnPoolNodeSet, bls_multi_sigs_after_first_update,
                   FRESHNESS_TIMEOUT,
                   timeout=FRESHNESS_TIMEOUT + 5))

    # 3. check that there is audit ledger txn created for each ledger updated as a freshness check
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet, audit_txns_added=3)
    for node in txnPoolNodeSet:
        # Audit txn for the pool-ledger freshness batch (third from the end).
        # last_domain_seqno/last_config_seqno point back 2 and 3 audit txns;
        # presumably these integer values mean "N txns back" — see check_audit_txn.
        check_audit_txn(txn=node.auditLedger.getBySeqNo(node.auditLedger.size - 2),
                        view_no=view_no, pp_seq_no=pp_seq_no + 1,
                        seq_no=initial_seq_no + 1,
                        txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
                        txn_roots={
                            POOL_LEDGER_ID: node.getLedger(POOL_LEDGER_ID).tree.root_hash
                        },
                        state_roots={
                            POOL_LEDGER_ID: node.getState(POOL_LEDGER_ID).committedHeadHash
                        },
                        pool_size=initial_pool_size, domain_size=initial_domain_size,
                        config_size=initial_config_size,
                        last_pool_seqno=None, last_domain_seqno=2, last_config_seqno=3,
                        primaries=pp_seq_no + 1 - 1)
        # Audit txn for the domain-ledger freshness batch (second from the end).
        check_audit_txn(txn=node.auditLedger.getBySeqNo(node.auditLedger.size - 1),
                        view_no=view_no, pp_seq_no=pp_seq_no + 2,
                        seq_no=initial_seq_no + 2,
                        txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
                        txn_roots={
                            DOMAIN_LEDGER_ID: node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash
                        },
                        state_roots={
                            DOMAIN_LEDGER_ID: node.getState(DOMAIN_LEDGER_ID).committedHeadHash
                        },
                        pool_size=initial_pool_size, domain_size=initial_domain_size,
                        config_size=initial_config_size,
                        last_pool_seqno=4, last_domain_seqno=None, last_config_seqno=3,
                        primaries=pp_seq_no + 2 - 1)
        # Audit txn for the config-ledger freshness batch (the latest one).
        check_audit_txn(txn=node.auditLedger.getBySeqNo(node.auditLedger.size),
                        view_no=view_no, pp_seq_no=pp_seq_no + 3,
                        seq_no=initial_seq_no + 3,
                        txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
                        txn_roots={
                            CONFIG_LEDGER_ID: node.getLedger(CONFIG_LEDGER_ID).tree.root_hash
                        },
                        state_roots={
                            CONFIG_LEDGER_ID: node.getState(CONFIG_LEDGER_ID).committedHeadHash
                        },
                        pool_size=initial_pool_size, domain_size=initial_domain_size,
                        config_size=initial_config_size,
                        last_pool_seqno=4, last_domain_seqno=5, last_config_seqno=None,
                        primaries=pp_seq_no + 3 - 1)
def test_audit_ledger_updated_after_ordering(
        looper, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client, sdk_wallet_stewards,
        initial_domain_size, initial_pool_size, initial_config_size,
        view_no, pp_seq_no, initial_seq_no):
    '''
    Order 2 domain txns, 2 pool txns, and then 1 domain txn
    Check that audit ledger is correctly updated in all cases

    NOTE(review): a second definition with this exact name appears later in the
    file; Python keeps only the last binding, so pytest will never collect this
    one. Confirm whether this version should be removed or renamed.
    '''
    # 1st domain txn
    audit_size_initial = [node.auditLedger.size for node in txnPoolNodeSet]
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=1)
    for node in txnPoolNodeSet:
        # last_*_seqno are all None here: this is the first audit txn after the
        # snapshot, so there is nothing earlier to back-reference.
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 1,
            seq_no=initial_seq_no + 1,
            txn_time=node.master_replica.last_accepted_pre_prepare_time,
            ledger_id=DOMAIN_LEDGER_ID,
            txn_root=node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash,
            state_root=node.getState(DOMAIN_LEDGER_ID).committedHeadHash,
            pool_size=initial_pool_size, domain_size=initial_domain_size + 1,
            config_size=initial_config_size,
            last_pool_seqno=None, last_domain_seqno=None, last_config_seqno=None)

    # 2d domain txn
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=2)
    for node in txnPoolNodeSet:
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 2,
            seq_no=initial_seq_no + 2,
            txn_time=node.master_replica.last_accepted_pre_prepare_time,
            ledger_id=DOMAIN_LEDGER_ID,
            txn_root=node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash,
            state_root=node.getState(DOMAIN_LEDGER_ID).committedHeadHash,
            pool_size=initial_pool_size, domain_size=initial_domain_size + 2,
            config_size=initial_config_size,
            last_pool_seqno=None, last_domain_seqno=None, last_config_seqno=None)

    # 1st pool txn
    # A BLS key rotation is used as a convenient pool-ledger write; functional
    # check is skipped since only the audit side-effect matters here.
    sdk_change_bls_key(looper, txnPoolNodeSet,
                       txnPoolNodeSet[3],
                       sdk_pool_handle, sdk_wallet_stewards[3],
                       check_functional=False)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=3)
    for node in txnPoolNodeSet:
        # last_domain_seqno=2: the latest domain update was recorded 2 audit
        # txns back — presumably "N txns back" semantics; see check_audit_txn.
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 3,
            seq_no=initial_seq_no + 3,
            txn_time=node.master_replica.last_accepted_pre_prepare_time,
            ledger_id=POOL_LEDGER_ID,
            txn_root=node.getLedger(POOL_LEDGER_ID).tree.root_hash,
            state_root=node.getState(POOL_LEDGER_ID).committedHeadHash,
            pool_size=initial_pool_size + 1, domain_size=initial_domain_size + 2,
            config_size=initial_config_size,
            last_pool_seqno=None, last_domain_seqno=2, last_config_seqno=None)

    # 2d pool txn
    sdk_change_bls_key(looper, txnPoolNodeSet,
                       txnPoolNodeSet[3],
                       sdk_pool_handle, sdk_wallet_stewards[3],
                       check_functional=False)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=4)
    for node in txnPoolNodeSet:
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 4,
            seq_no=initial_seq_no + 4,
            txn_time=node.master_replica.last_accepted_pre_prepare_time,
            ledger_id=POOL_LEDGER_ID,
            txn_root=node.getLedger(POOL_LEDGER_ID).tree.root_hash,
            state_root=node.getState(POOL_LEDGER_ID).committedHeadHash,
            pool_size=initial_pool_size + 2, domain_size=initial_domain_size + 2,
            config_size=initial_config_size,
            last_pool_seqno=None, last_domain_seqno=2, last_config_seqno=None)

    # one more domain txn
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=5)
    for node in txnPoolNodeSet:
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 5,
            seq_no=initial_seq_no + 5,
            txn_time=node.master_replica.last_accepted_pre_prepare_time,
            ledger_id=DOMAIN_LEDGER_ID,
            txn_root=node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash,
            state_root=node.getState(DOMAIN_LEDGER_ID).committedHeadHash,
            pool_size=initial_pool_size + 2, domain_size=initial_domain_size + 3,
            config_size=initial_config_size,
            last_pool_seqno=4, last_domain_seqno=None, last_config_seqno=None)
def test_audit_ledger_view_change(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward,
                                  initial_domain_size, initial_pool_size, initial_config_size,
                                  tdir, tconf, allPluginsPath,
                                  view_no, pp_seq_no, initial_seq_no,
                                  monkeypatch):
    '''
    1. Send a NODE transaction and add a 7th Node for adding a new instance,
    but delay Ordered messages.
    2. Send a NYM txn.
    3. Reset delays in executing force_process_ordered
    4. Check that an audit txn for the NYM txn uses primary list from
    uncommitted audit with a new list of primaries.
    '''
    other_nodes = txnPoolNodeSet[:-1]
    slow_node = txnPoolNodeSet[-1]

    # Add a new steward for creating a new node
    new_steward_wallet_handle = sdk_add_new_nym(looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward,
                                                alias="newSteward",
                                                role=STEWARD_STRING)

    audit_size_initial = [node.auditLedger.size for node in txnPoolNodeSet]
    ordereds = []
    # Intercept ordered messages on the slow node so its audit ledger is not
    # committed while the delay rules are in force.
    monkeypatch.setattr(slow_node, 'try_processing_ordered',
                        lambda msg: ordereds.append(msg))
    # icDelay holds back InstanceChange messages, postponing the view change
    # until the `with` block exits.
    with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], icDelay()):
        # Send NODE txn fo 7th node
        new_node = sdk_add_new_node(looper, sdk_pool_handle,
                                    new_steward_wallet_handle,
                                    "Theta", tdir, tconf,
                                    allPluginsPath)
        txnPoolNodeSet.append(new_node)
        looper.run(checkNodesConnected(other_nodes + [new_node]))
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client, 1)
        # Slow node must not have committed anything (0 added), but its
        # uncommitted audit ledger should have grown by 2 (NODE + NYM).
        check_audit_ledger_updated(audit_size_initial, [slow_node],
                                   audit_txns_added=0)
        looper.run(eventually(check_audit_ledger_uncommitted_updated,
                              audit_size_initial, [slow_node], 2))

        def patch_force_process_ordered():
            # Replay the intercepted Ordered messages into the proper replica
            # outboxes, restore the real methods, then process for real.
            for msg in list(ordereds):
                slow_node.replicas[msg.instId].outBox.append(msg)
                ordereds.remove(msg)
            monkeypatch.undo()
            slow_node.force_process_ordered()

        assert ordereds
        monkeypatch.setattr(slow_node, 'force_process_ordered',
                            patch_force_process_ordered)

    # Delays are lifted: the view change can now complete everywhere.
    looper.run(eventually(lambda: assertExp(all(n.viewNo == 1 for n in txnPoolNodeSet))))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    looper.run(eventually(lambda: assertExp(not ordereds)))

    for node in txnPoolNodeSet:
        last_txn = node.auditLedger.get_last_txn()
        # Substitute the primaries field before comparison: the check below
        # asserts it equals the last audited primaries list.
        last_txn['txn']['data']['primaries'] = node._get_last_audited_primaries()
        check_audit_txn(txn=last_txn,
                        view_no=view_no + 1, pp_seq_no=1,
                        seq_no=initial_seq_no + 4,
                        txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
                        txn_roots={DOMAIN_LEDGER_ID: node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash},
                        state_roots={DOMAIN_LEDGER_ID: node.getState(DOMAIN_LEDGER_ID).committedHeadHash},
                        pool_size=initial_pool_size + 1, domain_size=initial_domain_size + 2,
                        config_size=initial_config_size,
                        last_pool_seqno=2, last_domain_seqno=1, last_config_seqno=None,
                        primaries=node.write_manager.future_primary_handler.get_last_primaries() or node.primaries)
def test_audit_ledger_updated_after_ordering(
        looper, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client, sdk_wallet_stewards,
        initial_domain_size, initial_pool_size, initial_config_size,
        view_no, pp_seq_no, initial_seq_no):
    """
    Order 2 domain txns, 2 pool txns, and then 1 domain txn
    Check that audit ledger is correctly updated in all cases

    NOTE(review): this duplicates the name of an earlier test in the file and
    therefore shadows it — only this definition is collected by pytest.
    Confirm the earlier copy should be deleted.
    """
    # 1st domain txn
    audit_size_initial = [node.auditLedger.size for node in txnPoolNodeSet]
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=1)
    for node in txnPoolNodeSet:
        # First audit txn after the snapshot: explicit primaries list and full
        # node registry are expected inline (no back-references yet).
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 1,
            seq_no=initial_seq_no + 1,
            txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
            txn_roots={
                POOL_LEDGER_ID: node.getLedger(POOL_LEDGER_ID).tree.root_hash,
                DOMAIN_LEDGER_ID: node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash
            },
            state_roots={
                POOL_LEDGER_ID: node.getState(POOL_LEDGER_ID).committedHeadHash,
                DOMAIN_LEDGER_ID: node.getState(DOMAIN_LEDGER_ID).committedHeadHash
            },
            pool_size=initial_pool_size, domain_size=initial_domain_size + 1,
            config_size=initial_config_size,
            last_pool_seqno=None, last_domain_seqno=None, last_config_seqno=None,
            primaries=node.primaries,
            digest=node.master_replica._consensus_data.prepared[pp_seq_no + 1 - 1].pp_digest,
            node_reg=[n.name for n in txnPoolNodeSet])

    # 2d domain txn
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=2)
    for node in txnPoolNodeSet:
        # Integer values for primaries/node_reg/last_*_seqno presumably mean
        # "recorded N audit txns back" — TODO confirm against check_audit_txn.
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 2,
            seq_no=initial_seq_no + 2,
            txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
            txn_roots={
                DOMAIN_LEDGER_ID: node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash
            },
            state_roots={
                DOMAIN_LEDGER_ID: node.getState(DOMAIN_LEDGER_ID).committedHeadHash
            },
            pool_size=initial_pool_size, domain_size=initial_domain_size + 2,
            config_size=initial_config_size,
            last_pool_seqno=1, last_domain_seqno=None, last_config_seqno=None,
            primaries=1,
            digest=node.master_replica._consensus_data.prepared[pp_seq_no + 2 - 1].pp_digest,
            node_reg=1)

    # 1st pool txn
    # A BLS key rotation serves as a pool-ledger write; the functional check is
    # skipped because only the audit side-effect is under test.
    sdk_change_bls_key(looper, txnPoolNodeSet,
                       txnPoolNodeSet[3],
                       sdk_pool_handle, sdk_wallet_stewards[3],
                       check_functional=False)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=3)
    for node in txnPoolNodeSet:
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 3,
            seq_no=initial_seq_no + 3,
            txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
            txn_roots={
                POOL_LEDGER_ID: node.getLedger(POOL_LEDGER_ID).tree.root_hash
            },
            state_roots={
                POOL_LEDGER_ID: node.getState(POOL_LEDGER_ID).committedHeadHash
            },
            pool_size=initial_pool_size + 1, domain_size=initial_domain_size + 2,
            config_size=initial_config_size,
            last_pool_seqno=1, last_domain_seqno=2, last_config_seqno=None,
            primaries=2,
            digest=node.master_replica._consensus_data.prepared[pp_seq_no + 3 - 1].pp_digest,
            node_reg=2)

    # 2d pool txn
    sdk_change_bls_key(looper, txnPoolNodeSet,
                       txnPoolNodeSet[3],
                       sdk_pool_handle, sdk_wallet_stewards[3],
                       check_functional=False)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=4)
    for node in txnPoolNodeSet:
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 4,
            seq_no=initial_seq_no + 4,
            txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
            txn_roots={
                POOL_LEDGER_ID: node.getLedger(POOL_LEDGER_ID).tree.root_hash
            },
            state_roots={
                POOL_LEDGER_ID: node.getState(POOL_LEDGER_ID).committedHeadHash
            },
            pool_size=initial_pool_size + 2, domain_size=initial_domain_size + 2,
            config_size=initial_config_size,
            last_pool_seqno=2, last_domain_seqno=2, last_config_seqno=None,
            primaries=3,
            digest=node.master_replica._consensus_data.prepared[pp_seq_no + 4 - 1].pp_digest,
            node_reg=3)

    # one more domain txn
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    check_audit_ledger_updated(audit_size_initial, txnPoolNodeSet,
                               audit_txns_added=5)
    for node in txnPoolNodeSet:
        check_audit_txn(
            txn=node.auditLedger.get_last_txn(),
            view_no=view_no, pp_seq_no=pp_seq_no + 5,
            seq_no=initial_seq_no + 5,
            txn_time=node.master_replica._ordering_service.last_accepted_pre_prepare_time,
            txn_roots={
                DOMAIN_LEDGER_ID: node.getLedger(DOMAIN_LEDGER_ID).tree.root_hash
            },
            state_roots={
                DOMAIN_LEDGER_ID: node.getState(DOMAIN_LEDGER_ID).committedHeadHash
            },
            pool_size=initial_pool_size + 2, domain_size=initial_domain_size + 3,
            config_size=initial_config_size,
            last_pool_seqno=4, last_domain_seqno=None, last_config_seqno=None,
            primaries=4,
            digest=node.master_replica._consensus_data.prepared[pp_seq_no + 5 - 1].pp_digest,
            node_reg=4)