def __init__(self, viewNo, quorums, ledger_ids):
    """Build a fake node for tests: wires a fake network stack and stub
    collaborators into the parent (FakeSomething-style) initializer."""
    # Fake transport layer reporting four peers as already connected.
    node_stack = FakeSomething(
        name="fake stack",
        connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    self.replicas = []
    self.viewNo = viewNo
    super().__init__(name="fake node",
                     ledger_ids=ledger_ids,
                     _viewNo=viewNo,
                     quorums=quorums,
                     nodestack=node_stack,
                     utc_epoch=lambda *args: get_utc_epoch(),
                     mode=Mode.participating,
                     view_change_in_progress=False,
                     pre_view_change_in_progress=False,
                     requests=Requests(),
                     # Batch/request hooks are no-ops that just report success.
                     onBatchCreated=lambda self, *args, **kwargs: True,
                     applyReq=lambda self, *args, **kwargs: True,
                     primaries=[],
                     get_validators=lambda: [],
                     db_manager=None,
                     internal_bus=InternalBus(),
                     # Write manager with a real DatabaseManager but a no-op
                     # request application.
                     write_manager=FakeSomething(
                         database_manager=DatabaseManager(),
                         apply_request=lambda req, cons_time: None),
                     timer=QueueTimer())
def orderer(consensus_data, internal_bus, external_bus, name, write_manager,
            txn_roots, state_roots, bls_bft_replica, tconf, stasher):
    """Fixture: an OrderingService wired to fakes and patched so ordering
    can run without a real node.

    Root hashes are served from the supplied ``txn_roots``/``state_roots``
    maps, reverts are disabled, and a FuturePrimariesBatchHandler is
    registered on the write manager.
    """
    orderer = OrderingService(
        data=consensus_data(name),
        timer=QueueTimer(),
        bus=internal_bus,
        network=external_bus,
        write_manager=write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=tconf.STATE_FRESHNESS_UPDATE_INTERVAL),
        stasher=stasher)
    orderer._data.node_mode = Mode.participating
    orderer._data.primary_name = "Alpha:0"
    # Serve root hashes from the fixture-provided maps instead of ledgers.
    orderer.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger]
    orderer.get_state_root_hash = lambda ledger, to_str=False: state_roots[
        ledger]
    orderer.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    # Reverting batches is a no-op for these tests.
    orderer._revert = lambda *args, **kwargs: None
    # Stub store so persisting the last sent pre-prepare seq no is a no-op.
    orderer.db_manager.stores[LAST_SENT_PP_STORE_LABEL] = \
        FakeSomething(store_last_sent_pp_seq_no=lambda b, c: None)
    future_primaries_handler = FuturePrimariesBatchHandler(
        write_manager.database_manager,
        FakeSomething(nodeReg={}, nodeIds=[]))
    # Primaries are read straight from the orderer's consensus data.
    future_primaries_handler.get_primaries = \
        lambda *args, **kwargs: orderer._data.primaries
    write_manager.register_batch_handler(future_primaries_handler)
    return orderer
def fake_view_changer(request, tconf):
    """Fixture: a ViewChanger built on a fully faked four-node setup,
    with ``request.param`` as the current view number."""
    total_nodes = 4
    peer_names = {"Alpha", "Beta", "Gamma", "Delta"}
    fake_stack = FakeSomething(name="fake stack",
                               connecteds=set(peer_names),
                               conns=set(peer_names))
    fake_monitor = FakeSomething(isMasterDegraded=lambda: False,
                                 areBackupsDegraded=lambda: [],
                                 prettymetrics='')
    quorum_size = getValueFromModule(request, 'nodeCount',
                                     default=total_nodes)
    fake_node = FakeSomething(
        name="SomeNode",
        viewNo=request.param,
        quorums=Quorums(quorum_size),
        nodestack=fake_stack,
        utc_epoch=lambda *args: get_utc_epoch(),
        config=tconf,
        monitor=fake_monitor,
        # Discarded messages are just echoed for test visibility.
        discard=lambda a, b, c: print(b),
        primaries_disconnection_times=[None] * getRequiredInstances(
            total_nodes),
        master_primary_name='Alpha',
        master_replica=FakeSomething(instId=0))
    return ViewChanger(fake_node)
def view_changer():
    """Fixture: a ViewChanger over a minimal fake node.

    Bug fix: the original ended with ``return view_changer``, returning the
    fixture *function object* itself instead of a ViewChanger instance, so
    the ``config`` and ``node`` it built were dead code.  Construct and
    return the ViewChanger as the sibling fixtures in this file do.
    """
    config = FakeSomething(ViewChangeWindowSize=1,
                          ForceViewChangeFreq=0)
    node = FakeSomething(name="fake node",
                         ledger_ids=[0],
                         config=config,
                         quorums=Quorums(7))
    return ViewChanger(node)
def test_ordered_cleaning(tconf):
    """After GC, ordered entries from views older than the previous view
    must be dropped; only the last two views' entries survive."""
    current_view = 2
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=current_view,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
    )
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf,
                      bls_bft_replica=fake_bls)
    replica._consensus_data.view_no = current_view

    per_view = 3
    # Order the same number of requests in every view up to the current one.
    total = [(view, seq)
             for view in range(current_view + 1)
             for seq in range(per_view)]
    for req_key in total:
        replica.addToOrdered(*req_key)

    # gc is called after stable checkpoint, since no request executed
    # in this test starting it manually
    replica._gc(100)

    # Requests with view lower then previous view
    # should not be in ordered
    assert len(replica.ordered) == len(total[per_view:])
def pool_upgrade_handler():
    """Fixture: a PoolUpgradeHandler with stubbed collaborators; only the
    upgrade-possibility check is real (borrowed from Upgrader)."""
    fake_upgrader = FakeSomething(
        check_upgrade_possible=Upgrader.check_upgrade_possible)
    return PoolUpgradeHandler(None,
                              fake_upgrader,
                              FakeSomething(),
                              FakeSomething())
def test_commit_works_after_catchup(alh, db_manager, initial_domain_size, initial_pool_size, initial_config_size, initial_seq_no): size_before = alh.ledger.size # apply and commit batch do_apply_audit_txn(alh, txns_count=7, ledger_id=DOMAIN_LEDGER_ID, view_no=3, pp_sq_no=35, txn_time=11111) txn_root_hash = db_manager.get_ledger( DOMAIN_LEDGER_ID).uncommitted_root_hash state_root_hash = db_manager.get_state(DOMAIN_LEDGER_ID).headHash alh.commit_batch(FakeSomething()) # add txns to audit ledger emulating catchup caughtup_txns = 5 txns_per_batch = 2 add_txns_to_audit(alh, count=caughtup_txns, ledger_id=POOL_LEDGER_ID, txns_per_batch=txns_per_batch, view_no=3, initial_pp_seq_no=36, pp_time=11222) alh.on_catchup_finished() # apply and commit new batch do_apply_audit_txn(alh, txns_count=3, ledger_id=DOMAIN_LEDGER_ID, view_no=3, pp_sq_no=45, txn_time=21111) assert alh.ledger.uncommitted_size == alh.ledger.size + 1 txn_root_hash = db_manager.get_ledger( DOMAIN_LEDGER_ID).uncommitted_root_hash state_root_hash = db_manager.get_state(DOMAIN_LEDGER_ID).headHash alh.commit_batch(FakeSomething()) assert alh.ledger.uncommitted_size == alh.ledger.size assert alh.ledger.size == size_before + 2 + caughtup_txns check_audit_txn(txn=alh.ledger.get_last_committed_txn(), view_no=3, pp_seq_no=45, seq_no=initial_seq_no + 2 + caughtup_txns, txn_time=21111, txn_roots={DOMAIN_LEDGER_ID: txn_root_hash}, state_roots={DOMAIN_LEDGER_ID: state_root_hash}, pool_size=initial_pool_size + txns_per_batch * caughtup_txns, domain_size=initial_domain_size + 7 + 3, config_size=initial_config_size, last_pool_seqno=initial_seq_no + 1 + caughtup_txns, last_domain_seqno=None, last_config_seqno=None, primaries=2 * (caughtup_txns + 1))
def view_changer(tconf):
    """Fixture: a ViewChanger whose node is fake except for a real metrics
    collector and the real Node.process_one_node_message bound onto it."""
    quiet_logger = FakeSomething(info=lambda *args, **kwargs: True)
    fake_master = FakeSomething(inBox=deque(),
                                inBoxRouter=Router(),
                                logger=quiet_logger)
    node = FakeSomething(
        config=tconf,
        master_replica=fake_master,
        name="Alpha",
        master_primary_name="Alpha",
        on_view_change_start=lambda *args, **kwargs: True,
        start_catchup=lambda *args, **kwargs: True,
        nodeInBox=deque(),
        nodeMsgRouter=Router(),
        metrics=None,
        process_one_node_message=None,
        quota_control=FakeSomething(node_quota=Quota(count=100, size=100)),
        nodestack=FakeSomething(
            service=lambda *args, **kwargs: eventually(lambda: True)))
    # Swap the placeholders for real implementations borrowed from Node.
    node.metrics = functools.partial(Node._createMetricsCollector, node)()
    node.process_one_node_message = functools.partial(
        Node.process_one_node_message, node)
    vc = ViewChanger(node)
    node.view_changer = vc
    node.viewNo = vc.view_no
    node.master_replica.node = node
    return vc
def node():
    """Fixture: fake node with six registered nodes, two primaries, an
    elector stub, and one pending NODE request adding a seventh validator."""
    pending_request = Request(operation={
        TARGET_NYM: 'nym7',
        TXN_TYPE: NODE,
        DATA: {
            SERVICES: ['VALIDATOR'],
            ALIAS: 'n7'
        }
    })
    fake = FakeSomething()
    fake.new_future_primaries_needed = False
    fake.requests = {'a': ReqState(pending_request)}
    fake.nodeReg = {'n1': 1, 'n2': 1, 'n3': 1, 'n4': 1, 'n5': 1, 'n6': 1}
    fake.nodeIds = {'nym1': 'n1', 'nym2': 'n2', 'nym3': 'n3',
                    'nym4': 'n4', 'nym5': 'n5', 'nym6': 'n6'}
    fake.primaries = {'n1', 'n2'}
    fake.elector = FakeSomething()
    # Selection always picks the same two primaries.
    fake.elector.process_selection = lambda a, b, c: ['n1', 'n2']
    return fake
def _create_bls_bft(self) -> BlsBft:
    """Return a BlsBft assembled entirely from stub components.

    TODO: Create actual objects instead of fakes.
    """
    signer = FakeSomething()
    verifier = FakeSomething()
    key_register = FakeSomething()
    store = FakeSomething()
    return BlsBft(bls_crypto_signer=signer,
                  bls_crypto_verifier=verifier,
                  bls_key_register=key_register,
                  bls_store=store)
def test_primary_names_cleaning():
    """primaryNames must retain at most the last two views' primaries,
    dropping entries from older views as the view number advances."""
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=0,
    )
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, bls_bft_replica=fake_bls)

    # (primary to assign, expected primaryNames afterwards), one per view.
    steps = [
        ("Node1:0", [(0, "Node1:0")]),
        ("Node2:0", [(0, "Node1:0"), (1, "Node2:0")]),
        ("Node3:0", [(1, "Node2:0"), (2, "Node3:0")]),
        ("Node4:0", [(2, "Node3:0"), (3, "Node4:0")]),
    ]
    for step_no, (primary, expected) in enumerate(steps):
        if step_no:
            node.viewNo += 1
        replica.primaryName = primary
        assert list(replica.primaryNames.items()) == expected
def test_database():
    """Database must expose the ledger and state it was constructed with."""
    fake_ledger = FakeSomething()
    fake_state = FakeSomething()
    database = Database(fake_ledger, fake_state)
    assert database.ledger == fake_ledger
    assert database.state == fake_state
def node():
    """Fixture: fake node with six nodes, a primaries-selector stub, and
    one pending NODE request adding a seventh validator."""
    pending_request = Request(operation={
        TARGET_NYM: 'nym7',
        TXN_TYPE: NODE,
        DATA: {
            SERVICES: ['VALIDATOR'],
            ALIAS: 'n7'
        }
    })
    fake = FakeSomething()
    fake.new_future_primaries_needed = False
    fake.requests = {'a': ReqState(pending_request)}
    fake.nodeReg = {'n1': 1, 'n2': 1, 'n3': 1, 'n4': 1, 'n5': 1, 'n6': 1}
    fake.primaries = ['n1', 'n2']
    # nodeIds deliberately aliases nodeReg here (same dict object).
    fake.nodeIds = fake.nodeReg
    fake.primaries_selector = FakeSomething()
    # Selection always yields the same two primaries.
    fake.primaries_selector.select_primaries = \
        lambda view_no, instance_count, validators: ['n1', 'n2']
    fake.viewNo = 0
    return fake
def __init__(self, viewNo, quorums, ledger_ids):
    """Build a fake node whose (empty) audit ledger and stub write manager
    are registered on a real DatabaseManager."""
    node_names = ["Alpha", "Beta", "Gamma", "Delta"]
    node_stack = FakeSomething(name="fake stack",
                               connecteds=set(node_names))
    self.replicas = []
    self.viewNo = viewNo
    # Empty audit ledger: no transactions, no last txn.
    audit_ledger = FakeSomething(size=0,
                                 get_last_txn=lambda *args: None,
                                 getAllTxn=lambda *args, **kwargs: [])
    db_manager = DatabaseManager()
    db_manager.register_new_database(AUDIT_LEDGER_ID, audit_ledger)
    super().__init__(name="fake node",
                     ledger_ids=ledger_ids,
                     _viewNo=viewNo,
                     quorums=quorums,
                     nodestack=node_stack,
                     utc_epoch=lambda *args: get_utc_epoch(),
                     mode=Mode.participating,
                     view_change_in_progress=False,
                     requests=Requests(),
                     # Batch/request hooks are no-ops that report success.
                     onBatchCreated=lambda self, *args, **kwargs: True,
                     applyReq=lambda self, *args, **kwargs: True,
                     primaries=[],
                     get_validators=lambda: [],
                     db_manager=db_manager,
                     write_manager=FakeSomething(
                         database_manager=db_manager,
                         apply_request=lambda req, cons_time: None,
                         future_primary_handler=FakeSomething(
                             primaries={},
                             get_primaries=lambda *args: [])),
                     timer=QueueTimer(),
                     # Rank order matches the declaration order above.
                     poolManager=FakeSomething(
                         node_names_ordered_by_rank=lambda: node_names))
def bls_store(db_manager):
    """Fixture: register a fake BLS store whose every lookup yields a
    multi-signature serializing to {"a": "b"}."""
    fake_multi_sigs = FakeSomething()
    fake_multi_sigs.as_dict = lambda: {"a": "b"}
    fake_store = FakeSomething()
    fake_store.get = lambda _: fake_multi_sigs
    db_manager.register_new_store(BLS_LABEL, fake_store)
    return fake_store
def fake_view_changer(request, tconf):
    """Fixture: a view changer over a faked four-node setup, parametrized
    by ``request.param`` as the current view number."""
    node_count = 4
    node_stack = FakeSomething(name="fake stack",
                               connecteds={"Alpha", "Beta", "Gamma", "Delta"},
                               conns={"Alpha", "Beta", "Gamma", "Delta"})
    monitor = FakeSomething(isMasterDegraded=lambda: False,
                            areBackupsDegraded=lambda: [],
                            prettymetrics='')
    node = FakeSomething(
        name="SomeNode",
        timer=QueueTimer(),
        viewNo=request.param,
        quorums=Quorums(
            getValueFromModule(request, 'nodeCount', default=node_count)),
        nodestack=node_stack,
        utc_epoch=lambda *args: get_utc_epoch(),
        config=tconf,
        monitor=monitor,
        # Discarded messages are just echoed for test visibility.
        discard=lambda a, b, c, d: print(b),
        primaries_disconnection_times=[None] * getRequiredInstances(node_count),
        master_primary_name='Alpha',
        master_replica=FakeSomething(
            instId=0,
            viewNo=request.param,
            _consensus_data=FakeSomething(
                view_no=request.param,
                waiting_for_new_view=False)),
        nodeStatusDB=None)
    view_changer = create_view_changer(node)
    # TODO: This is a hack for tests compatibility, do something better
    view_changer.node = node
    return view_changer
def fake_node(tconf):
    """Fixture: a fake node carrying a real metrics collector and the real
    ``Node.process_one_node_message`` bound onto it."""
    node = FakeSomething(
        config=tconf,
        timer=QueueTimer(),
        nodeStatusDB=None,
        master_replica=FakeSomething(
            inBox=deque(),
            inBoxRouter=Router(),
            _external_bus=MockNetwork(),
            internal_bus=InternalBus(),
            logger=FakeSomething(
                info=lambda *args, **kwargs: True
            )),
        name="Alpha",
        master_primary_name="Alpha",
        on_view_change_start=lambda *args, **kwargs: True,
        start_catchup=lambda *args, **kwargs: True,
        nodeInBox=deque(),
        nodeMsgRouter=Router(),
        # Placeholders; replaced with real implementations below.
        metrics=None,
        process_one_node_message=None,
        quota_control=FakeSomething(
            node_quota=Quota(count=100, size=100)),
        nodestack=FakeSomething(
            service=lambda *args, **kwargs: eventually(lambda: True)),
        set_view_for_replicas=lambda view_no: None,
        set_view_change_status=lambda view_no: None)
    # Borrow the real implementations from Node for these two members.
    node.metrics = functools.partial(Node._createMetricsCollector, node)()
    node.process_one_node_message = functools.partial(
        Node.process_one_node_message, node)
    return node
def node_handler():
    """Fixture: a NodeHandler whose pool state is a real in-memory
    PruningState registered on a fresh DatabaseManager."""
    data_manager = DatabaseManager()
    fake_bls = FakeSomething()
    handler = NodeHandler(data_manager, fake_bls)
    pool_state = PruningState(KeyValueStorageInMemory())
    data_manager.register_new_database(handler.ledger_id,
                                       FakeSomething(),
                                       pool_state)
    return handler
def fake_ordering_service(config_ledger, config_state, db_manager):
    """Fixture: a fake OrderingService with only the real ``l_revert``
    implementation bound onto it."""
    service = FakeSomething(
        db_manager=db_manager,
        post_batch_rejection=lambda *args, **kwargs: True,
        _logger=FakeSomething(info=lambda *args, **kwargs: True))
    service.l_revert = functools.partial(OrderingService.l_revert, service)
    return service
def fake_replica(config_ledger, config_state):
    """Fixture: a fake Replica carrying the real ``Replica.revert``
    implementation, backed by the supplied config ledger/state."""
    fake_node = FakeSomething(
        getLedger=lambda *args, **kwargs: config_ledger,
        getState=lambda *args, **kwargs: config_state,
        onBatchRejected=lambda *args, **kwargs: True)
    stub = FakeSomething(
        node=fake_node,
        logger=FakeSomething(info=lambda *args, **kwargs: True))
    stub.revert = functools.partial(Replica.revert, stub)
    return stub
def txn_author_agreement_disable_handler(tconf, domain_state, config_state):
    """Fixture: a TAA-disable handler backed by the supplied config and
    domain states."""
    data_manager = DatabaseManager()
    handler = TxnAuthorAgreementDisableHandler(data_manager)
    for ledger_id, state in ((handler.ledger_id, config_state),
                             (DOMAIN_LEDGER_ID, domain_state)):
        data_manager.register_new_database(ledger_id, FakeSomething(), state)
    return handler
def txn_author_agreement_aml_handler(tconf, domain_state):
    """Fixture: a TAA-AML handler with an in-memory config state and the
    supplied domain state."""
    data_manager = DatabaseManager()
    handler = TxnAuthorAgreementAmlHandler(data_manager, FakeSomething())
    config_state = PruningState(KeyValueStorageInMemory())
    data_manager.register_new_database(handler.ledger_id,
                                       FakeSomething(), config_state)
    data_manager.register_new_database(DOMAIN_LEDGER_ID,
                                       FakeSomething(), domain_state)
    return handler
def replica(tconf):
    """Fixture: a non-master (backup) Replica on a minimal fake node."""
    fake_node = FakeSomething(name="fake node", ledger_ids=[0], viewNo=0)
    fake_bls = FakeSomething(gc=lambda *args: None, )
    return Replica(fake_node,
                   instId=0,
                   isMaster=False,
                   config=tconf,
                   bls_bft_replica=fake_bls)
def config_req_handler(config_state, config_ledger):
    """Fixture: a ConfigReqHandler with every collaborator stubbed out."""
    stub_collaborators = dict(idrCache=FakeSomething(),
                              upgrader=FakeSomething(),
                              poolManager=FakeSomething(),
                              poolCfg=FakeSomething(),
                              write_req_validator=FakeSomething())
    return ConfigReqHandler(config_ledger, config_state, **stub_collaborators)
def test_replica_not_degraded_with_too_high_latency():
    """High request latency alone (with throughput still OK) must not mark
    any backup instance as degraded."""
    fake_monitor = FakeSomething(
        is_instance_avg_req_latency_too_high=lambda a: True,
        is_instance_throughput_too_low=lambda a: False,
        acc_monitor=None,
        instances=FakeSomething(backupIds=[1, 2, 3]))
    # Bind the real degradation check onto the fake monitor.
    fake_monitor.areBackupsDegraded = functools.partial(
        Monitor.areBackupsDegraded, fake_monitor)
    assert not fake_monitor.areBackupsDegraded()
def test_node(test_node):
    """Fixture: decorate ``test_node`` with a fake view changer and register
    a TestConfigReqHandler for the config ledger."""
    test_node.view_changer = FakeSomething(view_change_in_progress=True,
                                           view_no=1,
                                           instance_changes=None)

    def _make_config_handler():
        # Build the handler from the node's own ledger and states.
        return TestConfigReqHandler(test_node.configLedger,
                                    test_node.states[CONFIG_LEDGER_ID],
                                    test_node.states[DOMAIN_LEDGER_ID],
                                    bls_store=FakeSomething())

    test_node.init_config_req_handler = _make_config_handler
    test_node.register_req_handler(test_node.init_config_req_handler(),
                                   CONFIG_LEDGER_ID)
    return test_node
def node_with_nodestack(fake_node):
    """Fixture: equip ``fake_node`` with a fake nodestack, a send() that
    checks CurrentState messages, and the real lagging-node helper."""
    fake_node.nodestack = FakeSomething(
        name='fake state',
        connecteds=set(fake_node.allNodeNames),
        getRemote=lambda name: FakeSomething(uid=name))
    fake_node.view_changer.last_completed_view_no = fake_node.viewNo
    # Outgoing messages are routed into the CurrentState checker.
    fake_node.send = functools.partial(check_CurrentState, fake_node)
    fake_node.send_current_state_to_lagging_node = functools.partial(
        Node.send_current_state_to_lagging_node, fake_node)
    return fake_node
def ledgers_freeze_handler(tconf, domain_state, audit_ledger):
    """Fixture: a LedgersFreezeHandler with an in-memory config state plus
    the supplied domain state and audit ledger."""
    data_manager = DatabaseManager()
    handler = LedgersFreezeHandler(data_manager)
    config_state = PruningState(KeyValueStorageInMemory())
    data_manager.register_new_database(handler.ledger_id,
                                       FakeSomething(), config_state)
    data_manager.register_new_database(DOMAIN_LEDGER_ID,
                                       FakeSomething(), domain_state)
    # Audit ledger is registered without a separate state component.
    data_manager.register_new_database(AUDIT_LEDGER_ID, audit_ledger)
    return handler
def txn_author_agreement_handler(db_manager):
    """Fixture: a TAA handler whose state is a dict-backed stub State and
    whose validator accepts everything."""
    validator = FakeSomething()
    validator.validate = lambda request, action_list: True
    handler = TxnAuthorAgreementHandler(db_manager, FakeSomething(),
                                        validator)
    stub_state = State()
    stub_state.txn_list = {}
    # get/set delegate to the plain dict behind the state.
    stub_state.get = \
        lambda key, isCommitted=False: stub_state.txn_list.get(key, None)
    stub_state.set = \
        lambda key, value, isCommitted=False: stub_state.txn_list.update(
            {key: value})
    db_manager.register_new_database(handler.ledger_id, FakeSomething(),
                                     stub_state)
    return handler
def node_handler():
    """Fixture: a NodeHandler whose pool state is a dict-backed stub State
    registered on a fresh DatabaseManager."""
    data_manager = DatabaseManager()
    fake_bls = FakeSomething()
    handler = NodeHandler(data_manager, fake_bls)
    stub_state = State()
    stub_state.txn_list = {}
    # get/set delegate to the plain dict behind the state.
    stub_state.get = \
        lambda key, is_committed: stub_state.txn_list.get(key, None)
    stub_state.set = \
        lambda key, value: stub_state.txn_list.update({key: value})
    data_manager.register_new_database(handler.ledger_id, FakeSomething(),
                                       stub_state)
    return handler