def view_changer(tconf):
    """Build a ViewChanger wired to a minimal fake node for unit tests."""
    node = FakeSomething(
        config=tconf,
        master_replica=FakeSomething(
            inBox=deque(),
            inBoxRouter=Router(),
            logger=FakeSomething(info=lambda *args, **kwargs: True)),
        name="Alpha",
        master_primary_name="Alpha",
        on_view_change_start=lambda *args, **kwargs: True,
        start_catchup=lambda *args, **kwargs: True,
        nodeInBox=deque(),
        nodeMsgRouter=Router(),
        metrics=None,
        process_one_node_message=None,
        quota_control=FakeSomething(node_quota=Quota(count=100, size=100)),
        nodestack=FakeSomething(
            service=lambda *args, **kwargs: eventually(lambda: True)))
    # Bind real Node helpers onto the fake so node-message processing works.
    node.metrics = functools.partial(Node._createMetricsCollector, node)()
    node.process_one_node_message = functools.partial(
        Node.process_one_node_message, node)
    view_changer = ViewChanger(node)
    node.view_changer = view_changer
    node.viewNo = view_changer.view_no
    node.master_replica.node = node
    return view_changer
def test_replica_not_degraded_with_too_high_latency():
    """High latency alone (with throughput OK) must not degrade backups."""
    monitor = FakeSomething(
        is_instance_avg_req_latency_too_high=lambda a: True,
        is_instance_throughput_too_low=lambda a: False,
        acc_monitor=None,
        instances=FakeSomething(backupIds=[1, 2, 3]))
    # Bind the real Monitor method onto the fake.
    monitor.areBackupsDegraded = functools.partial(
        Monitor.areBackupsDegraded, monitor)
    assert not monitor.areBackupsDegraded()
def fake_monitor(tconf):
    """Create a fake Monitor with real throughput-related methods bound."""
    def getThroughput(self, instId):
        return self.throughputs[instId].throughput

    num_of_replicas = 5
    throughputs = {}
    instances = Instances()
    for inst_id in range(num_of_replicas):
        throughputs[inst_id] = Monitor.create_throughput_measurement(tconf)
        instances.add(inst_id)
    monitor = FakeSomething(
        throughputs=throughputs,
        instances=instances,
        Delta=tconf.DELTA,
        throughput_avg_strategy_cls=MedianLowStrategy,
    )
    monitor.numOrderedRequests = {
        inst_id: (100, 100) for inst_id in range(num_of_replicas)}
    # Bind the real Monitor methods onto the fake.
    for method_name in ('getThroughputs', 'getInstanceMetrics',
                        'instance_throughput_ratio',
                        'is_instance_throughput_too_low', 'addInstance'):
        setattr(monitor, method_name,
                functools.partial(getattr(Monitor, method_name), monitor))
    # getThroughput is overridden by the local stub above.
    monitor.getThroughput = functools.partial(getThroughput, monitor)
    return monitor
def orderer(consensus_data, internal_bus, external_bus, name, write_manager,
            txn_roots, state_roots, bls_bft_replica, tconf, stasher,
            validators):
    """Build an OrderingService in participating mode with stubbed roots."""
    service = OrderingService(
        data=consensus_data(name),
        timer=QueueTimer(),
        bus=internal_bus,
        network=external_bus,
        write_manager=write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=tconf.STATE_FRESHNESS_UPDATE_INTERVAL),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            validators),
        stasher=stasher)
    service._data.node_mode = Mode.participating
    service._data.primary_name = "Alpha:0"
    # Root hashes are served from the fixture-provided dictionaries.
    service.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger]
    service.get_state_root_hash = \
        lambda ledger, to_str=False: state_roots[ledger]
    service.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    service._revert = lambda *args, **kwargs: None
    service.db_manager.stores[LAST_SENT_PP_STORE_LABEL] = \
        FakeSomething(store_last_sent_pp_seq_no=lambda b, c: None)
    return service
def test_node(test_node):
    """Attach a fake view changer and register config request handlers."""
    test_node.view_changer = FakeSomething(
        view_change_in_progress=True,
        view_no=1,
        instance_changes=None)
    bootstrap = ConfigTestBootstrapClass(test_node)
    bootstrap.register_config_req_handlers()
    return test_node
def test_init_state_from_ledger(config_ledger, config_state,
                                config_req_handler, constraint_serializer,
                                prepare_request):
    """State is rebuilt from the ledger when node start-up is emulated.

    Fix: the original used bare string literals as in-function comments;
    those are real (no-op) expression statements, not comments, so they
    are replaced with proper ``#`` comments.  The repeated state-path
    computation is hoisted into a local.
    """
    req_count = 1
    action, constraint, request = prepare_request
    txn = reqToTxn(request)
    txn[TXN_METADATA][TXN_METADATA_SEQ_NO] = 1

    # Add txn to ledger
    config_ledger.appendTxns([txn])
    config_ledger.commitTxns(req_count)

    init_state_from_ledger = functools.partial(
        Node.init_state_from_ledger,
        FakeSomething(update_txn_with_extra_data=lambda txn: txn))

    # Check that the txn does not exist in state yet
    state_path = config.make_state_path_for_auth_rule(action.get_action_id())
    assert config_state.get(state_path, isCommitted=False) is None
    assert config_state.get(state_path, isCommitted=True) is None

    # Check that the txn exists in the ledger
    txns_from_ledger = [t for t in config_ledger.getAllTxn()]
    assert len(txns_from_ledger) == 1
    assert get_payload_data(txns_from_ledger[0][1]) == get_payload_data(txn)

    # Emulate node starting
    init_state_from_ledger(config_state, config_ledger, config_req_handler)

    # Check that the txn was added into state
    from_state = config_state.get(state_path, isCommitted=True)
    assert constraint_serializer.deserialize(from_state) == constraint
def test_audit_not_committed_if_pre_prepare_doesnt_have_audit(alh, db_manager):
    """Committing a batch without an audit txn leaves the audit ledger size
    unchanged while one audit txn stays uncommitted."""
    size_before = alh.ledger.size
    uncommited_size_before = alh.ledger.uncommitted_size

    # First batch: no audit txn.
    do_apply_audit_txn(alh,
                       txns_count=10, ledger_id=DOMAIN_LEDGER_ID,
                       view_no=0, pp_sq_no=1, txn_time=10000,
                       has_audit_txn=False)
    txn_root_hash_1 = db_manager.get_ledger(
        DOMAIN_LEDGER_ID).uncommitted_root_hash
    state_root_hash_1 = db_manager.get_state(DOMAIN_LEDGER_ID).headHash

    # Second batch: with an audit txn.
    do_apply_audit_txn(alh,
                       txns_count=15, ledger_id=DOMAIN_LEDGER_ID,
                       view_no=0, pp_sq_no=2, txn_time=10000,
                       has_audit_txn=True)

    # commit the first batch without audit txns
    alh.commit_batch(FakeSomething())

    assert alh.ledger.uncommitted_size == uncommited_size_before + 1
    assert alh.ledger.size == size_before
def replica_service(replica_service):
    """Register a FuturePrimariesBatchHandler on the replica's write manager,
    with its primaries lookup stubbed to the replica's own data."""
    write_manager = replica_service._write_manager
    handler = FuturePrimariesBatchHandler(
        write_manager.database_manager,
        FakeSomething(nodeReg={}, nodeIds=[]))
    handler._get_primaries = \
        lambda *args, **kwargs: replica_service._data.primaries
    write_manager.register_batch_handler(handler)
    return replica_service
def fake_monitor(tconf):
    """Create a fake Monitor with real latency-related methods bound."""
    latencies = []
    instances = Instances()
    for _ in range(NUM_OF_REPLICAS):
        latencies.append(LatencyMeasurement())
        instances.add()
    monitor = FakeSomething(
        instances=instances,
        Omega=tconf.OMEGA,
        clientAvgReqLatencies=latencies,
        latency_avg_strategy_cls=MedianHighStrategy,
    )
    # Bind the real Monitor methods onto the fake.
    monitor.getLatencies = functools.partial(Monitor.getLatencies, monitor)
    monitor.isMasterAvgReqLatencyTooHigh = functools.partial(
        Monitor.isMasterAvgReqLatencyTooHigh, monitor)
    return monitor
def test_init_state_from_ledger(write_manager, db_manager,
                                constraint_serializer, prepare_request):
    """Config state is rebuilt from the config ledger on emulated start-up.

    Fix: the original used bare string literals as in-function comments
    (real no-op statements, not comments) — replaced with ``#`` comments.
    The repeated state-path computation and config-state lookup are
    hoisted into locals, which also removes the stray
    ``...get_state(...) .get(`` spacing.
    """
    reset_state(db_manager, CONFIG_LEDGER_ID)
    req_count = 1
    action, constraint, request = prepare_request
    txn = reqToTxn(request)
    txn[TXN_METADATA][TXN_METADATA_SEQ_NO] = 1

    # Add txn to ledger
    db_manager.get_ledger(CONFIG_LEDGER_ID).appendTxns([txn])
    db_manager.get_ledger(CONFIG_LEDGER_ID).commitTxns(req_count)

    # ToDo: ugly fix... Refactor this on pluggable req handler integration phase
    init_state_from_ledger = functools.partial(
        LedgersBootstrap._init_state_from_ledger,
        FakeSomething(
            db_manager=db_manager,
            write_manager=write_manager,
            _update_txn_with_extra_data=lambda txn: txn))

    # Check that the txn does not exist in state yet
    state_path = config.make_state_path_for_auth_rule(action.get_action_id())
    config_state = db_manager.get_state(CONFIG_LEDGER_ID)
    assert config_state.get(state_path, isCommitted=False) is None
    assert config_state.get(state_path, isCommitted=True) is None

    # Check that the txn exists in the ledger
    txns_from_ledger = [
        t for t in db_manager.get_ledger(CONFIG_LEDGER_ID).getAllTxn()]
    assert len(txns_from_ledger) == 1
    assert get_payload_data(txns_from_ledger[0][1]) == get_payload_data(txn)

    # Emulate node starting
    init_state_from_ledger(CONFIG_LEDGER_ID)

    # Check that the txn was added into state
    from_state = config_state.get(state_path, isCommitted=True)
    assert constraint_serializer.deserialize(from_state) == constraint
def test_commit_one_batch(alh, db_manager,
                          initial_domain_size, initial_pool_size,
                          initial_config_size, initial_seq_no):
    """Committing a single applied batch adds exactly one audit txn with the
    expected roots, sizes and metadata."""
    size_before = alh.ledger.size
    digest = '123/0digest'

    do_apply_audit_txn(alh,
                       txns_count=7, ledger_id=DOMAIN_LEDGER_ID,
                       view_no=3, pp_sq_no=35, txn_time=11111,
                       digest=digest)
    # Capture uncommitted roots before the commit.
    txn_root_hash = db_manager.get_ledger(
        DOMAIN_LEDGER_ID).uncommitted_root_hash
    state_root_hash = db_manager.get_state(DOMAIN_LEDGER_ID).headHash
    pool_txn_root_hash = db_manager.get_ledger(
        POOL_LEDGER_ID).uncommitted_root_hash
    pool_state_root_hash = db_manager.get_state(POOL_LEDGER_ID).headHash

    alh.commit_batch(FakeSomething())
    assert alh.ledger.uncommitted_size == alh.ledger.size
    assert alh.ledger.size == size_before + 1

    check_audit_txn(txn=alh.ledger.get_last_committed_txn(),
                    view_no=3, pp_seq_no=35,
                    seq_no=initial_seq_no + 1, txn_time=11111,
                    txn_roots={
                        DOMAIN_LEDGER_ID: txn_root_hash,
                        POOL_LEDGER_ID: pool_txn_root_hash
                    },
                    state_roots={
                        DOMAIN_LEDGER_ID: state_root_hash,
                        POOL_LEDGER_ID: pool_state_root_hash
                    },
                    pool_size=initial_pool_size,
                    domain_size=initial_domain_size + 7,
                    config_size=initial_config_size,
                    last_pool_seqno=None,
                    last_domain_seqno=None,
                    last_config_seqno=None,
                    primaries=DEFAULT_PRIMARIES,
                    digest=digest,
                    node_reg=DEFAULT_NODE_REG)
def nym_handler(tconf):
    """Build a NymHandler backed by an in-memory pruning state."""
    data_manager = DatabaseManager()
    handler = NymHandler(tconf, data_manager)
    state = PruningState(KeyValueStorageInMemory())
    data_manager.register_new_database(handler.ledger_id,
                                       FakeSomething(),
                                       state)
    return handler
def orderer(
        _orderer,
        is_primary,
):
    """Configure the ordering-service fixture for primary/non-primary runs.

    Fix: the primary branch referenced ``orderer.name`` — at that point
    ``orderer`` is this fixture function itself, which has no ``name``
    attribute (an AttributeError when ``is_primary`` is True) — the
    intended value is ``_orderer.name``.
    """
    # ToDo: For now, future_primary_handler is depended from the node.
    # And for now we need to patching set_node_state functionality
    write_manager = _orderer._write_manager
    future_primaries_handler = FuturePrimariesBatchHandler(
        write_manager.database_manager,
        FakeSomething(nodeReg={}, nodeIds=[],
                      primaries=_orderer._data.primaries))
    write_manager.register_batch_handler(future_primaries_handler)
    _orderer._validator = OrderingServiceMsgValidator(_orderer._data)
    _orderer.name = 'Alpha:0'
    _orderer._data.primary_name = \
        'some_node:0' if not is_primary else _orderer.name

    def _apply_and_validate_applied_pre_prepare_fake(pp, sender):
        # Count invocations via the module-level counter.
        global applied_pre_prepares
        applied_pre_prepares += 1

    _orderer._can_process_pre_prepare = lambda pp, sender: None
    _orderer._apply_and_validate_applied_pre_prepare = \
        _apply_and_validate_applied_pre_prepare_fake
    return _orderer
def test_node(test_node):
    """Attach a fake view changer and register a test config req handler."""
    test_node.view_changer = FakeSomething(view_change_in_progress=True,
                                           view_no=1)
    test_node.init_config_req_handler = lambda: TestConfigReqHandler(
        test_node.configLedger,
        test_node.states[CONFIG_LEDGER_ID])
    test_node.register_req_handler(test_node.init_config_req_handler(),
                                   CONFIG_LEDGER_ID)
    return test_node
def fake_view_changer(request, tconf):
    """Build a ViewChanger around a fake 4-node cluster (viewNo from param)."""
    node_stack = FakeSomething(
        name="fake stack",
        connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    monitor = FakeSomething(isMasterDegraded=lambda: False)
    node = FakeSomething(
        name="SomeNode",
        viewNo=request.param,
        quorums=Quorums(getValueFromModule(request, 'nodeCount', default=4)),
        nodestack=node_stack,
        utc_epoch=lambda *args: get_utc_epoch(),
        config=tconf,
        monitor=monitor,
        discard=lambda a, b, c: print(b),
    )
    return ViewChanger(node)
def test_send_message_to_incorrect_replica(test_node):
    """A message addressed to a non-existent instance reaches no replica."""
    frm = "frm"
    msg = FakeSomething(instId=100000)
    test_node.sendToReplica(msg, frm)
    assert len(test_node.replicas) > 1
    for replica in test_node.replicas.values():
        assert (msg, frm) not in replica.inBox
def test_send_message_for_all_without_inst_id(test_node):
    """A message without an instance id is broadcast to every replica."""
    frm = "frm"
    msg = FakeSomething()
    test_node.sendToReplica(msg, frm)
    assert len(test_node.replicas) > 1
    for replica in test_node.replicas.values():
        assert (msg, frm) in replica.inBox
def node(requests):
    """Fake node with generous request-phase timeouts and no-op cleanup."""
    return FakeSomething(
        requests=requests,
        propagates_phase_req_timeout=3600,
        ordering_phase_req_timeout=3600,
        propagates_phase_req_timeouts=0,
        ordering_phase_req_timeouts=0,
        _clean_req_from_verified=lambda *args, **kwargs: True,
        doneProcessingReq=lambda *args, **kwargs: True)
def test_taa_acceptance_required_default():
    """TAA acceptance is required by default for a freshly built LedgerInfo.

    Fix: ``== True`` comparison replaced with ``is True`` (PEP 8 E712);
    the test's intent is that the property is exactly the boolean True.
    """
    ledger_info = LedgerInfo(
        0,
        FakeSomething(),
        preCatchupStartClbk=None,
        postCatchupCompleteClbk=None,
        postTxnAddedToLedgerClbk=None,
        verifier=None
    )
    assert ledger_info.taa_acceptance_required is True
def __init__(self, tmpdir, config=None):
    """Build a fake node with three replicas and two fake ledgers for
    catchup/view-change tests.

    Fix: ``self.quorums`` was assigned twice with the identical value;
    the redundant second assignment is removed.
    """
    self.basedirpath = tmpdir
    self.name = 'Node1'
    self.db_manager = DatabaseManager()
    self.timer = QueueTimer()
    self.f = 1
    # Placeholder so Replica construction below can see the attribute.
    self.replicas = dict()
    self.requests = Requests()
    self.rank = None
    self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
    self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
    self.nodeIds = []
    self.totalNodes = len(self.allNodeNames)
    self.mode = Mode.starting
    self.config = config or getConfigOnce()
    self.nodeStatusDB = None
    self.quorums = Quorums(self.totalNodes)
    self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
    self.write_manager = FakeSomething()
    self.replicas = {
        0: Replica(node=self, instId=0, isMaster=True, config=self.config),
        1: Replica(node=self, instId=1, isMaster=False, config=self.config),
        2: Replica(node=self, instId=2, isMaster=False, config=self.config)
    }
    self.requiredNumberOfInstances = 2
    self._found = False
    self.ledgerManager = LedgerManager(self)
    ledger0 = FakeLedger(0, 10)
    ledger1 = FakeLedger(1, 5)
    self.ledgerManager.addLedger(0, ledger0)
    self.ledgerManager.addLedger(1, ledger1)
    self.view_changer = create_view_changer(self)
    self.primaries_selector = RoundRobinPrimariesSelector()
    self.metrics = NullMetricsCollector()

    # For catchup testing
    self.catchup_rounds_without_txns = 0
    self.view_change_in_progress = False
    self.ledgerManager.last_caught_up_3PC = (0, 0)
    self.master_last_ordered_3PC = (0, 0)
    self.seqNoDB = {}

    # callbacks
    self.onBatchCreated = lambda self, *args, **kwargs: True
def write_auth_req_validator(idr_cache):
    """Build a WriteRequestValidator using the local auth policy."""
    fake_config = FakeSomething(authPolicy=LOCAL_AUTH_POLICY,
                                ANYONE_CAN_WRITE=False)
    return WriteRequestValidator(config=fake_config,
                                 auth_map=auth_map,
                                 cache=idr_cache,
                                 anyone_can_write_map=anyone_can_write_map)
def test_write_request_manager_chain_of_responsib_batch(
        write_req_manager: WriteRequestManager,
        three_pc_batch, db):
    """Every handler in the DOMAIN batch-handler chain runs exactly once for
    apply, commit and reject."""
    write_req_manager.batch_handlers[DOMAIN_LEDGER_ID] = []
    handlers = write_req_manager.batch_handlers[DOMAIN_LEDGER_ID]
    check_list = [FakeSomething(), FakeSomething(), FakeSomething()]

    def modify_check_list():
        # Each call flips exactly one still-unset flag.
        assert not all(check.check_field for check in check_list)
        unset = next(check for check in check_list
                     if check.check_field is False)
        unset.check_field = True

    def modify_check_list_post_apply(batch, prev_result):
        modify_check_list()
        return 1, 1, 1

    def modify_check_list_commit(batch, prev_handler_result=None):
        modify_check_list()
        return 1, 1, 1

    def modify_check_list_post_rejected(lid, prev_result):
        modify_check_list()
        return 1, 1, 1

    for _ in range(3):
        handler = DomainBatchHandler(db)
        handler.post_batch_applied = modify_check_list_post_apply
        handler.commit_batch = modify_check_list_commit
        handler.post_batch_rejected = modify_check_list_post_rejected
        handlers.append(handler)

    def reset_flags():
        for check in check_list:
            check.check_field = False

    reset_flags()
    write_req_manager.post_apply_batch(three_pc_batch)
    assert all(check.check_field for check in check_list)

    reset_flags()
    write_req_manager.commit_batch(three_pc_batch)
    assert all(check.check_field for check in check_list)

    reset_flags()
    write_req_manager.post_batch_rejected(three_pc_batch.ledger_id)
    assert all(check.check_field for check in check_list)
def write_auth_req_validator(idr_cache, constraint_serializer, config_state):
    """Build a WriteRequestValidator using the config-ledger auth policy."""
    fake_config = FakeSomething(authPolicy=CONFIG_LEDGER_AUTH_POLICY)
    return WriteRequestValidator(config=fake_config,
                                 auth_map=auth_map,
                                 cache=idr_cache,
                                 config_state=config_state,
                                 state_serializer=constraint_serializer)
def test_register_database_no_state(database_manager: DatabaseManager):
    """Registering a database without a state records only the ledger."""
    # With a state: both mappings get an entry.
    db_id_1 = 1
    db_led_1 = FakeSomething()
    db_state_1 = FakeSomething()
    database_manager.register_new_database(db_id_1, db_led_1, db_state_1)
    assert db_id_1 in database_manager._ledgers
    assert db_id_1 in database_manager._states
    assert database_manager._ledgers[db_id_1] == db_led_1
    assert database_manager._states[db_id_1] == db_state_1

    # Without a state: only the ledger mapping gets an entry.
    db_id_2 = 2
    db_led_2 = FakeSomething()
    database_manager.register_new_database(db_id_2, db_led_2)
    assert db_id_2 in database_manager._ledgers
    assert db_id_2 not in database_manager._states
    assert database_manager._ledgers[db_id_2] == db_led_2
def bls_bft_replica():
    """Fake BLS BFT replica whose hooks are all no-ops / pass-throughs."""
    return FakeSomething(
        gc=lambda *args, **kwargs: True,
        validate_pre_prepare=lambda *args, **kwargs: None,
        update_prepare=lambda params, lid: params,
        process_prepare=lambda *args, **kwargs: None,
        process_pre_prepare=lambda *args, **kwargs: None,
        validate_prepare=lambda *args, **kwargs: None,
        update_commit=lambda params, pre_prepare: params,
        process_commit=lambda *args, **kwargs: None)
def revoc_reg_entry_handler(db_manager):
    """Build a RevocRegEntryHandler with a no-op revocation strategy and a
    stubbed current-entry lookup."""
    class Validator:
        # Strategy stub: accepts any state, validates nothing.
        def __init__(self, state):
            pass

        def validate(self, current_entry, request):
            pass

    def get_revocation_strategy(type):
        return Validator

    def get_current_revoc_entry_and_revoc_def(author_did,
                                              revoc_reg_def_id,
                                              req_id):
        return True, {VALUE: {ISSUANCE_TYPE: ISSUANCE_BY_DEFAULT}}

    fake_def_handler = FakeSomething()
    fake_def_handler.get_current_revoc_entry_and_revoc_def = \
        get_current_revoc_entry_and_revoc_def
    return RevocRegEntryHandler(db_manager, fake_def_handler,
                                get_revocation_strategy)
def replica_service(replica_service):
    """Register a FuturePrimariesBatchHandler (primed with the replica's own
    primaries) on the replica's write manager."""
    write_manager = replica_service._write_manager
    handler = FuturePrimariesBatchHandler(
        write_manager.database_manager,
        FakeSomething(nodeReg={},
                      nodeIds=[],
                      primaries=replica_service._data.primaries))
    write_manager.register_batch_handler(handler)
    return replica_service
def node():
    """Fake node with six registered nodes, one pending NODE request and a
    stubbed primaries selector."""
    n = FakeSomething()
    n.new_future_primaries_needed = False
    n.requests = {'a': ReqState(Request(operation={
        TARGET_NYM: 'nym7',
        TXN_TYPE: NODE,
        DATA: {SERVICES: ['VALIDATOR'], ALIAS: 'n7'},
    }))}
    n.nodeReg = {'n1': 1, 'n2': 1, 'n3': 1, 'n4': 1, 'n5': 1, 'n6': 1}
    n.primaries = ['n1', 'n2']
    # NOTE(review): nodeIds aliases the nodeReg dict (iterating it yields the
    # node names) — presumably intentional; confirm against consumers.
    n.nodeIds = n.nodeReg
    n.primaries_selector = FakeSomething()
    n.primaries_selector.select_primaries = \
        lambda view_no, instance_count, validators: ['n1', 'n2']
    n.viewNo = 0
    return n
def test_register_store(database_manager: DatabaseManager):
    """A store is absent before registration and retrievable afterwards.

    Fix: ``== None`` comparison replaced with ``is None`` (PEP 8 E711).
    """
    store_label = 'aaa'
    store = FakeSomething()
    assert database_manager.get_store(store_label) is None
    database_manager.register_new_store(store_label, store)
    assert database_manager.get_store(store_label) == store
def test_primary_names_cleaning(tconf):
    """primaryNames keeps only the last two views' primary entries."""
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=0,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            ["Alpha", "Beta", "Gamma", "Delta"]))
    bls_bft_replica = FakeSomething(gc=lambda *args: None)
    replica = Replica(node, instId=0, config=tconf,
                      bls_bft_replica=bls_bft_replica)

    replica.primaryName = "Node1:0"
    assert list(replica.primaryNames.items()) == [(0, "Node1:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node2:0"
    assert list(replica.primaryNames.items()) == [(0, "Node1:0"),
                                                  (1, "Node2:0")]

    # From here on the oldest entry is dropped on each view change.
    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node3:0"
    assert list(replica.primaryNames.items()) == [(1, "Node2:0"),
                                                  (2, "Node3:0")]

    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node4:0"
    assert list(replica.primaryNames.items()) == [(2, "Node3:0"),
                                                  (3, "Node4:0")]
def test_apply_revert_commit(alh, db_manager,
                             initial_domain_size, initial_pool_size,
                             initial_config_size, initial_seq_no):
    """After applying two batches and rejecting the second, committing the
    first yields exactly one audit txn with the expected contents."""
    size_before = alh.ledger.size

    # apply 2 batches
    do_apply_audit_txn(alh,
                       txns_count=7, ledger_id=DOMAIN_LEDGER_ID,
                       view_no=3, pp_sq_no=35, txn_time=11111)
    txn_root_hash_1 = db_manager.get_ledger(
        DOMAIN_LEDGER_ID).uncommitted_root_hash
    state_root_hash_1 = db_manager.get_state(DOMAIN_LEDGER_ID).headHash
    txn_root_hash_2 = db_manager.get_ledger(
        POOL_LEDGER_ID).uncommitted_root_hash
    state_root_hash_2 = db_manager.get_state(POOL_LEDGER_ID).headHash
    do_apply_audit_txn(alh,
                       txns_count=15, ledger_id=POOL_LEDGER_ID,
                       view_no=3, pp_sq_no=36, txn_time=11112)

    # reject 2d batch
    alh.post_batch_rejected(POOL_LEDGER_ID)
    assert alh.ledger.uncommitted_size == alh.ledger.size + 1
    assert alh.ledger.size == size_before

    # commit 1st batch
    alh.commit_batch(FakeSomething())
    assert alh.ledger.uncommitted_size == alh.ledger.size
    assert alh.ledger.size == size_before + 1

    check_audit_txn(txn=alh.ledger.get_last_committed_txn(),
                    view_no=3, pp_seq_no=35,
                    seq_no=initial_seq_no + 1, txn_time=11111,
                    txn_roots={
                        DOMAIN_LEDGER_ID: txn_root_hash_1,
                        POOL_LEDGER_ID: txn_root_hash_2
                    },
                    state_roots={
                        DOMAIN_LEDGER_ID: state_root_hash_1,
                        POOL_LEDGER_ID: state_root_hash_2
                    },
                    pool_size=initial_pool_size,
                    domain_size=initial_domain_size + 7,
                    config_size=initial_config_size,
                    last_pool_seqno=None,
                    last_domain_seqno=None,
                    last_config_seqno=None,
                    primaries=DEFAULT_PRIMARIES,
                    node_reg=DEFAULT_NODE_REG)
def replica(tconf, request):
    """Build a non-master Replica on a fake node (viewNo from the param)."""
    node_stack = FakeSomething(
        name="fake stack",
        connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=request.param,
        quorums=Quorums(getValueFromModule(request, 'nodeCount', default=4)),
        nodestack=node_stack,
        utc_epoch=lambda *args: get_utc_epoch())
    bls_bft_replica = FakeSomething(gc=lambda *args: None)
    return Replica(node, instId=0, isMaster=False, config=tconf,
                   bls_bft_replica=bls_bft_replica)
def test_register_database(database_manager: DatabaseManager):
    """Registering a database makes it retrievable; re-registering the same
    id raises a LogicError."""
    db_id = 1
    db_led = FakeSomething()
    db_state = FakeSomething()

    # Nothing registered yet.
    assert database_manager.get_database(db_id) is None
    assert database_manager.get_ledger(db_id) is None
    assert database_manager.get_state(db_id) is None

    database_manager.register_new_database(db_id, db_led, db_state)

    # A second registration under the same id must be rejected.
    with pytest.raises(LogicError,
                       match='Trying to add already existing database'):
        database_manager.register_new_database(db_id,
                                               FakeSomething(),
                                               FakeSomething())

    assert database_manager.get_database(db_id).ledger == db_led
    assert database_manager.get_database(db_id).state == db_state
    assert database_manager.get_ledger(db_id) == db_led
    assert database_manager.get_state(db_id) == db_state