def nym_handler(tconf):
    """Fixture: a NymHandler wired to a DatabaseManager whose ledger entry
    is a fake ledger backed by a real in-memory pruning state."""
    db = DatabaseManager()
    handler = NymHandler(tconf, db)
    # Register a fake ledger plus a real state under the handler's ledger id.
    db.register_new_database(handler.ledger_id,
                             FakeSomething(),
                             PruningState(KeyValueStorageInMemory()))
    return handler
def __init__(self, viewNo, quorums, ledger_ids):
    """Build a fake node for tests: a fake network stack, an empty audit
    ledger registered in a DatabaseManager, and a large set of stubbed
    collaborators passed to the parent (presumably FakeSomething-based)
    constructor."""
    node_names = ["Alpha", "Beta", "Gamma", "Delta"]
    # Stack pretends all four nodes are connected.
    node_stack = FakeSomething(name="fake stack", connecteds=set(node_names))
    self.replicas = []
    self.viewNo = viewNo
    # Empty audit ledger: no txns, no last txn.
    audit_ledger = FakeSomething(size=0,
                                 get_last_txn=lambda *args: None,
                                 getAllTxn=lambda *args, **kwargs: [])
    db_manager = DatabaseManager()
    db_manager.register_new_database(AUDIT_LEDGER_ID, audit_ledger)
    super().__init__(name="fake node",
                     ledger_ids=ledger_ids,
                     _viewNo=viewNo,
                     quorums=quorums,
                     nodestack=node_stack,
                     utc_epoch=lambda *args: get_utc_epoch(),
                     mode=Mode.participating,
                     view_change_in_progress=False,
                     requests=Requests(),
                     # Batch/apply callbacks are no-op successes.
                     onBatchCreated=lambda self, *args, **kwargs: True,
                     applyReq=lambda self, *args, **kwargs: True,
                     primaries=[],
                     get_validators=lambda: [],
                     db_manager=db_manager,
                     write_manager=FakeSomething(
                         database_manager=db_manager,
                         apply_request=lambda req, cons_time: None,
                         future_primary_handler=FakeSomething(
                             primaries={},
                             get_primaries=lambda *args: [])),
                     timer=QueueTimer(),
                     poolManager=FakeSomething(
                         node_names_ordered_by_rank=lambda: node_names))
def db_manager(tconf):
    """Fixture: a DatabaseManager holding a monkey-patched fake token ledger
    and a PruningState over an in-memory key-value store."""
    manager = DatabaseManager()
    kv_storage = initKeyValueStorage(
        KeyValueStorageType.Memory, None, "tokenInMemoryStore",
        txn_serializer=serialization.multi_sig_store_serializer)
    fake_ledger = get_fake_ledger()

    def _commit(count):
        # Committing promotes the uncommitted root hash to committed.
        fake_ledger.committed_root_hash = fake_ledger.uncommitted_root_hash
        return None, [1]

    fake_ledger.commitTxns = _commit
    fake_ledger.root_hash = txn_root_serializer.serialize("1")
    fake_ledger.uncommitted_root_hash = "1"
    fake_ledger.uncommitted_size = 1
    fake_ledger.size = 0
    fake_ledger.discardTxns = lambda x: None
    fake_ledger.committed_root_hash = "-1"
    fake_ledger.append_txns_metadata = lambda txns, txn_time: [
        append_txn_metadata(txn, 2, txn_time, 2) for txn in txns]
    fake_ledger.appendTxns = lambda x: (None, x)
    manager.register_new_database(TOKEN_LEDGER_ID,
                                  fake_ledger,
                                  PruningState(kv_storage))
    return manager
def node_handler():
    """Fixture: a NodeHandler whose ledger id is backed by a fake ledger
    and a real in-memory pruning state."""
    db = DatabaseManager()
    fake_bls = FakeSomething()
    handler = NodeHandler(db, fake_bls)
    db.register_new_database(handler.ledger_id,
                             FakeSomething(),
                             PruningState(KeyValueStorageInMemory()))
    return handler
def test_register_store(database_manager: DatabaseManager):
    """A store is absent before registration and retrievable afterwards."""
    store_label = 'aaa'
    store = FakeSomething()
    # Fix: compare to None with `is` (identity), not `==` (PEP 8 E711).
    assert database_manager.get_store(store_label) is None
    database_manager.register_new_store(store_label, store)
    assert database_manager.get_store(store_label) == store
def nym_handler(tconf):
    """Fixture: a NymHandler whose state is a dict-backed fake State with
    stubbed get/set."""
    db = DatabaseManager()
    handler = NymHandler(tconf, db)
    fake_state = State()
    fake_state.txn_list = {}
    # get/set operate on the plain dict; isCommitted is accepted but ignored.
    fake_state.get = lambda key, isCommitted: fake_state.txn_list.get(key, None)
    fake_state.set = lambda key, value: fake_state.txn_list.update({key: value})
    db.register_new_database(handler.ledger_id, FakeSomething(), fake_state)
    return handler
def db_manager(tconf, tdir, idr_cache):
    """Fixture: a DatabaseManager with the idr cache store and fake domain,
    config and pool ledgers, each with its own in-memory pruning state."""
    manager = DatabaseManager()
    manager.register_new_store(IDR_CACHE_LABEL, idr_cache)
    for ledger_id in (DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID, POOL_LEDGER_ID):
        manager.register_new_database(ledger_id,
                                      get_fake_ledger(),
                                      PruningState(KeyValueStorageInMemory()))
    return manager
def node_handler():
    """Fixture: a NodeHandler whose state is a dict-backed fake State with
    stubbed get/set."""
    db = DatabaseManager()
    fake_bls = FakeSomething()
    handler = NodeHandler(db, fake_bls)
    fake_state = State()
    fake_state.txn_list = {}
    # get/set operate on the plain dict; is_committed is accepted but ignored.
    fake_state.get = lambda key, is_committed: fake_state.txn_list.get(key, None)
    fake_state.set = lambda key, value: fake_state.txn_list.update({key: value})
    db.register_new_database(handler.ledger_id, FakeSomething(), fake_state)
    return handler
def test_common_stores(database_manager: DatabaseManager):
    """The well-known store labels are exposed through dedicated properties
    once registered."""
    labels = [BLS_LABEL, TS_LABEL, IDR_CACHE_LABEL, ATTRIB_LABEL]
    properties = ("bls_store", "ts_store", "idr_cache", "attribute_store")
    # None of the common store properties resolve before registration.
    for prop in properties:
        assert getattr(database_manager, prop) is None
    for label in labels:
        database_manager.register_new_store(label, FakeSomething())
    # All of them resolve afterwards.
    for prop in properties:
        assert getattr(database_manager, prop) is not None
def test_ordered_cleaning(tconf):
    """Garbage collection drops ordered entries from views older than the
    previous one."""
    last_view_no = 2
    node = FakeSomething(name="fake node",
                         ledger_ids=[0],
                         viewNo=last_view_no,
                         utc_epoch=get_utc_epoch,
                         get_validators=lambda: [],
                         internal_bus=InternalBus(),
                         db_manager=DatabaseManager())
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=fake_bls)
    replica._consensus_data.view_no = last_view_no

    per_view = 3
    ordered_keys = [(view, seq)
                    for view in range(last_view_no + 1)
                    for seq in range(per_view)]
    for key in ordered_keys:
        replica.addToOrdered(*key)
    # gc normally runs after a stable checkpoint; nothing is executed in
    # this test, so trigger it by hand.
    replica._gc(100)
    # Only entries at or after the previous view should survive.
    assert len(replica.ordered) == len(ordered_keys[per_view:])
def __init__(self,
             data: ConsensusSharedData,
             timer: TimerService,
             bus: InternalBus,
             network: ExternalBus,
             db_manager: DatabaseManager,
             stasher: StashingRouter,
             is_master_degraded: Callable[[], bool],
             metrics: MetricsCollector = NullMetricsCollector()):
    """Wire the service to its collaborators, build the InstanceChange
    tracker, and subscribe to the messages this service processes.

    :param data: shared consensus state
    :param timer: timer service; also supplies the current-time provider
    :param bus: internal event bus
    :param network: external network bus
    :param db_manager: source of the node-status store used by the
        InstanceChangeProvider
    :param stasher: router delivering (possibly stashed) InstanceChange msgs
    :param is_master_degraded: predicate queried to decide degradation
    :param metrics: metrics sink; defaults to a null collector
    """
    self._data = data
    self._timer = timer
    self._bus = bus
    self._network = network
    self._stasher = stasher
    self._is_master_degraded = is_master_degraded
    self.metrics = metrics
    self._config = getConfig()
    # Tracks InstanceChange votes; entries older than the configured
    # interval are considered outdated. Persisted in the node-status DB.
    self._instance_changes = \
        InstanceChangeProvider(outdated_ic_interval=self._config.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL,
                               node_status_db=db_manager.get_store(NODE_STATUS_DB_LABEL),
                               time_provider=timer.get_current_time)
    self._subscription = Subscription()
    # Internal-bus events.
    self._subscription.subscribe(bus, VoteForViewChange, self.process_vote_for_view_change)
    self._subscription.subscribe(bus, NewViewAccepted, self.process_new_view_accepted)
    # InstanceChange messages arrive through the stasher.
    self._subscription.subscribe(stasher, InstanceChange, self.process_instance_change)
def __init__(self, viewNo, quorums, ledger_ids):
    """Build a fake node for tests: a fake fully-connected network stack and
    stubbed collaborators forwarded to the parent constructor."""
    # Stack pretends all four nodes are connected.
    node_stack = FakeSomething(
        name="fake stack",
        connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    self.replicas = []
    self.viewNo = viewNo
    super().__init__(name="fake node",
                     ledger_ids=ledger_ids,
                     _viewNo=viewNo,
                     quorums=quorums,
                     nodestack=node_stack,
                     utc_epoch=lambda *args: get_utc_epoch(),
                     mode=Mode.participating,
                     view_change_in_progress=False,
                     pre_view_change_in_progress=False,
                     requests=Requests(),
                     # Batch/apply callbacks are no-op successes.
                     onBatchCreated=lambda self, *args, **kwargs: True,
                     applyReq=lambda self, *args, **kwargs: True,
                     primaries=[],
                     get_validators=lambda: [],
                     db_manager=None,
                     internal_bus=InternalBus(),
                     write_manager=FakeSomething(
                         database_manager=DatabaseManager(),
                         apply_request=lambda req, cons_time: None),
                     timer=QueueTimer())
def ledgers_freeze_handler(tconf, domain_state, audit_ledger):
    """Fixture: a LedgersFreezeHandler with config, domain and audit
    databases registered."""
    db = DatabaseManager()
    freeze_handler = LedgersFreezeHandler(db)
    db.register_new_database(freeze_handler.ledger_id,
                             FakeSomething(),
                             PruningState(KeyValueStorageInMemory()))
    db.register_new_database(DOMAIN_LEDGER_ID, FakeSomething(), domain_state)
    # The audit ledger carries no state.
    db.register_new_database(AUDIT_LEDGER_ID, audit_ledger)
    return freeze_handler
def test_register_database_no_state(database_manager: DatabaseManager):
    """A database may be registered with or without an accompanying state."""
    first_id = 1
    first_ledger = FakeSomething()
    first_state = FakeSomething()
    database_manager.register_new_database(first_id, first_ledger, first_state)
    assert first_id in database_manager._ledgers
    assert first_id in database_manager._states
    assert database_manager._ledgers[first_id] == first_ledger
    assert database_manager._states[first_id] == first_state

    second_id = 2
    second_ledger = FakeSomething()
    database_manager.register_new_database(second_id, second_ledger)
    assert second_id in database_manager._ledgers
    # No state was supplied, so none must have been recorded.
    assert second_id not in database_manager._states
    assert database_manager._ledgers[second_id] == second_ledger
def test_register_database(database_manager: DatabaseManager):
    """Registration exposes the database/ledger/state and a duplicate id is
    rejected with a LogicError."""
    ledger_id = 1
    fake_ledger = FakeSomething()
    fake_state = FakeSomething()
    assert database_manager.get_database(ledger_id) is None
    assert database_manager.get_ledger(ledger_id) is None
    assert database_manager.get_state(ledger_id) is None

    database_manager.register_new_database(ledger_id, fake_ledger, fake_state)
    # Registering the same id a second time must be refused.
    with pytest.raises(LogicError,
                       match='Trying to add already existing database'):
        database_manager.register_new_database(ledger_id,
                                               FakeSomething(),
                                               FakeSomething())
    assert database_manager.get_database(ledger_id).ledger == fake_ledger
    assert database_manager.get_database(ledger_id).state == fake_state
    assert database_manager.get_ledger(ledger_id) == fake_ledger
    assert database_manager.get_state(ledger_id) == fake_state
def view_change_trigger_service(internal_bus, external_bus, timer, stasher, validators):
    """Fixture: a ViewChangeTriggerService over fresh participating shared
    consensus data."""
    # TODO: Use validators fixture
    shared_data = ConsensusSharedData("some_name", genNodeNames(4), 0)
    shared_data.node_mode = Mode.participating
    shared_data.node_status = Status.started
    return ViewChangeTriggerService(data=shared_data,
                                    timer=timer,
                                    bus=internal_bus,
                                    network=external_bus,
                                    db_manager=DatabaseManager(),
                                    stasher=stasher,
                                    is_master_degraded=lambda: False)
def create_test_write_req_manager(name: str, genesis_txns: List) -> WriteRequestManager:
    """Build a WriteRequestManager (with the test handler and a seq-no store)
    bootstrapped from the given genesis transactions."""
    database_manager = DatabaseManager()
    write_manager = WriteRequestManager(database_manager)
    read_manager = ReadRequestManager()
    register_test_handler(write_manager)
    database_manager.register_new_store(
        SEQ_NO_DB_LABEL, ReqIdrToTxn(KeyValueStorageInMemory()))
    bootstrap = TestLedgersBootstrap(write_req_manager=write_manager,
                                     read_req_manager=read_manager,
                                     action_req_manager=FakeSomething(),
                                     name=name,
                                     config=getConfig(),
                                     ledger_ids=Node.ledger_ids)
    # Split genesis into node and nym transactions for bootstrapping.
    node_txns = [txn for txn in genesis_txns if get_type(txn) == NODE]
    nym_txns = [txn for txn in genesis_txns if get_type(txn) == NYM]
    bootstrap.set_genesis_transactions(node_txns, nym_txns)
    bootstrap.init()
    return write_manager
def __init__(self, tmpdir, config=None):
    """Build a fake four-node test node ('Node1') with three replicas,
    stubbed pool/monitor/stack collaborators, a ledger manager holding two
    fake ledgers, and no-op catchup/callback hooks.

    :param tmpdir: base directory for the node
    :param config: optional config; falls back to getConfigOnce()
    """
    node_names = ['Node1', 'Node2', 'Node3', 'Node4']
    self.basedirpath = tmpdir
    self.name = node_names[0]
    self.viewNo = 0
    self.db_manager = DatabaseManager()
    self.timer = QueueTimer()
    self.f = 1
    self.replicas = dict()
    self.requests = Requests()
    self.rank = None
    self.allNodeNames = node_names
    # Every node registered on the same loopback HA for tests.
    self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
    self.nodeIds = []
    self.totalNodes = len(self.allNodeNames)
    self.poolManager = FakeSomething(
        node_names_ordered_by_rank=lambda: node_names)
    self.mode = Mode.starting
    self.monitor = FakeSomething(isMasterDegraded=lambda: False)
    self.config = config or getConfigOnce()
    self.nodeStatusDB = None
    self.quorums = Quorums(self.totalNodes)
    # Stack pretends all nodes are connected.
    self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
    self.write_manager = FakeSomething(
        node_reg_handler=NodeRegHandler(self.db_manager))
    self.primaries_selector = RoundRobinConstantNodesPrimariesSelector(
        node_names)
    # One master replica plus two backups.
    self.replicas = {
        0: Replica(node=self, instId=0, isMaster=True, config=self.config),
        1: Replica(node=self, instId=1, isMaster=False, config=self.config),
        2: Replica(node=self, instId=2, isMaster=False, config=self.config)
    }
    self.requiredNumberOfInstances = 2
    self._found = False
    self.ledgerManager = LedgerManager(self)
    ledger0 = FakeLedger(0, 10)
    ledger1 = FakeLedger(1, 5)
    self.ledgerManager.addLedger(0, ledger0)
    self.ledgerManager.addLedger(1, ledger1)
    # NOTE(review): quorums was already set identically above — presumably
    # a leftover duplicate assignment.
    self.quorums = Quorums(self.totalNodes)
    self.metrics = NullMetricsCollector()
    # For catchup testing
    self.view_change_in_progress = False
    self.ledgerManager.last_caught_up_3PC = (0, 0)
    self.master_last_ordered_3PC = (0, 0)
    self.seqNoDB = {}
    # callbacks
    self.onBatchCreated = lambda self, *args, **kwargs: True
def txn_author_agreement_disable_handler(tconf, domain_state, config_state):
    """Fixture: a TxnAuthorAgreementDisableHandler with config and domain
    states registered."""
    db = DatabaseManager()
    disable_handler = TxnAuthorAgreementDisableHandler(db)
    db.register_new_database(disable_handler.ledger_id,
                             FakeSomething(),
                             config_state)
    db.register_new_database(DOMAIN_LEDGER_ID, FakeSomething(), domain_state)
    return disable_handler
def txn_author_agreement_aml_handler(tconf, domain_state):
    """Fixture: a TxnAuthorAgreementAmlHandler with a real in-memory pruning
    state for its own ledger and the given domain state."""
    db = DatabaseManager()
    aml_handler = TxnAuthorAgreementAmlHandler(db, FakeSomething())
    db.register_new_database(aml_handler.ledger_id,
                             FakeSomething(),
                             PruningState(KeyValueStorageInMemory()))
    db.register_new_database(DOMAIN_LEDGER_ID, FakeSomething(), domain_state)
    return aml_handler
def database_manager(tdir_for_func):
    """Fixture: a DatabaseManager with a real Merkle-tree ledger plus
    RocksDB-backed pruning state, and a timestamp store."""
    manager = DatabaseManager()
    merkle_ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
    manager.register_new_database(
        LEDGER_ID,
        merkle_ledger,
        PruningState(KeyValueStorageRocksdb(tdir_for_func, 'kv1')))
    ts_storage = StateTsDbStorage(
        'test', {1: KeyValueStorageRocksdb(tdir_for_func, 'kv2')})
    manager.register_new_store(TS_LABEL, ts_storage)
    return manager
def _service(name):
    """Build a ViewChangeService whose data carries a stable checkpoint and
    whose trigger service is subscribed on the buses."""
    data = consensus_data(name)
    data.node_mode = Mode.participating
    stable_cp = Checkpoint(instId=0,
                           viewNo=initial_view_no,
                           seqNoStart=0,
                           seqNoEnd=DEFAULT_STABLE_CHKP,
                           digest=cp_digest(DEFAULT_STABLE_CHKP))
    data.checkpoints.append(stable_cp)
    # Instantiated only for its bus/stasher subscription side effects.
    ViewChangeTriggerService(data=data,
                             timer=timer,
                             bus=internal_bus,
                             network=external_bus,
                             db_manager=DatabaseManager(),
                             stasher=stasher,
                             is_master_degraded=lambda: False)
    selector = RoundRobinConstantNodesPrimariesSelector(validators)
    return ViewChangeService(data, timer, internal_bus, external_bus,
                             stasher, selector)
def create_test_write_req_manager(name: str, genesis_txns: List) -> WriteRequestManager:
    """Build a WriteRequestManager bootstrapped from the given genesis
    transactions."""
    database_manager = DatabaseManager()
    write_manager = WriteRequestManager(database_manager)
    read_manager = ReadRequestManager()
    bootstrap = TestLedgersBootstrap(write_req_manager=write_manager,
                                     read_req_manager=read_manager,
                                     action_req_manager=FakeSomething(),
                                     name=name,
                                     config=getConfig(),
                                     ledger_ids=Node.ledger_ids)
    # Split genesis into node and nym transactions for bootstrapping.
    node_txns = [txn for txn in genesis_txns if get_type(txn) == NODE]
    nym_txns = [txn for txn in genesis_txns if get_type(txn) == NYM]
    bootstrap.set_genesis_transactions(node_txns, nym_txns)
    bootstrap.init()
    return write_manager
def db_manager(tconf, tdir):
    """Fixture: a DatabaseManager with a RocksDB-backed idr cache and a fake
    domain ledger."""
    manager = DatabaseManager()
    cache = IdrCache(
        'name',
        initKeyValueStorage(KeyValueStorageType.Rocksdb,
                            tdir,
                            tconf.idrCacheDbName,
                            db_config=tconf.db_idr_cache_db_config))
    manager.register_new_store('idr', cache)
    manager.register_new_database(DOMAIN_LEDGER_ID, get_fake_ledger(), State())
    return manager
def __init__(self, tmpdir, config=None):
    """Build a fake four-node test node ('Node1') with three replicas, a
    ledger manager holding two fake ledgers, a view changer and a primary
    selector, plus no-op catchup/callback hooks.

    :param tmpdir: base directory for the node
    :param config: optional config; falls back to getConfigOnce()
    """
    self.basedirpath = tmpdir
    self.name = 'Node1'
    self.internal_bus = InternalBus()
    self.db_manager = DatabaseManager()
    self.timer = QueueTimer()
    self.f = 1
    self.replicas = dict()
    self.requests = Requests()
    self.rank = None
    self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
    # Every node registered on the same loopback HA for tests.
    self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
    self.nodeIds = []
    self.totalNodes = len(self.allNodeNames)
    self.mode = Mode.starting
    self.config = config or getConfigOnce()
    self.nodeStatusDB = None
    # One master replica plus two backups.
    self.replicas = {
        0: Replica(node=self, instId=0, isMaster=True, config=self.config),
        1: Replica(node=self, instId=1, isMaster=False, config=self.config),
        2: Replica(node=self, instId=2, isMaster=False, config=self.config),
    }
    self._found = False
    self.ledgerManager = LedgerManager(self)
    ledger0 = FakeLedger(0, 10)
    ledger1 = FakeLedger(1, 5)
    self.ledgerManager.addLedger(0, ledger0)
    self.ledgerManager.addLedger(1, ledger1)
    self.quorums = Quorums(self.totalNodes)
    self.view_changer = create_view_changer(self)
    self.elector = PrimarySelector(self)
    self.metrics = NullMetricsCollector()
    # For catchup testing
    self.catchup_rounds_without_txns = 0
    self.view_change_in_progress = False
    self.ledgerManager.last_caught_up_3PC = (0, 0)
    self.master_last_ordered_3PC = (0, 0)
    self.seqNoDB = {}
    # callbacks
    self.onBatchCreated = lambda self, *args, **kwargs: True
def txn_author_agreement_aml_handler(tconf, domain_state):
    """Fixture: a TxnAuthorAgreementAmlHandler whose own state is a
    dict-backed fake State with stubbed get/set."""
    db = DatabaseManager()
    aml_handler = TxnAuthorAgreementAmlHandler(db, FakeSomething())
    fake_state = State()
    fake_state.txn_list = {}
    # get/set operate on the plain dict; isCommitted is accepted but ignored.
    fake_state.get = lambda key, isCommitted=False: fake_state.txn_list.get(key, None)
    fake_state.set = lambda key, value, isCommitted=False: fake_state.txn_list.update(
        {key: value})
    db.register_new_database(aml_handler.ledger_id, FakeSomething(), fake_state)
    db.register_new_database(DOMAIN_LEDGER_ID, FakeSomething(), domain_state)
    return aml_handler
def test_primary_names_cleaning(tconf):
    """primaryNames keeps only the previous and current view's primaries."""
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=0,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            ["Alpha", "Beta", "Gamma", "Delta"]))
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=fake_bls)

    # (primary to set, expected sliding window of (view, primary) pairs)
    expectations = [
        ("Node1:0", [(0, "Node1:0")]),
        ("Node2:0", [(0, "Node1:0"), (1, "Node2:0")]),
        ("Node3:0", [(1, "Node2:0"), (2, "Node3:0")]),
        ("Node4:0", [(2, "Node3:0"), (3, "Node4:0")]),
    ]
    for step, (primary, expected) in enumerate(expectations):
        if step:
            # Advance to the next view before switching primary.
            node.viewNo += 1
            replica._consensus_data.view_no = node.viewNo
        replica.primaryName = primary
        assert list(replica.primaryNames.items()) == expected
def test_ordered_cleaning(tconf):
    """The ordering service's gc drops entries from views older than the
    previous one."""
    last_view_no = 2
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=last_view_no,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            ["Alpha", "Beta", "Gamma", "Delta"]))
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=fake_bls)
    replica._consensus_data.view_no = last_view_no

    per_view = 3
    ordered_keys = [(view, seq)
                    for view in range(last_view_no + 1)
                    for seq in range(per_view)]
    for key in ordered_keys:
        replica._ordering_service._add_to_ordered(*key)
    # gc normally runs after a stable checkpoint; nothing is executed in
    # this test, so trigger it by hand.
    replica._ordering_service.gc(100)
    # Only entries at or after the previous view should survive.
    assert len(replica._ordering_service.ordered) == len(
        ordered_keys[per_view:])
def test_primary_names_cleaning(tconf):
    """primaryNames keeps only the previous and current view's primaries."""
    node = FakeSomething(name="fake node",
                         ledger_ids=[0],
                         viewNo=0,
                         utc_epoch=get_utc_epoch,
                         get_validators=lambda: [],
                         internal_bus=InternalBus(),
                         db_manager=DatabaseManager(),
                         requests=[],
                         mode=Mode.participating,
                         timer=QueueTimer(),
                         quorums=Quorums(4),
                         write_manager=None)
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=fake_bls)

    # (primary to set, expected sliding window of (view, primary) pairs)
    expectations = [
        ("Node1:0", [(0, "Node1:0")]),
        ("Node2:0", [(0, "Node1:0"), (1, "Node2:0")]),
        ("Node3:0", [(1, "Node2:0"), (2, "Node3:0")]),
        ("Node4:0", [(2, "Node3:0"), (3, "Node4:0")]),
    ]
    for step, (primary, expected) in enumerate(expectations):
        if step:
            # Advance to the next view before switching primary.
            node.viewNo += 1
            replica._consensus_data.view_no = node.viewNo
        replica.primaryName = primary
        assert list(replica.primaryNames.items()) == expected
def db_manager(tconf, tdir):
    """Fixture: a DatabaseManager with a RocksDB-backed idr cache and a
    dict-backed fake domain state."""
    manager = DatabaseManager()
    fake_state = State()
    fake_state.txn_list = {}
    # get/set operate on the plain dict; isCommitted is accepted but ignored.
    fake_state.get = lambda key, isCommitted=True: fake_state.txn_list.get(key, None)
    fake_state.set = lambda key, value: fake_state.txn_list.update({key: value})
    cache = IdrCache(
        'name',
        initKeyValueStorage(KeyValueStorageType.Rocksdb,
                            tdir,
                            tconf.idrCacheDbName,
                            db_config=tconf.db_idr_cache_db_config))
    manager.register_new_store(IDR_CACHE_LABEL, cache)
    manager.register_new_database(DOMAIN_LEDGER_ID, get_fake_ledger(), fake_state)
    return manager