def __init__(self, tmpdir, config=None):
    """Fake 4-node pool node standing in for a real plenum Node in tests.

    Builds one master replica (instId 0) and two backups, a ledger manager
    with two fake ledgers, and stubs for networking/monitoring collaborators.

    :param tmpdir: base directory path for the fake node
    :param config: optional node config; defaults to ``getConfigOnce()``
    """
    node_names = ['Node1', 'Node2', 'Node3', 'Node4']
    self.basedirpath = tmpdir
    self.name = node_names[0]
    self.viewNo = 0
    self.db_manager = DatabaseManager()
    self.timer = QueueTimer()
    self.f = 1
    # Placeholder until the real replicas dict is built below; kept in case
    # Replica's constructor consults node.replicas — TODO confirm.
    self.replicas = dict()
    self.requests = Requests()
    self.rank = None
    self.allNodeNames = node_names
    self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
    self.nodeIds = []
    self.totalNodes = len(self.allNodeNames)
    self.poolManager = FakeSomething(
        node_names_ordered_by_rank=lambda: node_names)
    self.mode = Mode.starting
    self.monitor = FakeSomething(isMasterDegraded=lambda: False)
    self.config = config or getConfigOnce()
    self.nodeStatusDB = None
    self.quorums = Quorums(self.totalNodes)
    self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
    self.write_manager = FakeSomething(
        node_reg_handler=NodeRegHandler(self.db_manager))
    self.primaries_selector = RoundRobinConstantNodesPrimariesSelector(
        node_names)
    # One master replica and two backups.
    self.replicas = {
        0: Replica(node=self, instId=0, isMaster=True, config=self.config),
        1: Replica(node=self, instId=1, isMaster=False, config=self.config),
        2: Replica(node=self, instId=2, isMaster=False, config=self.config)
    }
    self.requiredNumberOfInstances = 2
    self._found = False
    self.ledgerManager = LedgerManager(self)
    ledger0 = FakeLedger(0, 10)
    ledger1 = FakeLedger(1, 5)
    self.ledgerManager.addLedger(0, ledger0)
    self.ledgerManager.addLedger(1, ledger1)
    # NOTE: a second, identical `self.quorums = Quorums(self.totalNodes)`
    # assignment that appeared here was removed — totalNodes is unchanged
    # between the two assignments, so it was a pure duplicate.
    self.metrics = NullMetricsCollector()

    # For catchup testing
    self.view_change_in_progress = False
    self.ledgerManager.last_caught_up_3PC = (0, 0)
    self.master_last_ordered_3PC = (0, 0)
    self.seqNoDB = {}

    # callbacks
    # Fixed: the original lambda declared a leading `self` parameter, so the
    # stub could not be invoked with zero arguments. Accept anything.
    self.onBatchCreated = lambda *args, **kwargs: True
def __init__(self, tmpdir):
    """Fake 4-node pool node (old-style list replicas) with a view change
    already in progress.

    :param tmpdir: base directory path for the fake node
    """
    self.basedirpath = tmpdir
    self.name = 'Node1'
    self.f = 1
    self.replicas = []
    self.viewNo = 0
    self.rank = None
    self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
    self.nodeReg = {nm: HA("127.0.0.1", 0) for nm in self.allNodeNames}
    self.totalNodes = len(self.allNodeNames)
    self.mode = Mode.starting
    # Replica 0 is the master; 1 and 2 are backups.
    self.replicas = [Replica(node=self, instId=i, isMaster=(i == 0))
                     for i in range(3)]
    self._found = False
    self.ledgerManager = LedgerManager(self, ownedByNode=True)
    for ledger_id, size in ((0, 10), (1, 5)):
        self.ledgerManager.addLedger(ledger_id, FakeLedger(ledger_id, size))
    self.quorums = Quorums(self.totalNodes)
    # This fake starts mid view-change without a propagated primary.
    self.view_change_in_progress = True
    self.propagate_primary = False
def __init__(self, tmpdir):
    """Fake 4-node pool node with a real ViewChanger and PrimarySelector.

    :param tmpdir: base directory path for the fake node
    """
    self.basedirpath = tmpdir
    self.name = 'Node1'
    self.f = 1
    self.replicas = []
    self.rank = None
    self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
    self.nodeReg = {nm: HA("127.0.0.1", 0) for nm in self.allNodeNames}
    self.totalNodes = len(self.allNodeNames)
    self.mode = Mode.starting
    # Replica 0 is the master; 1 and 2 are backups.
    self.replicas = [Replica(node=self, instId=i, isMaster=(i == 0))
                     for i in range(3)]
    self._found = False
    self.ledgerManager = LedgerManager(self, ownedByNode=True)
    for ledger_id, size in ((0, 10), (1, 5)):
        self.ledgerManager.addLedger(ledger_id, FakeLedger(ledger_id, size))
    self.quorums = Quorums(self.totalNodes)
    self.config = getConfig()
    # TODO do we need fake object here?
    self.view_changer = ViewChanger(self)
    self.elector = PrimarySelector(self)
def validate_multi_signature(state_proof, txnPoolNodeSet):
    """Validate the multi signature attached to a state proof.

    :param state_proof: state proof dict containing a MULTI_SIGNATURE entry
    :param txnPoolNodeSet: nodes of the pool, used for quorum size and to
        look up each participant's BLS public key
    :return: True iff enough participants signed and the BLS multi-signature
        verifies against the reconstructed value
    """
    multi_signature = state_proof[MULTI_SIGNATURE]
    if not multi_signature:
        logger.debug("There is a state proof, but no multi signature")
        return False

    participants = multi_signature[MULTI_SIGNATURE_PARTICIPANTS]
    signature = multi_signature[MULTI_SIGNATURE_SIGNATURE]
    value = MultiSignatureValue(
        **(multi_signature[MULTI_SIGNATURE_VALUE])).as_single_value()
    quorums = Quorums(len(txnPoolNodeSet))
    if not quorums.bls_signatures.is_reached(len(participants)):
        logger.debug("There is not enough participants of "
                     "multi-signature")
        return False

    public_keys = []
    for node_name in participants:
        # BUG FIX: next() without a default raises StopIteration when a
        # participant name matches no node, which made the `key is None`
        # check below unreachable. Supply None as the default instead.
        key = next((node.bls_bft.bls_crypto_signer.pk
                    for node in txnPoolNodeSet
                    if node.name == node_name),
                   None)
        if key is None:
            logger.debug("There is no bls key for node {}".format(node_name))
            return False
        public_keys.append(key)

    _multi_sig_verifier = _create_multi_sig_verifier()
    return _multi_sig_verifier.verify_multi_sig(signature, value, public_keys)
def __init__(self, tmpdir, config=None):
    """Fake 4-node pool node with configured dict replicas, a ViewChanger
    and a PrimarySelector.

    :param tmpdir: base directory path for the fake node
    :param config: optional node config; defaults to ``getConfigOnce()``
    """
    self.basedirpath = tmpdir
    self.name = 'Node1'
    self.f = 1
    self.replicas = dict()
    self.requests = []
    self.rank = None
    self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
    self.nodeReg = {nm: HA("127.0.0.1", 0) for nm in self.allNodeNames}
    self.totalNodes = len(self.allNodeNames)
    self.mode = Mode.starting
    self.config = config or getConfigOnce()
    # Replica 0 is the master; 1 and 2 are backups.
    self.replicas = {
        i: Replica(node=self, instId=i, isMaster=(i == 0), config=self.config)
        for i in range(3)
    }
    self._found = False
    self.ledgerManager = LedgerManager(self, ownedByNode=True)
    for ledger_id, size in ((0, 10), (1, 5)):
        self.ledgerManager.addLedger(ledger_id, FakeLedger(ledger_id, size))
    self.quorums = Quorums(self.totalNodes)
    self.view_changer = ViewChanger(self)
    self.elector = PrimarySelector(self)
    self.metrics = NullMetricsCollector()
def fake_view_changer(request, tconf):
    """Fixture: a view changer wired to a fully faked node.

    The fake node mimics a 4-node pool (Alpha..Delta); the view number and
    optional pool size come from the requesting test module.
    """
    node_count = 4
    peer_names = {"Alpha", "Beta", "Gamma", "Delta"}
    node_stack = FakeSomething(name="fake stack",
                               connecteds=set(peer_names),
                               conns=set(peer_names))
    monitor = FakeSomething(isMasterDegraded=lambda: False,
                            areBackupsDegraded=lambda: [],
                            prettymetrics='')
    master_replica = FakeSomething(
        instId=0,
        viewNo=request.param,
        _consensus_data=FakeSomething(view_no=request.param,
                                      waiting_for_new_view=False))
    pool_quorums = Quorums(
        getValueFromModule(request, 'nodeCount', default=node_count))
    node = FakeSomething(name="SomeNode",
                         timer=QueueTimer(),
                         viewNo=request.param,
                         quorums=pool_quorums,
                         nodestack=node_stack,
                         utc_epoch=lambda *args: get_utc_epoch(),
                         config=tconf,
                         monitor=monitor,
                         discard=lambda a, b, c, d: print(b),
                         primaries_disconnection_times=[None] * getRequiredInstances(node_count),
                         master_primary_name='Alpha',
                         master_replica=master_replica,
                         nodeStatusDB=None)
    view_changer = create_view_changer(node)
    # TODO: This is a hack for tests compatibility, do something better
    view_changer.node = node
    return view_changer
def request_CPs_if_needed(self, ledgerId):
    """Re-request consistency proofs for a ledger after its CP timer fires.

    Does nothing if the timer was already cancelled. If the received proofs
    do not yield a reliable latest proof, asks peers for the missing
    consistency proof range. Finally resets the received-proof state.

    :param ledgerId: id of the ledger whose proofs are being collected
    """
    ledgerInfo = self.getLedgerInfoByType(ledgerId)
    if ledgerInfo.consistencyProofsTimer is None:
        return

    logger.debug("{} requesting consistency "
                 "proofs after timeout".format(self))

    # Consider an f value for the pool without this (catching-up) node.
    quorum = Quorums(self.owner.totalNodes - 1)
    proofs = ledgerInfo.recvdConsistencyProofs
    groupedProofs, null_proofs_count = self._groupConsistencyProofs(proofs)
    # A quorum of null proofs means this ledger is already up to date.
    if quorum.same_consistency_proof.is_reached(null_proofs_count):
        return
    result = self._latestReliableProof(groupedProofs, ledgerInfo.ledger)
    if not result:
        ledger_id, start, end = self.get_consistency_proof_request_params(
            ledgerId, groupedProofs)
        # BUG FIX: the format string had a single placeholder for three
        # values, so the requested range (start, end) never appeared in
        # the log message.
        logger.debug("{} sending consistency proof request: {} {} {}".format(
            self, ledger_id, start, end))
        self.owner.request_msg(CONSISTENCY_PROOF,
                               {f.LEDGER_ID.nm: ledger_id,
                                f.SEQ_NO_START.nm: start,
                                f.SEQ_NO_END.nm: end},
                               self.nodes_to_request_txns_from)

    ledgerInfo.recvdConsistencyProofs = {}
    ledgerInfo.consistencyProofsTimer = None
    ledgerInfo.recvdCatchupRepliesFrm = {}
def expectedClientToPoolRequestDeliveryTime(nodeCount):
    """
    From: the Client send a request
    To: the request is delivered to f nodes
    """
    commit_quorum = Quorums(nodeCount).commit.value
    return commit_quorum * __Peer2PeerRequestExchangeTime
def view_changer():
    """Fixture: a ViewChanger over a minimal 7-node fake node.

    BUG FIX: the original ended with ``return view_changer``, which returned
    the fixture function object itself — the constructed ``node`` was never
    used and callers received a function, not a view changer. Construct and
    return a ViewChanger for the fake node instead.
    NOTE(review): ``ViewChanger(node)`` inferred from the sibling fixtures
    that build a view changer the same way — confirm against this module's
    imports.
    """
    config = FakeSomething(ViewChangeWindowSize=1, ForceViewChangeFreq=0)
    node = FakeSomething(name="fake node",
                         ledger_ids=[0],
                         config=config,
                         quorums=Quorums(7))
    return ViewChanger(node)
def __init__(self, ledger_id: int, config: object, input: RxChannel, output: TxChannel,
             timer: TimerService, metrics: MetricsCollector,
             provider: CatchupDataProvider):
    """Catchup sub-service that gathers ledger statuses and consistency
    proofs for a single ledger.

    Registers handlers for incoming LedgerStatus and ConsistencyProof
    messages and prepares (inactive) repeating timers used to re-ask peers
    when responses are slow.

    :param ledger_id: id of the ledger this service is responsible for
    :param config: node config; LedgerStatusTimeout and
        ConsistencyProofsTimeout are read from it
    :param input: channel delivering catchup messages (shadows the builtin
        ``input`` — kept for interface compatibility)
    :param output: channel for outgoing messages
    :param timer: timer service driving the repeating timers
    :param metrics: metrics sink
    :param provider: access to the ledger and the pool's node names
    """
    # Route incoming catchup messages to their handlers.
    router = Router(input)
    router.add(LedgerStatus, self.process_ledger_status)
    router.add(ConsistencyProof, self.process_consistency_proof)

    self._ledger_id = ledger_id
    self._ledger = provider.ledger(ledger_id)
    self._config = config
    self._output = output
    self._timer = timer
    self.metrics = metrics
    self._provider = provider
    self._is_working = False

    # Quorum sized by the current pool; recomputed on (re)start elsewhere.
    self._quorum = Quorums(len(self._provider.all_nodes_names()))

    self._same_ledger_status = set()
    self._cons_proofs = {}
    self._already_asked_for_cons_proofs_without_timeout = False
    self._last_txn_3PC_key = {}
    # Timeouts scale with the number of peers (pool size minus self).
    # Both timers are created inactive and armed when catchup starts.
    self._ledger_status_timer = \
        RepeatingTimer(self._timer,
                       self._config.LedgerStatusTimeout * (len(self._provider.all_nodes_names()) - 1),
                       self._reask_for_ledger_status,
                       active=False)
    self._consistency_proof_timer = \
        RepeatingTimer(self._timer,
                       self._config.ConsistencyProofsTimeout * (len(self._provider.all_nodes_names()) - 1),
                       self._reask_for_last_consistency_proof,
                       active=False)
def fake_view_changer(request, tconf):
    """Fixture: a ViewChanger wired to a faked 4-node pool node.

    View number comes from ``request.param``; pool size may be overridden
    by a module-level ``nodeCount``.
    """
    node_count = 4
    peer_names = {"Alpha", "Beta", "Gamma", "Delta"}
    node_stack = FakeSomething(name="fake stack",
                               connecteds=set(peer_names),
                               conns=set(peer_names))
    monitor = FakeSomething(isMasterDegraded=lambda: False,
                            areBackupsDegraded=lambda: [],
                            prettymetrics='')
    pool_quorums = Quorums(
        getValueFromModule(request, 'nodeCount', default=node_count))
    node = FakeSomething(name="SomeNode",
                         viewNo=request.param,
                         quorums=pool_quorums,
                         nodestack=node_stack,
                         utc_epoch=lambda *args: get_utc_epoch(),
                         config=tconf,
                         monitor=monitor,
                         discard=lambda a, b, c: print(b),
                         primaries_disconnection_times=[None] * getRequiredInstances(node_count),
                         master_primary_name='Alpha',
                         master_replica=FakeSomething(instId=0))
    return ViewChanger(node)
def expectedClientCatchupTime(nodeCount):
    """
    From: the Client finished the consistency proof procedure
    To: the Client finished the catchup procedure
    """
    commit_quorum = Quorums(nodeCount).commit.value
    exchange_time = 2 * commit_quorum * __Peer2PeerRequestExchangeTime
    return exchange_time + config.CatchupTransactionsTimeout
def set_validators(self, validators: List[str]):
    """Replace the validator list, resizing quorums if the count changed."""
    logger.info("{} updated validators list to {}".format(
        self.name, validators))
    self._validators = validators
    # TODO: INDY-2263 For some reason test_send_txns_bls_consensus fails without this check
    quorums_stale = self.quorums is None or self.quorums.n != len(validators)
    if quorums_stale:
        self.quorums = Quorums(len(validators))
        self.view_change_votes.update_quorums(self.quorums)
def expectedClientConsistencyProof(nodeCount):
    """
    From: the Client is connected to the Pool
    To: the Client finished the consistency proof procedure
    """
    commit_quorum = Quorums(nodeCount).commit.value
    exchange_time = commit_quorum * __Peer2PeerRequestExchangeTime
    return exchange_time + config.ConsistencyProofsTimeout
def checkLedgerIsOutOfSync(self, ledgerInfo) -> bool:
    """Return True unless a quorum of equal consistency proofs arrived."""
    recvdConsProof = ledgerInfo.recvdConsistencyProofs
    # Consider an f value when this node had not been added
    adjustedQuorum = Quorums(self.owner.totalNodes - 1)
    equal_state_proofs = self.__get_equal_state_proofs_count(recvdConsProof)
    quorum_reached = adjustedQuorum.same_consistency_proof.is_reached(
        equal_state_proofs)
    return not quorum_reached
def setPoolParams(self):
    """Recompute f, connection minimum and quorums from the node registry."""
    node_count = len(self.nodeReg)
    self.f = getMaxFailures(node_count)
    self.minNodesToConnect = self.f + 1
    self.totalNodes = node_count
    self.quorums = Quorums(node_count)
    logger.info("{} updated its pool parameters: f {}, totalNodes {},"
                "minNodesToConnect {}, quorums {}".format(
                    self.alias, self.f, self.totalNodes,
                    self.minNodesToConnect, self.quorums))
def replica(tconf, viewNo, inst_id, ledger_ids, mock_timestamp, fake_requests,
            txn_roots, state_roots, request):
    """Fixture: a Replica on a fake node, with its ordering service heavily
    stubbed so 3PC batches can be driven without real ledgers or BLS.

    Root hashes come from the ``txn_roots``/``state_roots`` fixtures; all
    time sources are pinned to ``mock_timestamp``. Any suspicion raised
    against a node fails the test immediately.
    """
    node = ReplicaFakeNode(viewNo=viewNo,
                           quorums=Quorums(
                               getValueFromModule(request, 'nodeCount',
                                                  default=4)),
                           ledger_ids=ledger_ids)
    # BLS stub: pass-through updates, no-op validation and processing.
    bls_bft_replica = FakeSomething(
        gc=lambda *args: None,
        update_pre_prepare=lambda params, l_id: params,
        validate_pre_prepare=lambda a, b: None,
        validate_prepare=lambda a, b: None,
        update_prepare=lambda a, b: a,
        process_prepare=lambda a, b: None,
        process_pre_prepare=lambda a, b: None,
        process_order=lambda *args: None)
    replica = Replica(node, instId=inst_id, isMaster=inst_id == 0,
                      config=tconf, bls_bft_replica=bls_bft_replica,
                      get_current_time=mock_timestamp,
                      get_time_for_3pc_batch=mock_timestamp)
    node.add_replica(replica)
    # NOTE(review): set on the class, not the instance — shared across all
    # ReplicaFakeNode instances created afterwards; presumably intentional
    # for these tests.
    ReplicaFakeNode.master_last_ordered_3PC = replica.last_ordered_3pc
    replica._ordering_service.last_accepted_pre_prepare_time = \
        replica.get_time_for_3pc_batch()
    # Pretend Alpha is the primary for this instance in the current view.
    replica.primaryName = "Alpha:{}".format(replica.instId)
    replica.primaryNames[replica.viewNo] = replica.primaryName
    # Root-hash lookups answer from the fixture-supplied tables.
    replica._ordering_service.get_txn_root_hash = \
        lambda ledger, to_str=False: txn_roots[ledger]
    replica._ordering_service.get_state_root_hash = \
        lambda ledger, to_str=False: state_roots[ledger]
    # Batch revert/creation hooks are no-ops in this fixture.
    replica._ordering_service._revert = \
        lambda ledgerId, stateRootHash, reqCount: None
    replica._ordering_service.post_batch_creation = \
        lambda three_pc_batch: None
    replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    replica._ordering_service._get_primaries_for_ordered = \
        lambda pp: [replica.primaryName]
    replica._ordering_service._get_node_reg_for_ordered = \
        lambda pp: ["Alpha", "Beta", "Gamma", "Delta"]

    # Any suspicion report is a test failure.
    def reportSuspiciousNodeEx(ex):
        assert False, ex

    replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
    return replica
def __init__(self, nodes: List[str]):
    """Pool simulation model over the given node names.

    :param nodes: names of all nodes in the simulated pool
    """
    self._message_delay = 1
    self._node_names = nodes
    self._quorum = Quorums(len(nodes))
    self._connections = PoolConnections()
    # One NodeModel per name, all sharing the same connection model.
    self._nodes = {}
    for node_name in self._node_names:
        self._nodes[node_name] = NodeModel(node_name, self._node_names,
                                           self._connections)
    outboxes = [model.outbox for model in self._nodes.values()]
    self._outbox = CompositeEventStream(*outboxes)
def start(self, request_ledger_statuses: bool):
    """Reset gathered catchup state; optionally poll peers for ledger status.

    :param request_ledger_statuses: when True, send LedgerStatus requests to
        all nodes and schedule the re-ask timer
    """
    self._is_working = True
    self._quorum = Quorums(len(self._provider.all_nodes_names()))
    self._same_ledger_status = set()
    self._cons_proofs = {}
    self._requested_consistency_proof = set()
    self._last_txn_3PC_key = {}
    if not request_ledger_statuses:
        return
    self._request_ledger_status_from_nodes()
    self._schedule_reask_ledger_status()
def __init__(self, name: str, node_names: List[str],
             connections: PoolConnections):
    """Simulated node: timer, view changer and event outboxes.

    :param name: this node's name
    :param node_names: names of all nodes in the pool
    :param connections: shared connection model for the pool
    """
    self._name = name
    self._node_names = node_names
    self._quorum = Quorums(len(node_names))
    # Simulation clock and corruption bookkeeping.
    self._ts = 0
    self._corrupted_name = None
    timer = TimerModel(name)
    self._timer = timer
    self._connections = connections
    self._view_changer = create_view_changer(self)
    internal_outbox = ListEventStream()
    self._internal_outbox = internal_outbox
    self.outbox = CompositeEventStream(internal_outbox, timer.outbox())
    # Periodic performance check every 30 simulated time units.
    self._check_performance_timer = RepeatingTimer(timer, 30,
                                                   self._check_performance)
def backup_instance_faulty_processor(tdir, tconf):
    """Fixture: a BackupInstanceFaultyProcessor over a resized fake node.

    The pool is grown so that each backup instance has three nodes plus the
    primary available (3 * (instances - 1) + 1 nodes in total).
    """
    node = FakeNode(tdir, config=tconf)
    node.view_change_in_progress = False
    node.requiredNumberOfInstances = len(node.replicas)
    last_index = (node.requiredNumberOfInstances - 1) * 3 + 2
    node.allNodeNames = ["Node{}".format(i) for i in range(1, last_index)]
    node.totalNodes = len(node.allNodeNames)
    node.quorums = Quorums(node.totalNodes)
    node.name = node.allNodeNames[0]
    node.replicas = FakeReplicas(node, node.replicas)
    node.backup_instance_faulty_processor = BackupInstanceFaultyProcessor(node)
    node.request_propagates = lambda: True
    return node.backup_instance_faulty_processor
def _reliableProofs(self, groupedProofs):
    """Pick, per seq-no range, the proof confirmed by an adjusted quorum."""
    # f is computed for the pool without this (catching-up) node.
    adjustedQuorum = Quorums(self.owner.totalNodes - 1)
    result = {}
    for (start, end), proofs_with_counts in groupedProofs.items():
        for proof, count in proofs_with_counts.items():
            if not adjustedQuorum.same_consistency_proof.is_reached(count):
                continue
            (view_no, lastPpSeqNo, oldRoot, newRoot, hashes) = proof
            result[(start, end)] = (view_no, lastPpSeqNo, oldRoot,
                                    newRoot, hashes)
            # There would be only one correct proof for a range of
            # sequence numbers
            break
    return result
def replica(tconf, viewNo, inst_id, request):
    """Fixture: a minimal Replica on a fake node with a stubbed BLS layer."""
    pool_quorums = Quorums(
        getValueFromModule(request, 'nodeCount', default=4))
    node = ReplicaFakeNode(viewNo=viewNo, quorums=pool_quorums)
    bls_stub = FakeSomething(gc=lambda *args: None,
                             update_pre_prepare=lambda params, l_id: params)
    new_replica = Replica(node,
                          instId=inst_id,
                          isMaster=(inst_id == 0),
                          config=tconf,
                          bls_bft_replica=bls_stub)
    ReplicaFakeNode.master_last_ordered_3PC = new_replica.last_ordered_3pc
    return new_replica
def start(self, request_ledger_statuses: bool):
    """Reset gathered catchup state; optionally poll peers for ledger status.

    :param request_ledger_statuses: when True, send LedgerStatus requests to
        all nodes and schedule the re-ask timer
    """
    logger.info("{} starts".format(self))
    self._is_working = True
    self._quorum = Quorums(len(self._provider.all_nodes_names()))
    self._same_ledger_status = set()
    self._cons_proofs = {}
    self._already_asked_for_cons_proofs_without_timeout = False
    self._last_txn_3PC_key = {}
    if not request_ledger_statuses:
        return
    self._request_ledger_status_from_nodes()
    self._schedule_reask_ledger_status()
def replica(tconf, viewNo, inst_id, ledger_ids, mock_timestamp, request):
    """Fixture: a Replica with pinned time sources and a stubbed BLS layer."""
    pool_quorums = Quorums(
        getValueFromModule(request, 'nodeCount', default=4))
    node = ReplicaFakeNode(viewNo=viewNo,
                           quorums=pool_quorums,
                           ledger_ids=ledger_ids)
    bls_stub = FakeSomething(gc=lambda *args: None,
                             update_pre_prepare=lambda params, l_id: params)
    new_replica = Replica(node,
                          instId=inst_id,
                          isMaster=(inst_id == 0),
                          config=tconf,
                          bls_bft_replica=bls_stub,
                          get_current_time=mock_timestamp,
                          get_time_for_3pc_batch=mock_timestamp)
    ReplicaFakeNode.master_last_ordered_3PC = new_replica.last_ordered_3pc
    return new_replica
def fake_view_changer(request, tconf):
    """Fixture: a ViewChanger over a minimal fake node (no backup checks)."""
    stack = FakeSomething(name="fake stack",
                          connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    fake_monitor = FakeSomething(isMasterDegraded=lambda: False, )
    pool_quorums = Quorums(
        getValueFromModule(request, 'nodeCount', default=4))
    node = FakeSomething(
        name="SomeNode",
        viewNo=request.param,
        quorums=pool_quorums,
        nodestack=stack,
        utc_epoch=lambda *args: get_utc_epoch(),
        config=tconf,
        monitor=fake_monitor,
        discard=lambda a, b, c: print(b),
    )
    return ViewChanger(node)
def _get_last_txn_3PC_key(self, ledgerInfo):
    """Return the smallest 3PC key confirmed by a quorum of peers, or None."""
    quorum = Quorums(self.owner.totalNodes)
    frequencies = Counter(ledgerInfo.last_txn_3PC_key.values())
    quorumed_3PC_keys = []
    for key, freq in frequencies.most_common():
        if not quorum.ledger_status_last_3PC.is_reached(freq):
            continue
        # Ignore keys with an undefined view number or pp seq no.
        if key[0] is None or key[1] is None:
            continue
        quorumed_3PC_keys.append(key)

    if not quorumed_3PC_keys:
        return None
    return min_3PC_key(quorumed_3PC_keys)
def __init__(self, tmpdir, config=None):
    """Fake 4-node pool node with an internal bus, view changer and
    primary selector, set up for catchup tests.

    :param tmpdir: base directory path for the fake node
    :param config: optional node config; defaults to ``getConfigOnce()``
    """
    self.basedirpath = tmpdir
    self.name = 'Node1'
    self.internal_bus = InternalBus()
    self.db_manager = DatabaseManager()
    self.timer = QueueTimer()
    self.f = 1
    # Placeholder until the real replicas dict is built below; kept in case
    # Replica's constructor consults node.replicas — TODO confirm.
    self.replicas = dict()
    self.requests = Requests()
    self.rank = None
    self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
    self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
    self.nodeIds = []
    self.totalNodes = len(self.allNodeNames)
    self.mode = Mode.starting
    self.config = config or getConfigOnce()
    self.nodeStatusDB = None
    # One master replica and two backups.
    self.replicas = {
        0: Replica(node=self, instId=0, isMaster=True, config=self.config),
        1: Replica(node=self, instId=1, isMaster=False, config=self.config),
        2: Replica(node=self, instId=2, isMaster=False, config=self.config),
    }
    self._found = False
    self.ledgerManager = LedgerManager(self)
    ledger0 = FakeLedger(0, 10)
    ledger1 = FakeLedger(1, 5)
    self.ledgerManager.addLedger(0, ledger0)
    self.ledgerManager.addLedger(1, ledger1)
    self.quorums = Quorums(self.totalNodes)
    self.view_changer = create_view_changer(self)
    self.elector = PrimarySelector(self)
    self.metrics = NullMetricsCollector()

    # For catchup testing
    self.catchup_rounds_without_txns = 0
    self.view_change_in_progress = False
    self.ledgerManager.last_caught_up_3PC = (0, 0)
    self.master_last_ordered_3PC = (0, 0)
    self.seqNoDB = {}

    # callbacks
    # Fixed: the original lambda declared a leading `self` parameter, so the
    # stub could not be invoked with zero arguments. Accept anything.
    self.onBatchCreated = lambda *args, **kwargs: True
def test_primary_names_cleaning(tconf):
    """Replica keeps only the last two views' primary names: each new view's
    primary is appended and, once more than two are stored, the oldest entry
    is dropped."""
    # Minimal fake node — just the attributes Replica reads on construction.
    node = FakeSomething(
        name="fake node",
        ledger_ids=[0],
        viewNo=0,
        utc_epoch=get_utc_epoch,
        get_validators=lambda: [],
        db_manager=DatabaseManager(),
        requests=[],
        mode=Mode.participating,
        timer=QueueTimer(),
        quorums=Quorums(4),
        write_manager=None,
        poolManager=FakeSomething(node_names_ordered_by_rank=lambda: []),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            ["Alpha", "Beta", "Gamma", "Delta"]))
    bls_bft_replica = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf,
                      bls_bft_replica=bls_bft_replica)

    # View 0: first primary recorded.
    replica.primaryName = "Node1:0"
    assert list(replica.primaryNames.items()) == \
        [(0, "Node1:0")]

    # View 1: second primary appended, both views retained.
    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node2:0"
    assert list(replica.primaryNames.items()) == \
        [(0, "Node1:0"), (1, "Node2:0")]

    # View 2: oldest entry (view 0) is evicted.
    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node3:0"
    assert list(replica.primaryNames.items()) == \
        [(1, "Node2:0"), (2, "Node3:0")]

    # View 3: eviction continues to keep exactly two entries.
    node.viewNo += 1
    replica._consensus_data.view_no = node.viewNo
    replica.primaryName = "Node4:0"
    assert list(replica.primaryNames.items()) == \
        [(2, "Node3:0"), (3, "Node4:0")]
def lt_eq_gt(request, rs, nvm):
    """Fixture: register NewView votes from non-primary validators so the
    vote count is below ('lt'), equal to ('eq'), or above ('gt') the strong
    quorum; returns (expected_new_view_or_None, rs)."""
    primary_node = replica_name_to_node_name(rs._data.primary_name)
    voters = [v for v in rs._data.validators if v != primary_node]
    strong = Quorums(len(rs._data.validators)).strong.value

    if request.param == 'lt':
        # One vote short of the strong quorum — no NewView expected.
        for _ in range(strong - 1):
            rs._data.new_view_votes.add_new_view(nvm, voters.pop())
        return None, rs
    if request.param == 'eq':
        for _ in range(strong):
            rs._data.new_view_votes.add_new_view(nvm, voters.pop())
        return nvm, rs
    # 'gt': one more than the quorum, bounded by the available voters.
    for _ in range(strong + 1):
        if voters:
            rs._data.new_view_votes.add_new_view(nvm, voters.pop())
    return nvm, rs