class TxnVersionController(ITxnVersionController):

    def __init__(self) -> None:
        self._versions = SortedDict()
        self._f = 0
        self._votes_for_new_version = SortedDict()

    @property
    def version(self):
        return self._versions.peekitem(-1)[1] if self._versions else None

    def get_pool_version(self, timestamp):
        if timestamp is None:
            return self.version
        last_version = None
        for upgrade_tm, version in self._versions.items():
            if timestamp < upgrade_tm:
                return last_version
            last_version = version
        return last_version

    def update_version(self, txn):
        if get_type(txn) == POOL_UPGRADE and get_payload_data(txn).get(ACTION) == START:
            N = len(get_payload_data(txn).get(SCHEDULE, {}))
            self._f = (N - 1) // 3
        elif get_type(txn) == NODE_UPGRADE and get_payload_data(txn)[DATA][ACTION] == COMPLETE:
            version = get_payload_data(txn)[DATA][VERSION]
            self._votes_for_new_version.setdefault(version, set())
            self._votes_for_new_version[version].add(get_from(txn))
            if len(self._votes_for_new_version[version]) > self._f:
                self._versions[get_txn_time(txn)] = version
                self._votes_for_new_version = SortedDict({v: senders
                                                          for v, senders in self._votes_for_new_version.items()
                                                          if v > version})
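# A minimal, self-contained sketch (not part of the production class above) illustrating how
# get_pool_version resolves the pool version active at a given timestamp: _versions maps
# upgrade txn time -> version, and the lookup returns the last version whose upgrade time
# does not exceed the requested timestamp. Assumes only the sortedcontainers package.
from sortedcontainers import SortedDict


def pool_version_at(versions: SortedDict, timestamp):
    # versions: SortedDict mapping upgrade timestamp -> version string
    if timestamp is None:
        return versions.peekitem(-1)[1] if versions else None
    last_version = None
    for upgrade_tm, version in versions.items():
        if timestamp < upgrade_tm:
            return last_version
        last_version = version
    return last_version


versions = SortedDict({1000: "1.9.0", 2000: "1.10.0"})
assert pool_version_at(versions, 500) is None       # before any upgrade completed
assert pool_version_at(versions, 1500) == "1.9.0"   # between the two upgrades
assert pool_version_at(versions, 2500) == "1.10.0"  # after the latest upgrade
assert pool_version_at(versions, None) == "1.10.0"  # no timestamp -> latest version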
class TxnVersionController(ITxnVersionController):

    def __init__(self) -> None:
        self._version = None
        self._f = 0
        self._votes_for_new_version = SortedDict()

    @property
    def version(self):
        return self._version

    def update_version(self, txn):
        if get_type(txn) == POOL_UPGRADE and get_payload_data(txn).get(ACTION) == START:
            N = len(get_payload_data(txn).get(SCHEDULE, {}))
            self._f = (N - 1) // 3
        elif get_type(txn) == NODE_UPGRADE and get_payload_data(txn)[DATA][ACTION] == COMPLETE:
            version = get_payload_data(txn)[DATA][VERSION]
            self._votes_for_new_version.setdefault(version, set())
            self._votes_for_new_version[version].add(get_from(txn))
            if len(self._votes_for_new_version[version]) > self._f:
                self._version = version
                self._votes_for_new_version = SortedDict({v: senders
                                                          for v, senders in self._votes_for_new_version.items()
                                                          if v > version})
def test_update_state(handler_and_request):
    handler, request = handler_and_request
    seq_no = 1
    txn_time = 1560241033
    txn = reqToTxn(request)
    append_txn_metadata(txn, seq_no, txn_time)
    op = request.operation

    handler.update_state(txn, None, request)

    value = {
        'id': op[RS_ID],
        'rsType': op[RS_TYPE],
        'rsName': op[RS_NAME],
        'rsVersion': op[RS_VERSION],
        'content': op[RS_CONTENT],
        'from': request.identifier,
        'endorser': request.endorser,
        'ver': op[OP_VER],
    }
    primary_key = op[RS_ID]
    secondary_key = "{RS_TYPE}:{RS_NAME}:{RS_VERSION}".format(RS_TYPE=op['rsType'],
                                                              RS_NAME=op['rsName'],
                                                              RS_VERSION=op['rsVersion']).encode()

    value_from_state = handler.get_from_state(primary_key)
    assert SortedDict(value_from_state[0]) == SortedDict(value)
    assert value_from_state[1] == seq_no
    assert value_from_state[2] == txn_time
    assert handler.state.get(secondary_key, isCommitted=False) == op[RS_ID].encode()
def test_state_proof_returned_for_get_rich_schema_obj_by_metadata(looper,
                                                                   nodeSetWithOneNodeResponding,
                                                                   sdk_wallet_endorser,
                                                                   sdk_pool_handle,
                                                                   sdk_wallet_client,
                                                                   write_rich_schema,
                                                                   txn_type, rs_type, content,
                                                                   rs_id, rs_name, rs_version):
    """
    Tests that state proof is returned in the reply for GET_RICH_SCHEMA_OBJECT_BY_METADATA.
    Use a different submitter and reader!
    """
    get_rich_schema_by_metadata_operation = {
        TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_METADATA,
        RS_NAME: rs_name,
        RS_VERSION: rs_version,
        RS_TYPE: rs_type
    }

    result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
                                                 sdk_wallet_client,
                                                 get_rich_schema_by_metadata_operation)
    expected_data = SortedDict({
        'id': rs_id,
        'rsType': rs_type,
        'rsName': rs_name,
        'rsVersion': rs_version,
        'content': content,
        'from': sdk_wallet_endorser[1]
    })
    assert SortedDict(result['data']) == expected_data
    assert result['seqNo']
    assert result['txnTime']
    assert result['state_proof']
    check_valid_proof(result)
def __init__(self, data: ConsensusSharedData, bus: InternalBus, network: ExternalBus,
             stasher: StashingRouter, db_manager: DatabaseManager,
             old_stasher: ReplicaStasher, is_master=True):
    self._data = data
    self._bus = bus
    self._network = network
    self._checkpoint_state = SortedDict(lambda k: k[1])
    self._stasher = stasher
    self._is_master = is_master
    self._validator = CheckpointMsgValidator(self._data)
    self._db_manager = db_manager

    # Stashed checkpoints for each view. The key of the outermost
    # dictionary is the view_no, value being a dictionary with key as the
    # range of the checkpoint and its value again being a mapping between
    # senders and their sent checkpoint
    # Dict[view_no, Dict[(seqNoStart, seqNoEnd), Dict[sender, Checkpoint]]]
    self._stashed_recvd_checkpoints = {}

    self._config = getConfig()
    self._logger = getlogger()
    self._old_stasher = old_stasher
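# A minimal sketch (hypothetical helper, not part of the service above) of how the
# _stashed_recvd_checkpoints structure described in the comment is populated:
# Dict[view_no, Dict[(seqNoStart, seqNoEnd), Dict[sender, Checkpoint]]].
def stash_checkpoint(stashed: dict, view_no: int, seq_no_start: int,
                     seq_no_end: int, sender: str, checkpoint) -> int:
    # Create the per-view and per-range dictionaries lazily, then record the
    # checkpoint under its sender; return how many senders stashed this range.
    by_range = stashed.setdefault(view_no, {})
    by_sender = by_range.setdefault((seq_no_start, seq_no_end), {})
    by_sender[sender] = checkpoint
    return len(by_sender)


stashed = {}
stash_checkpoint(stashed, view_no=0, seq_no_start=1, seq_no_end=100,
                 sender="Beta", checkpoint="<Checkpoint digest>")
n = stash_checkpoint(stashed, view_no=0, seq_no_start=1, seq_no_end=100,
                     sender="Gamma", checkpoint="<Checkpoint digest>")
assert n == 2  # two senders stashed a checkpoint for range (1, 100) in view 0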
def test_send_get_rich_schema_obj_by_metadata(looper,
                                              sdk_pool_handle,
                                              sdk_wallet_endorser,
                                              sdk_wallet_client,
                                              write_rich_schema,
                                              txn_type, rs_type, content,
                                              rs_id, rs_name, rs_version):
    get_rich_schema_by_metadata_operation = {
        TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_METADATA,
        RS_NAME: rs_name,
        RS_VERSION: rs_version,
        RS_TYPE: rs_type
    }

    result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
                                                 sdk_wallet_client,
                                                 get_rich_schema_by_metadata_operation)
    expected_data = SortedDict({
        'id': rs_id,
        'rsType': rs_type,
        'rsName': rs_name,
        'rsVersion': rs_version,
        'content': json.dumps(content),
        'from': sdk_wallet_endorser[1],
        'endorser': None,
        'ver': None
    })
    assert SortedDict(result['data']) == expected_data
    assert result['seqNo']
    assert result['txnTime']
    assert result['state_proof']
    check_valid_proof(result)
def test_append_txn_metadata():
    txn = init_empty_txn(txn_type=NODE, protocol_version="3")
    set_payload_data(txn, {"somekey": "somevalue"})
    append_payload_metadata(txn, frm="DID1", req_id=12345)
    append_txn_metadata(txn, seq_no=144, txn_time=12345678, txn_id="dddd")
    expected = SortedDict({
        "reqSignature": {},
        "txn": {
            "data": {
                "somekey": "somevalue"
            },
            "metadata": {
                "from": "DID1",
                "reqId": 12345,
            },
            "protocolVersion": "3",
            "type": NODE,
        },
        "txnMetadata": {
            "seqNo": 144,
            "txnId": "dddd",
            "txnTime": 12345678,
        },
        "ver": "1"
    })
    assert SortedDict(expected) == SortedDict(txn)
def test_append_payload_metadata():
    txn = init_empty_txn(txn_type=NODE, protocol_version="3")
    set_payload_data(txn, {"somekey": "somevalue"})
    append_payload_metadata(txn, frm="DID1", req_id=12345,
                            digest="random req digest", payload_digest="random payload")
    expected = SortedDict({
        "reqSignature": {},
        "txn": {
            "data": {
                "somekey": "somevalue"
            },
            "metadata": {
                "from": "DID1",
                "reqId": 12345,
                "digest": "random req digest",
                "payloadDigest": "random payload"
            },
            "protocolVersion": "3",
            "type": NODE,
        },
        "txnMetadata": {},
        "ver": "1"
    })
    assert SortedDict(expected) == SortedDict(txn)
def __init__(self, node, monitor: Monitor, config=None,
             metrics: MetricsCollector = NullMetricsCollector()):
    # passing full node because Replica requires it
    self._node = node
    self._monitor = monitor
    self._metrics = metrics
    self._config = config
    self._replicas = SortedDict()  # type: SortedDict[int, Replica]
    self._messages_to_replicas = dict()  # type: Dict[deque]
    self.register_monitor_handler()
def __init__(self, node):
    self.node = node
    self.view_no = 0  # type: int
    HasActionQueue.__init__(self)
    self.inBox = deque()
    self.outBox = deque()
    self.inBoxRouter = Router(
        (InstanceChange, self.process_instance_change_msg),
        (ViewChangeDone, self.process_vchd_msg)
    )
    self.instanceChanges = InstanceChanges()

    # The quorum of `ViewChangeDone` msgs is different depending on whether we're doing a real view change,
    # or just propagating view_no and Primary from `CurrentState` messages sent to a newly joined Node.
    # TODO: separate real view change and Propagation of Primary
    # TODO: separate catch-up, view-change and primary selection so that
    # they are really independent.
    self.propagate_primary = False

    # Tracks if other nodes are indicating that this node is in lower view
    # than others. Keeps a map of view no to senders
    # TODO: Consider if sufficient ViewChangeDone for 2 different (and
    # higher views) are received, should one view change be interrupted in
    # between.
    self._next_view_indications = SortedDict()

    self._view_change_in_progress = False

    self.previous_master_primary = None

    self.set_defaults()

    self.initInsChngThrottling()

    # Action for _schedule instanceChange messages
    self.instance_change_action = None
    # Count of instance change rounds
    self.instance_change_rounds = 0
    # Time for view_change_starting
    self.start_view_change_ts = 0

    # Last successful viewNo.
    # In some cases view_change process can be uncompleted in time.
    # In that case we want to know, which viewNo was successful (last completed view_change)
    self.last_completed_view_no = 0

    # Force periodic view change if enabled in config
    force_view_change_freq = node.config.ForceViewChangeFreq
    if force_view_change_freq > 0:
        self.startRepeating(self.on_master_degradation, force_view_change_freq)
def update_version(self, txn):
    if get_type(txn) == POOL_UPGRADE and get_payload_data(txn).get(ACTION) == START:
        N = len(get_payload_data(txn).get(SCHEDULE, {}))
        self._f = (N - 1) // 3
    elif get_type(txn) == NODE_UPGRADE and get_payload_data(txn)[DATA][ACTION] == COMPLETE:
        version = get_payload_data(txn)[DATA][VERSION]
        self._votes_for_new_version.setdefault(version, set())
        self._votes_for_new_version[version].add(get_from(txn))
        if len(self._votes_for_new_version[version]) > self._f:
            self._versions[get_txn_time(txn)] = version
            self._votes_for_new_version = SortedDict({v: senders
                                                      for v, senders in self._votes_for_new_version.items()
                                                      if v > version})
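# Worked example (illustrative only) of the quorum arithmetic used in update_version:
# with N nodes in the upgrade SCHEDULE, f = (N - 1) // 3 is the number of tolerated
# faulty nodes, and a new version is adopted once strictly more than f distinct nodes
# have sent a NODE_UPGRADE/COMPLETE txn for it.
def upgrade_quorum(schedule_size: int) -> int:
    f = (schedule_size - 1) // 3
    return f + 1  # votes needed: strictly more than f


assert upgrade_quorum(4) == 2   # N=4  -> f=1, so 2 COMPLETE votes adopt the version
assert upgrade_quorum(7) == 3   # N=7  -> f=2
assert upgrade_quorum(10) == 4  # N=10 -> f=3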
def test_init_empty_txn_no_protocol_ver():
    txn = init_empty_txn(txn_type=NYM)
    expected = {
        "reqSignature": {},
        "txn": {
            "data": {},
            "metadata": {},
            "type": NYM,
            "protocolVersion": CURRENT_PROTOCOL_VERSION
        },
        "txnMetadata": {},
        "ver": "1"
    }
    assert SortedDict(expected) == SortedDict(txn)
def test_init_empty_txn_with_protocol_ver():
    txn = init_empty_txn(txn_type=NODE, protocol_version="3")
    expected = {
        "reqSignature": {},
        "txn": {
            "data": {},
            "metadata": {},
            "protocolVersion": "3",
            "type": NODE,
        },
        "txnMetadata": {},
        "ver": "1"
    }
    assert SortedDict(expected) == SortedDict(txn)
def test_init_empty_txn_no_protocol_ver():
    txn = init_empty_txn(txn_type=NYM)
    expected = {
        "reqSignature": {},
        "txn": {
            "data": {},
            "metadata": {},
            "type": NYM,
        },
        "txnMetadata": {},
        "ver": "1"
    }
    assert SortedDict(expected) == SortedDict(txn)
def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply, reply_count):
    '''
    Add txn_count transactions to node's ledger and return a CatchupTill covering all
    new transactions and a list of CatchupReplies
    :return: CatchupTill, list of CatchupReplies
    '''
    txn_count = num_txns_in_reply * reply_count
    ledger_manager = node.ledgerManager
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    catchup_rep_service = ledger_manager._node_leecher._leechers[ledger_id]._catchup_rep_service
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count)

    # add transactions to ledger
    for req in reqs:
        txn = append_txn_metadata(reqToTxn(req), txn_time=12345678)
        catchup_rep_service._add_txn(txn)

    # generate CatchupReps
    replies = []
    for i in range(ledger.seqNo - txn_count + 1, ledger.seqNo + 1, num_txns_in_reply):
        start = i
        end = i + num_txns_in_reply - 1
        cons_proof = ledger_manager._node_seeder._make_consistency_proof(ledger, end, ledger.size)
        txns = {}
        for seq_no, txn in ledger.getAllTxn(start, end):
            txns[str(seq_no)] = ledger_manager.owner.update_txn_with_extra_data(txn)
        replies.append(CatchupRep(ledger_id, SortedDict(txns), cons_proof))

    three_pc_key = node.three_phase_key_for_txn_seq_no(ledger_id, ledger.seqNo)
    view_no, pp_seq_no = three_pc_key if three_pc_key else (0, 0)
    return CatchupTill(start_size=ledger.seqNo - txn_count,
                       final_size=ledger.seqNo,
                       final_hash=Ledger.hashToStr(ledger.tree.merkle_tree_hash(0, ledger.seqNo)),
                       view_no=view_no,
                       pp_seq_no=pp_seq_no), replies
def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply, reply_count):
    '''
    Add txn_count transactions to node's ledger and return ConsistencyProof for all
    new transactions and a list of CatchupReplies
    :return: ConsistencyProof, list of CatchupReplies
    '''
    txn_count = num_txns_in_reply * reply_count
    ledger_manager = node.ledgerManager
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    ledger_info = ledger_manager.getLedgerInfoByType(ledger_id)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count)

    # add transactions to ledger
    for req in reqs:
        txn = append_txn_metadata(reqToTxn(req), txn_time=12345678)
        ledger_manager._add_txn(ledger_id, ledger, ledger_info, txn)

    # generate CatchupReps
    replies = []
    for i in range(ledger.seqNo - txn_count + 1, ledger.seqNo + 1, num_txns_in_reply):
        start = i
        end = i + num_txns_in_reply - 1
        cons_proof = ledger_manager._make_consistency_proof(ledger, end, ledger.size)
        txns = {}
        for seq_no, txn in ledger.getAllTxn(start, end):
            txns[str(seq_no)] = ledger_manager.owner.update_txn_with_extra_data(txn)
        replies.append(CatchupRep(ledger_id, SortedDict(txns), cons_proof))

    return ledger_manager._buildConsistencyProof(ledger_id,
                                                 ledger.seqNo - txn_count,
                                                 ledger.seqNo), replies
def test_req_to_txn_with_seq_no(req_and_expected):
    req, new_expected = req_and_expected
    new = SortedDict(
        append_txn_metadata(reqToTxn(req), seq_no=143, txn_time=2613945121))
    new_expected["txnMetadata"]["txnTime"] = 2613945121
    new_expected["txnMetadata"]["seqNo"] = 143
    assert new == new_expected
def test_set_payload_metadata():
    txn = init_empty_txn(txn_type=NODE, protocol_version="3")
    set_payload_data(txn, {"somekey": "somevalue"})
    expected = SortedDict({
        "reqSignature": {},
        "txn": {
            "data": {
                "somekey": "somevalue"
            },
            "metadata": {},
            "protocolVersion": "3",
            "type": NODE,
        },
        "txnMetadata": {},
        "ver": "1"
    })
    assert SortedDict(expected) == SortedDict(txn)
def _split(message):
    txns = list(message.txns.items())
    divider = len(message.txns) // 2
    left = txns[:divider]
    left_last_seq_no = left[-1][0]
    right = txns[divider:]
    right_last_seq_no = right[-1][0]
    left_cons_proof = self._make_consistency_proof(ledger,
                                                   left_last_seq_no,
                                                   initial_seq_no)
    right_cons_proof = self._make_consistency_proof(ledger,
                                                    right_last_seq_no,
                                                    initial_seq_no)
    ledger_id = getattr(message, f.LEDGER_ID.nm)
    left_rep = CatchupRep(ledger_id, SortedDict(left), left_cons_proof)
    right_rep = CatchupRep(ledger_id, SortedDict(right), right_cons_proof)
    return left_rep, right_rep
def test_init_empty_txn_with_payload_ver():
    txn = init_empty_txn(txn_type=NODE, protocol_version="3", txn_payload_version="10")
    expected = {
        "reqSignature": {},
        "txn": {
            "data": {},
            "metadata": {},
            "protocolVersion": "3",
            "type": NODE,
            "ver": "10"
        },
        "txnMetadata": {},
        "ver": CURRENT_TXN_VERSION
    }
    assert SortedDict(expected) == SortedDict(txn)
def test_get_rich_schema_obj_committed_only(db_manager, handler_and_request, metadata,
                                            get_rich_schema_by_meta_handler, get_rich_schema_req):
    # prepare: store object in state with bls multi-sig, and then update the object (uncommitted)
    handler, request = handler_and_request
    rs_name, rs_version, rs_type = metadata
    op = request.operation
    op[RS_NAME] = rs_name
    op[RS_VERSION] = rs_version
    op[RS_TYPE] = rs_type

    seq_no = 1
    txn_time = 1560241033
    txn = reqToTxn(request)
    append_txn_metadata(txn, seq_no, txn_time)
    handler.update_state(txn, None, request)
    handler.state.commit()
    get_payload_data(txn)[RS_CONTENT] = "new uncommitted content"
    handler.update_state(txn, None, request)
    save_multi_sig(db_manager)

    # execute: get object
    result = get_rich_schema_by_meta_handler.get_result(get_rich_schema_req)

    # check
    assert result
    expected_data = SortedDict({
        'ver': op[OP_VER],
        'id': op[RS_ID],
        'rsType': op[RS_TYPE],
        'rsName': op[RS_NAME],
        'rsVersion': op[RS_VERSION],
        'content': op[RS_CONTENT],
        'from': request.identifier,
        'endorser': request.endorser,
    })
    assert SortedDict(result['data']) == expected_data
    assert result['seqNo'] == seq_no
    assert result['txnTime'] == txn_time
    assert result['state_proof']
    check_valid_proof(result)

    path = op[RS_ID].encode()
    assert is_proof_verified(db_manager, result['state_proof'], path,
                             result[DATA], seq_no, txn_time)
def __init__(self, node):
    self.node = node
    self.view_no = 0  # type: int
    HasActionQueue.__init__(self)
    self.inBox = deque()
    self.outBox = deque()
    self.inBoxRouter = Router(
        (InstanceChange, self.process_instance_change_msg),
        (ViewChangeDone, self.process_vchd_msg)
    )
    self.instanceChanges = InstanceChanges()

    # The quorum of `ViewChangeDone` msgs is different depending on whether we're doing a real view change,
    # or just propagating view_no and Primary from `CurrentState` messages sent to a newly joined Node.
    # TODO: separate real view change and Propagation of Primary
    # TODO: separate catch-up, view-change and primary selection so that
    # they are really independent.
    self.propagate_primary = False

    # Tracks if other nodes are indicating that this node is in lower view
    # than others. Keeps a map of view no to senders
    # TODO: Consider if sufficient ViewChangeDone for 2 different (and
    # higher views) are received, should one view change be interrupted in
    # between.
    self._next_view_indications = SortedDict()

    self._view_change_in_progress = False

    self.previous_master_primary = None

    self.set_defaults()

    self.initInsChngThrottling()

    # Action for _schedule instanceChange messages
    self.instance_change_action = None
    # Count of instance change rounds
    self.instance_change_rounds = 0
def process_catchup_req(self, req: CatchupReq, frm: str):
    logger.info("{} received catchup request: {} from {}".format(self, req, frm))

    ledger_id, ledger = self._get_ledger_and_id(req)

    if ledger is None:
        self._provider.discard(req,
                               reason="it references invalid ledger",
                               logMethod=logger.warning)
        return

    start = req.seqNoStart
    end = req.seqNoEnd

    if start > end:
        self._provider.discard(req,
                               reason="not able to service since start = {} "
                                      "greater than end = {}".format(start, end),
                               logMethod=logger.debug)
        return

    if end > req.catchupTill:
        self._provider.discard(req,
                               reason="not able to service since end = {} "
                                      "greater than catchupTill = {}".format(end, req.catchupTill),
                               logMethod=logger.debug)
        return

    if req.catchupTill > ledger.size:
        self._provider.discard(req,
                               reason="not able to service since catchupTill = {} "
                                      "greater than ledger size = {}".format(req.catchupTill, ledger.size),
                               logMethod=logger.debug)
        return

    cons_proof = ledger.tree.consistency_proof(end, req.catchupTill)
    cons_proof = [Ledger.hashToStr(p) for p in cons_proof]

    txns = {}
    for seq_no, txn in ledger.getAllTxn(start, end):
        txns[seq_no] = self._provider.update_txn_with_extra_data(txn)
    txns = SortedDict(txns)  # TODO: Do we really need them sorted on the sending side?

    rep = CatchupRep(ledger_id, txns, cons_proof)
    message_splitter = self._make_splitter_for_catchup_rep(ledger, req.catchupTill)
    self._provider.send_to(rep, frm, message_splitter)
def processCatchupReq(self, req: CatchupReq, frm: str):
    logger.debug("{} received catchup request: {} from {}".format(self, req, frm))
    if not self.ownedByNode:
        self.discard(req,
                     reason="Only node can serve catchup requests",
                     logMethod=logger.warning)
        return

    start = getattr(req, f.SEQ_NO_START.nm)
    end = getattr(req, f.SEQ_NO_END.nm)
    ledger = self.getLedgerForMsg(req)
    if end < start:
        self.discard(req, reason="Invalid range", logMethod=logger.warning)
        return

    ledger_size = ledger.size

    if start > ledger_size:
        self.discard(req,
                     reason="{} not able to service since "
                            "ledger size is {} and start is {}".format(self, ledger_size, start),
                     logMethod=logger.debug)
        return

    if req.catchupTill > ledger_size:
        self.discard(req,
                     reason="{} not able to service since "
                            "ledger size is {} and catchupTill is {}".format(self, ledger_size,
                                                                             req.catchupTill),
                     logMethod=logger.debug)
        return

    # Adjusting for end greater than ledger size
    if end > ledger_size:
        logger.debug("{} does not have transactions till {} "
                     "so sending only till {}".format(self, end, ledger_size))
        end = ledger_size

    logger.debug("node {} requested catchup for {} from {} to {}".format(frm, end - start + 1,
                                                                         start, end))

    logger.debug("{} generating consistency proof: {} from {}".format(self, end, req.catchupTill))
    cons_proof = self._make_consistency_proof(ledger, end, req.catchupTill)

    txns = {}
    for seq_no, txn in ledger.getAllTxn(start, end):
        txns[seq_no] = self.owner.update_txn_with_extra_data(txn)

    sorted_txns = SortedDict(txns)
    rep = CatchupRep(getattr(req, f.LEDGER_ID.nm), sorted_txns, cons_proof)
    message_splitter = self._make_split_for_catchup_rep(ledger, req.catchupTill)
    self.sendTo(msg=rep, to=frm, message_splitter=message_splitter)
def test_state_proof_returned_for_get_rich_schema_obj_by_id(looper,
                                                             nodeSetWithOneNodeResponding,
                                                             sdk_wallet_endorser,
                                                             sdk_pool_handle,
                                                             sdk_wallet_client,
                                                             txn_type, rs_type, content):
    """
    Tests that state proof is returned in the reply for GET_RICH_SCHEMA_OBJECT_BY_ID.
    Use a different submitter and reader!
    """
    rs_id = randomString()
    rs_name = randomString()
    rs_version = '1.0'
    sdk_write_rich_schema_object_and_check(looper, sdk_wallet_endorser, sdk_pool_handle,
                                           txn_type=txn_type, rs_id=rs_id, rs_name=rs_name,
                                           rs_version=rs_version, rs_type=rs_type,
                                           rs_content=content)

    get_rich_schema_by_id_operation = {
        TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_ID,
        RS_ID: rs_id,
    }

    result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
                                                 sdk_wallet_client,
                                                 get_rich_schema_by_id_operation)
    expected_data = SortedDict({
        'id': rs_id,
        'rsType': rs_type,
        'rsName': rs_name,
        'rsVersion': rs_version,
        'content': content,
        'from': sdk_wallet_endorser[1]
    })
    assert SortedDict(result['data']) == expected_data
    assert result['seqNo']
    assert result['txnTime']
    assert result['state_proof']
    check_valid_proof(result)
def __init__(self, database_manager: DatabaseManager):
    BatchRequestHandler.__init__(self, database_manager, POOL_LEDGER_ID)
    WriteRequestHandler.__init__(self, database_manager, NODE, POOL_LEDGER_ID)
    self.uncommitted_node_reg = []
    self.committed_node_reg = []

    # committed node reg at the beginning of view
    # matches the committed node reg BEFORE the first txn in a view is applied
    # (that is according to the last txn in the last view)
    self.committed_node_reg_at_beginning_of_view = SortedDict()

    # uncommitted node reg at the beginning of view
    # matches the uncommitted node reg BEFORE the first txn in a view is applied
    # (that is according to the last txn in the last view)
    self.uncommitted_node_reg_at_beginning_of_view = SortedDict()

    self._uncommitted = deque()  # type: deque[UncommittedNodeReg]
    self._uncommitted_view_no = 0
    self._committed_view_no = 0
    self.internal_bus = None  # type: InternalBus
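# A small sketch (hypothetical helper, not part of the handler above) of the lookup pattern
# the *_node_reg_at_beginning_of_view SortedDicts enable: each maps a view_no to the node
# registry as it stood before the first txn of that view was applied, so the registry for an
# earlier view is the snapshot with the greatest view_no not exceeding the requested one.
# Assumes only the sortedcontainers package.
from sortedcontainers import SortedDict


def node_reg_for_view(snapshots: SortedDict, view_no: int):
    # Find the latest snapshot taken at or before view_no.
    keys = list(snapshots.irange(maximum=view_no))
    return snapshots[keys[-1]] if keys else None


snapshots = SortedDict({0: ["Alpha", "Beta", "Gamma", "Delta"],
                        3: ["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]})
assert node_reg_for_view(snapshots, 2) == ["Alpha", "Beta", "Gamma", "Delta"]
assert node_reg_for_view(snapshots, 5)[-1] == "Epsilon"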
def test_send_get_rich_schema_obj_by_id(looper, sdk_pool_handle, sdk_wallet_endorser,
                                        txn_type, rs_type, content):
    rs_id = randomString()
    rs_name = randomString()
    rs_version = '1.0'
    sdk_write_rich_schema_object_and_check(looper, sdk_wallet_endorser, sdk_pool_handle,
                                           txn_type=txn_type, rs_id=rs_id, rs_name=rs_name,
                                           rs_version=rs_version, rs_type=rs_type,
                                           rs_content=content)

    get_rich_schema_by_id_operation = {
        TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_ID,
        RS_ID: rs_id,
    }

    result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
                                                 sdk_wallet_endorser,
                                                 get_rich_schema_by_id_operation)
    expected_data = SortedDict({
        'id': rs_id,
        'rsType': rs_type,
        'rsName': rs_name,
        'rsVersion': rs_version,
        'content': json.dumps(content),
        'from': sdk_wallet_endorser[1],
        'endorser': None,
        'ver': None
    })
    assert SortedDict(result['data']) == expected_data
    assert result['seqNo']
    assert result['txnTime']
    assert result['state_proof']
    check_valid_proof(result)
def _split(message):
    txns = list(message.txns.items())
    if len(message.txns) < 2:
        logger.warning("CatchupRep has {} txn(s). This is not enough "
                       "to split. Message: {}".format(len(message.txns), message))
        return None

    divider = len(message.txns) // 2
    left = txns[:divider]
    left_last_seq_no = left[-1][0]
    right = txns[divider:]
    right_last_seq_no = right[-1][0]
    left_cons_proof = self._make_consistency_proof(ledger,
                                                   left_last_seq_no,
                                                   initial_seq_no)
    right_cons_proof = self._make_consistency_proof(ledger,
                                                    right_last_seq_no,
                                                    initial_seq_no)
    ledger_id = message.ledgerId
    left_rep = CatchupRep(ledger_id, SortedDict(left), left_cons_proof)
    right_rep = CatchupRep(ledger_id, SortedDict(right), right_cons_proof)
    return left_rep, right_rep
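# Illustrative sketch (standalone, without the consistency proofs) of the halving step used
# by _split above: the txns of an oversized CatchupRep are divided into two contiguous
# halves, each re-wrapped as its own sorted mapping. Assumes only sortedcontainers.
from sortedcontainers import SortedDict


def split_txns(txns: SortedDict):
    items = list(txns.items())
    if len(items) < 2:
        return None  # nothing to split, mirroring the warning branch above
    divider = len(items) // 2
    return SortedDict(items[:divider]), SortedDict(items[divider:])


txns = SortedDict({1: "txn1", 2: "txn2", 3: "txn3", 4: "txn4", 5: "txn5"})
left, right = split_txns(txns)
assert list(left.keys()) == [1, 2]
assert list(right.keys()) == [3, 4, 5]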
def __init__(self,
             data: ConsensusSharedData,
             bus: InternalBus,
             network: ExternalBus,
             stasher: StashingRouter,
             db_manager: DatabaseManager,
             metrics: MetricsCollector = NullMetricsCollector()):
    self._data = data
    self._bus = bus
    self._network = network
    self._checkpoint_state = SortedDict(lambda k: k[1])
    self._stasher = stasher
    self._subscription = Subscription()
    self._validator = CheckpointMsgValidator(self._data)
    self._db_manager = db_manager
    self.metrics = metrics

    # Stashed checkpoints for each view. The key of the outermost
    # dictionary is the view_no, value being a dictionary with key as the
    # range of the checkpoint and its value again being a mapping between
    # senders and their sent checkpoint
    # Dict[view_no, Dict[(seqNoStart, seqNoEnd), Dict[sender, Checkpoint]]]
    self._stashed_recvd_checkpoints = {}

    self._config = getConfig()
    self._logger = getlogger()

    self._subscription.subscribe(stasher, Checkpoint, self.process_checkpoint)

    self._subscription.subscribe(bus, Ordered, self.process_ordered)
    self._subscription.subscribe(bus, BackupSetupLastOrdered, self.process_backup_setup_last_ordered)
    self._subscription.subscribe(bus, NewViewAccepted, self.process_new_view_accepted)
def test_get_payload_data(txn):
    expected_payload_data = SortedDict({
        "type": NYM,
        "something": "nothing",
    })
    assert SortedDict(get_payload_data(txn)) == expected_payload_data
class Replicas:
    _replica_class = Replica

    def __init__(self, node, monitor: Monitor, config=None,
                 metrics: MetricsCollector = NullMetricsCollector()):
        # passing full node because Replica requires it
        self._node = node
        self._monitor = monitor
        self._metrics = metrics
        self._config = config
        self._replicas = SortedDict()  # type: SortedDict[int, Replica]
        self._messages_to_replicas = dict()  # type: Dict[deque]
        self.register_monitor_handler()

    def add_replica(self, instance_id) -> int:
        is_master = instance_id == 0
        description = "master" if is_master else "backup"
        bls_bft = self._create_bls_bft_replica(is_master)
        replica = self._new_replica(instance_id, is_master, bls_bft)
        self._replicas[instance_id] = replica
        self._messages_to_replicas[instance_id] = deque()
        self._monitor.addInstance(instance_id)
        logger.display("{} added replica {} to instance {} ({})"
                       .format(self._node.name, replica, instance_id, description),
                       extra={"tags": ["node-replica"]})

    def remove_replica(self, inst_id: int):
        if inst_id not in self._replicas:
            return
        replica = self._replicas.pop(inst_id)

        # Aggregate all the currently forwarded requests
        req_keys = set()
        for msg in replica.inBox:
            if isinstance(msg, ReqKey):
                req_keys.add(msg.digest)
        for req_queue in replica.requestQueues.values():
            for req_key in req_queue:
                req_keys.add(req_key)
        for pp in replica.sentPrePrepares.values():
            for req_key in pp.reqIdr:
                req_keys.add(req_key)
        for pp in replica.prePrepares.values():
            for req_key in pp.reqIdr:
                req_keys.add(req_key)

        for req_key in req_keys:
            if req_key in replica.requests:
                replica.requests.free(req_key)

        self._messages_to_replicas.pop(inst_id, None)
        self._monitor.removeInstance(inst_id)
        logger.display("{} removed replica {} from instance {}"
                       .format(self._node.name, replica, replica.instId),
                       extra={"tags": ["node-replica"]})

    # TODO unit test
    @property
    def some_replica_is_primary(self) -> bool:
        return any([r.isPrimary for r in self._replicas.values()])

    @property
    def master_replica_is_primary(self):
        if self.num_replicas > 0:
            return self._master_replica.isPrimary

    @property
    def _master_replica(self):
        return self._replicas[MASTER_REPLICA_INDEX]

    def service_inboxes(self, limit: int = None):
        number_of_processed_messages = \
            sum(replica.serviceQueues(limit) for replica in self._replicas.values())
        return number_of_processed_messages

    def pass_message(self, message, instance_id=None):
        if instance_id is not None:
            if instance_id not in self._replicas.keys():
                return
            self._replicas[instance_id].inBox.append(message)
        else:
            for replica in self._replicas.values():
                replica.inBox.append(message)

    def get_output(self, limit: int = None) -> Generator:
        if limit is None:
            per_replica = None
        else:
            per_replica = round(limit / self.num_replicas)
            if per_replica == 0:
                logger.debug("{} forcibly setting replica "
                             "message limit to {}"
                             .format(self._node.name, per_replica))
                per_replica = 1
        for replica in list(self._replicas.values()):
            num = 0
            while replica.outBox:
                yield replica.outBox.popleft()
                num += 1
                if per_replica and num >= per_replica:
                    break

    def take_ordereds_out_of_turn(self) -> tuple:
        """
        Takes all Ordered messages from outbox out of turn
        """
        for replica in self._replicas.values():
            yield replica.instId, replica._remove_ordered_from_queue()

    def _new_replica(self, instance_id: int, is_master: bool, bls_bft: BlsBft) -> Replica:
        """
        Create a new replica with the specified parameters.
        """
        return self._replica_class(self._node, instance_id, self._config,
                                   is_master, bls_bft, self._metrics)

    def _create_bls_bft_replica(self, is_master):
        bls_factory = create_default_bls_bft_factory(self._node)
        bls_bft_replica = bls_factory.create_bls_bft_replica(is_master)
        return bls_bft_replica

    @property
    def num_replicas(self):
        return len(self._replicas)

    @property
    def sum_inbox_len(self):
        return sum(len(replica.inBox) for replica in self._replicas.values())

    @property
    def all_instances_have_primary(self) -> bool:
        return all(replica.primaryName is not None
                   for replica in self._replicas.values())

    @property
    def primary_name_by_inst_id(self) -> dict:
        return {r.instId: r.primaryName.split(":", maxsplit=1)[0] if r.primaryName else None
                for r in self._replicas.values()}

    @property
    def inst_id_by_primary_name(self) -> dict:
        return {r.primaryName.split(":", maxsplit=1)[0]: r.instId
                for r in self._replicas.values() if r.primaryName}

    def register_new_ledger(self, ledger_id):
        for replica in self._replicas.values():
            replica.register_ledger(ledger_id)

    def register_monitor_handler(self):
        # attention: handlers will work over unordered request only once
        self._monitor.unordered_requests_handlers.append(
            self.unordered_request_handler_logging)

    def unordered_request_handler_logging(self, unordereds):
        replica = self._master_replica
        for unordered in unordereds:
            reqId, duration = unordered

            # get ppSeqNo and viewNo
            preprepares = replica.sentPrePrepares if replica.isPrimary else replica.prePrepares
            ppSeqNo = None
            viewNo = None
            for key in preprepares:
                if any([pre_pre_req == reqId for pre_pre_req in preprepares[key].reqIdr]):
                    ppSeqNo = preprepares[key].ppSeqNo
                    viewNo = preprepares[key].viewNo
                    break
            if ppSeqNo is None or viewNo is None:
                logger.warning('Unordered request with reqId: {} was not found in prePrepares. '
                               'Prepares count: {}, Commits count: {}'
                               .format(reqId, len(replica.prepares), len(replica.commits)))
                continue

            # get pre-prepare sender
            prepre_sender = replica.primaryNames[viewNo]

            # get prepares info
            prepares = replica.prepares[(viewNo, ppSeqNo)][0] \
                if (viewNo, ppSeqNo) in replica.prepares else []
            n_prepares = len(prepares)
            str_prepares = 'noone'
            if n_prepares:
                str_prepares = ', '.join(prepares)

            # get commits info
            commits = replica.commits[(viewNo, ppSeqNo)][0] \
                if (viewNo, ppSeqNo) in replica.commits else []
            n_commits = len(commits)
            str_commits = 'noone'
            if n_commits:
                str_commits = ', '.join(commits)

            # get txn content
            content = replica.requests[reqId].finalised.as_dict \
                if reqId in replica.requests else 'no content saved'

            logger.warning('Consensus for digest {} was not achieved within {} seconds. '
                           'Primary node is {}. '
                           'Received Pre-Prepare from {}. '
                           'Received {} valid Prepares from {}. '
                           'Received {} valid Commits from {}. '
                           'Transaction contents: {}. '
                           .format(reqId, duration, replica.primaryName.split(':')[0],
                                   prepre_sender, n_prepares, str_prepares,
                                   n_commits, str_commits, content))

    def keys(self):
        return self._replicas.keys()

    def values(self):
        return self._replicas.values()

    def items(self):
        return self._replicas.items()

    def __getitem__(self, item):
        if not isinstance(item, int):
            raise PlenumTypeError('item', item, int)
        return self._replicas[item]

    def __len__(self):
        return self.num_replicas

    def __iter__(self):
        return self._replicas.__iter__()