def getCatchupReqs(self, consProof: ConsistencyProof):
    # TODO: This needs to be optimised, there needs to be a minimum size
    # of catchup requests so if a node is trying to catch up only 50 txns
    # from 10 nodes, each of these 10 nodes will serve 5 txns and prepare
    # a consistency proof for other txns. This is bad for the node catching
    # up as it involves more network traffic and more computation to verify
    # so many consistency proofs and for the node serving catchup reqs. But
    # if the node sent only 2 catchup requests the network traffic greatly
    # reduces and 25 txns can probably be read off a single chunk
    # (if txns don't span multiple chunks). A practical value of this
    # "minimum size" is some multiple of the chunk size of the ledger
    # nodeCount = len(self.nodestack.conns)
    nodeCount = len(self.nodes_to_request_txns_from)
    if nodeCount == 0:
        logger.debug('{} did not find any connected nodes to send '
                     'CatchupReq'.format(self))
        return
    # TODO: Consider setting start to `max(ledger.size, consProof.start)`
    # since ordered requests might have been executed after receiving
    # sufficient ConsProof in `preCatchupClbk`
    start = getattr(consProof, f.SEQ_NO_START.nm)
    end = getattr(consProof, f.SEQ_NO_END.nm)
    batchLength = math.ceil((end - start) / nodeCount)
    reqs = []
    s = start + 1
    e = min(s + batchLength - 1, end)
    for i in range(nodeCount):
        req = CatchupReq(getattr(consProof, f.LEDGER_ID.nm),
                         s, e, end)
        reqs.append(req)
        s = e + 1
        e = min(s + batchLength - 1, end)
        if s > end:
            break
    return reqs
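# --- Illustrative sketch (not part of the module above) ---
# This standalone snippet reduces the even split in getCatchupReqs to plain
# (start, end) tuples so the TODO's complaint can be seen directly: catching
# up 50 txns from 10 nodes yields ten tiny 5-txn requests. `split_evenly` is
# a hypothetical helper name used only for this demonstration; it mirrors the
# batching arithmetic above but assumes nothing about the real classes.
import math


def split_evenly(start, end, node_count):
    # One request per node, each covering roughly ceil((end - start) / n) txns
    batch_length = math.ceil((end - start) / node_count)
    ranges = []
    s, e = start + 1, min(start + batch_length, end)
    for _ in range(node_count):
        ranges.append((s, e))
        s, e = e + 1, min(e + batch_length, end)
        if s > end:
            break
    return ranges


if __name__ == '__main__':
    # Ten 5-txn requests: each serving node must also build and send a
    # consistency proof, which is the overhead the TODO wants to avoid.
    print(split_evenly(0, 50, 10))   # [(1, 5), (6, 10), ..., (46, 50)]
    # With only 3 missing txns and 4 nodes the loop breaks early, so one
    # node receives no request at all.
    print(split_evenly(10, 13, 4))   # [(11, 11), (12, 12), (13, 13)]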
def addReqsForMissing(frm, to):
    # Add catchup requests for missing transactions.
    # `frm` and `to` are inclusive
    missing = to - frm + 1
    numBatches = int(math.ceil(missing / batchSize))
    for i in range(numBatches):
        s = frm + (i * batchSize)
        e = min(to, frm + ((i + 1) * batchSize) - 1)
        req = CatchupReq(self._ledger_id, s, e, end)
        logger.info("{} creating catchup request {} to {} till {}".
                    format(self, s, e, end))
        cReqs.append(req)
    return missing
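# --- Illustrative sketch (standalone, hypothetical names) ---
# addReqsForMissing above chops an inclusive [frm, to] range into fixed-size
# batches; this snippet shows the arithmetic in isolation. With 12 missing
# txns and a batch size of 5 it produces ceil(12 / 5) = 3 requests, the last
# one shorter than the rest. `missing_batches` is an illustration-only name.
import math


def missing_batches(frm, to, batch_size):
    missing = to - frm + 1
    num_batches = int(math.ceil(missing / batch_size))
    return [(frm + i * batch_size,
             min(to, frm + (i + 1) * batch_size - 1))
            for i in range(num_batches)]


if __name__ == '__main__':
    print(missing_batches(101, 112, 5))  # [(101, 105), (106, 110), (111, 112)]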
def _generate_catchup_reqs(self, start, end, node_count):
    batch_length = math.ceil((end - start) / node_count)
    reqs = []
    s = start + 1
    e = min(s + batch_length - 1, end)
    for i in range(node_count):
        req = CatchupReq(self._ledger_id, s, e, end)
        reqs.append(req)
        s = e + 1
        e = min(s + batch_length - 1, end)
        if s > end:
            break
    return reqs
def test_receive_incorrect_catchup_request_with_catchuptill_greater_ledger_size(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    catchup_till = 100
    req = CatchupReq(leger_id, 0, 10, catchup_till)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 4)
    ledger_manager = txnPoolNodeSet[0].ledgerManager
    ledger_manager.processCatchupReq(req, "frm")
    ledger_size = ledger_manager.getLedgerForMsg(req).size
    _check_call_discard(
        ledger_manager, "not able to service since "
                        "catchupTill = {} greater than "
                        "ledger size = {}".format(catchup_till, ledger_size))
def test_receive_incorrect_catchup_request_with_start_greater_end(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    start = 10
    end = 5
    req = CatchupReq(leger_id, start, end, 11)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 4)
    ledger_manager = txnPoolNodeSet[0].ledgerManager
    ledger_manager.processCatchupReq(req, "frm")
    _check_call_discard(
        ledger_manager, "not able to service since "
                        "start = {} greater than "
                        "end = {}".format(start, end))
def test_receive_incorrect_catchup_request_with_end_greater_catchuptill(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    end = 15
    catchup_till = 10
    req = CatchupReq(leger_id, 0, end, catchup_till)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 4)
    ledger_manager = txnPoolNodeSet[0].ledgerManager
    ledger_manager.processCatchupReq(req, "frm")
    _check_call_discard(
        ledger_manager, "not able to service since "
                        "end = {} greater than "
                        "catchupTill = {}".format(end, catchup_till))
def test_receive_incorrect_catchup_request_with_start_greater_end(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        monkeypatch):
    start = 10
    end = 5

    def _check_discard(msg, reason, logMethod=logging.error, cliOutput=False):
        # str.find() returns -1 when the substring is absent, so compare
        # explicitly instead of relying on truthiness
        assert reason.find("not able to service since "
                           "start = {} greater than "
                           "end = {}".format(start, end)) != -1

    req = CatchupReq(leger_id, start, end, 11)
    _process_catchup_req(req, _check_discard, looper, txnPoolNodeSet,
                         sdk_pool_handle, sdk_wallet_client, monkeypatch)
def test_receive_incorrect_catchup_request_with_end_greater_catchuptill(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        monkeypatch):
    end = 15
    catchup_till = 10

    def _check_discard(msg, reason, logMethod=logging.error, cliOutput=False):
        assert reason.find("not able to service since "
                           "end = {} greater than "
                           "catchupTill = {}".format(end, catchup_till)) != -1

    req = CatchupReq(leger_id, 0, end, catchup_till)
    _process_catchup_req(req, _check_discard, looper, txnPoolNodeSet,
                         sdk_pool_handle, sdk_wallet_client, monkeypatch)
def test_receive_incorrect_catchup_request_with_catchuptill_greater_ledger_size(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        monkeypatch):
    catchup_till = 100
    req = CatchupReq(leger_id, 0, 10, catchup_till)
    ledger_size = txnPoolNodeSet[0].ledgerManager.getLedgerForMsg(req).size

    def _check_discard(msg, reason, logMethod=logging.error, cliOutput=False):
        assert reason.find("not able to service since "
                           "catchupTill = {} greater than "
                           "ledger size = {}".format(catchup_till,
                                                     ledger_size)) != -1

    _process_catchup_req(req, _check_discard, looper, txnPoolNodeSet,
                         sdk_pool_handle, sdk_wallet_client, monkeypatch)
def test_receive_incorrect_catchup_request_for_seq_no_zero(txnPoolNodeSet):
    req = CatchupReq(ledger_id, 0, 0, 1)
    ledger_manager = txnPoolNodeSet[0].ledgerManager
    ledger_manager.processCatchupReq(req, "frm")
    _check_call_discard(ledger_manager,
                        "not able to service since start 0 is zero or less")
def _build_catchup_reqs(
        ledger_id: int,
        start_seq_no: int,
        end_seq_no: int,
        catchup_till: int,
        nodes_ledger_sizes: Dict[str, int],
        catchup_batch_size: int = 5) -> Dict[str, CatchupReq]:
    # Utilities
    def find_node_idx(ledger_sizes: List[Tuple[str, int]],
                      max_seq_no: int) -> int:
        for i, (_, size) in enumerate(ledger_sizes):
            if size >= max_seq_no:
                return i

    def find_next_best_node_idx(ledger_sizes: List[Tuple[str, int]],
                                exclude_idx) -> int:
        idx_txns = ((idx, size)
                    for idx, (_, size) in enumerate(ledger_sizes)
                    if idx != exclude_idx)
        return max(idx_txns, key=lambda v: v[1])[0]

    # Gather all nodes that have transactions we potentially need.
    # Register nodes having more than the needed transactions as having only
    # the needed transactions, to reduce the ability of malicious nodes to
    # manipulate the distribution of catchup requests
    nodes_ledger_sizes = [(node_id, min(size, end_seq_no))
                          for node_id, size in nodes_ledger_sizes.items()
                          if size >= start_seq_no]

    # Shuffle nodes so that catchup requests are sent randomly
    shuffle(nodes_ledger_sizes)

    reqs = {}
    pos = end_seq_no
    while len(nodes_ledger_sizes) > 0 and pos >= start_seq_no:
        txns_left = pos - start_seq_no + 1

        txns_to_catchup = txns_left // len(nodes_ledger_sizes)
        # Always try to ask for some minimum number of txns per node
        txns_to_catchup = max(catchup_batch_size, txns_to_catchup)
        # But for no more than the number of txns left
        txns_to_catchup = min(txns_left, txns_to_catchup)

        node_index = find_node_idx(nodes_ledger_sizes, pos)

        if len(nodes_ledger_sizes) > 1:
            # If we have more than one node left to request data from then we
            # may need to adjust the number of txns requested from the current
            # node so that we don't ask the next node for txns it doesn't have
            next_node_index = find_next_best_node_idx(nodes_ledger_sizes,
                                                      node_index)
            next_node_ledger_size = nodes_ledger_sizes[next_node_index][1]
            if pos - txns_to_catchup > next_node_ledger_size:
                txns_to_catchup = pos - next_node_ledger_size

        if txns_to_catchup > 0:
            node_id = nodes_ledger_sizes[node_index][0]
            reqs[node_id] = CatchupReq(ledgerId=ledger_id,
                                       seqNoStart=pos - txns_to_catchup + 1,
                                       seqNoEnd=pos,
                                       catchupTill=catchup_till)
            pos -= txns_to_catchup

        del nodes_ledger_sizes[node_index]

    return reqs
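# --- Illustrative usage sketch ---
# This is not test code from the repository; it is a sketch of how
# _build_catchup_reqs distributes work, assuming the surrounding module
# already imports CatchupReq, Dict, List, Tuple and shuffle as used above.
# The node names and ledger sizes are made up; the asserted properties are
# the invariants the distribution is designed to provide regardless of the
# random shuffle.
def demo_build_catchup_reqs():
    ledger_sizes = {'Alpha': 100, 'Beta': 100, 'Gamma': 60, 'Delta': 40}
    reqs = _build_catchup_reqs(ledger_id=1,
                               start_seq_no=51,
                               end_seq_no=100,
                               catchup_till=100,
                               nodes_ledger_sizes=ledger_sizes,
                               catchup_batch_size=5)

    # Delta reports only 40 txns, none of which fall in [51, 100],
    # so it is never asked for anything
    assert 'Delta' not in reqs
    # The requested ranges cover [51, 100] exactly once, back to back
    covered = sorted((r.seqNoStart, r.seqNoEnd) for r in reqs.values())
    assert covered[0][0] == 51 and covered[-1][1] == 100
    assert all(covered[i][1] + 1 == covered[i + 1][0]
               for i in range(len(covered) - 1))
    # No node is asked for txns beyond what it reported having
    assert all(r.seqNoEnd <= ledger_sizes[node] for node, r in reqs.items())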