def test_equal_votes_dont_accumulate_when_added(instance_change_provider,
                                                time_provider):
    """Re-sending an identical InstanceChange from the same node must
    replace the earlier vote rather than count as a second one."""
    voter = "Node1"
    target_view = 1
    quorum = 2
    time_provider.value = 0
    repeat_vote_time = 1

    change_msg = InstanceChange(target_view, Suspicions.PRIMARY_DEGRADED.code)

    # Same message, same sender, sent twice at different times.
    instance_change_provider.add_vote(change_msg, voter)
    time_provider.value = repeat_vote_time
    instance_change_provider.add_vote(change_msg, voter)

    # The vote is registered, but only once — no quorum from one node.
    assert instance_change_provider.has_view(target_view)
    assert instance_change_provider.has_inst_chng_from(target_view, voter)
    assert not instance_change_provider.has_quorum(target_view, quorum)

    # The persisted store likewise holds a single entry for this view.
    serialized = instance_change_provider._node_status_db.get(
        instance_change_provider.generate_db_key(target_view))
    assert len(node_status_db_serializer.deserialize(serialized)) == 1
def _load_last_sent_pp_key(self) -> Optional[PrePrepareKey]:
    """Load the persisted lastSentPrePrepare key from the node status DB.

    Returns None when no value is stored. Raises TypeError when any of
    the restored fields is not an int.
    """
    status_db = self.node.nodeStatusDB
    if LAST_SENT_PRE_PREPARE not in status_db:
        logger.info("{} did not find stored lastSentPrePrepare".format(
            self.node))
        return None

    serialized_value = status_db.get(LAST_SENT_PRE_PREPARE)
    logger.info("{} found stored lastSentPrePrepare value {}".format(
        self.node, serialized_value))

    # Restore the key and validate each field's type before handing it out.
    restored = node_status_db_serializer.deserialize(serialized_value)
    pp_key = PrePrepareKey(**restored)
    if not isinstance(pp_key.inst_id, int):
        raise TypeError("inst_id must be of int type")
    if not isinstance(pp_key.view_no, int):
        raise TypeError("view_no must be of int type")
    if not isinstance(pp_key.pp_seq_no, int):
        raise TypeError("pp_seq_no must be of int type")
    return pp_key
def _load_last_sent_pp_key(self) -> Optional[PrePrepareKey]:
    """Fetch and type-check the stored lastSentPrePrepare key.

    Returns:
        The restored PrePrepareKey, or None when nothing was persisted.

    Raises:
        TypeError: when a restored field is not an int.
    """
    if LAST_SENT_PRE_PREPARE not in self.node.nodeStatusDB:
        logger.info("{} did not find stored lastSentPrePrepare"
                    .format(self.node))
        return None

    serialized_value = self.node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)
    logger.info("{} found stored lastSentPrePrepare value {}"
                .format(self.node, serialized_value))

    value_as_dict = node_status_db_serializer.deserialize(serialized_value)
    pp_key = PrePrepareKey(**value_as_dict)

    # Validate every field; serialization must not have mangled the types.
    if not isinstance(pp_key.inst_id, int):
        raise TypeError("inst_id must be of int type")
    if not isinstance(pp_key.view_no, int):
        raise TypeError("view_no must be of int type")
    if not isinstance(pp_key.pp_seq_no, int):
        raise TypeError("pp_seq_no must be of int type")

    return pp_key
def test_too_old_messages_dont_count_towards_quorum(instance_change_provider,
                                                    time_provider, tconf):
    """Votes older than OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL must be
    evicted and no longer contribute to the quorum."""
    first_voter = "Node1"
    second_voter = "Node2"
    target_view = 1
    quorum = 2
    time_provider.value = 0

    change_msg = InstanceChange(target_view, Suspicions.PRIMARY_DEGRADED.code)

    # Two distinct nodes vote half an interval apart — quorum is reached.
    instance_change_provider.add_vote(change_msg, first_voter)
    time_provider.value += (tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL / 2)
    instance_change_provider.add_vote(change_msg, second_voter)
    assert instance_change_provider.has_quorum(target_view, quorum)

    # Advance past the interval relative to the first vote only: it expires,
    # the fresher second vote survives, and quorum is lost.
    time_provider.value += \
        (tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL / 2) + 1
    assert not instance_change_provider.has_quorum(target_view, quorum)
    assert not instance_change_provider.has_inst_chng_from(target_view,
                                                           first_voter)
    assert instance_change_provider.has_inst_chng_from(target_view,
                                                       second_voter)

    # The persisted store reflects the eviction: one vote left.
    serialized = instance_change_provider._node_status_db.get(
        instance_change_provider.generate_db_key(target_view))
    assert len(node_status_db_serializer.deserialize(serialized)) == 1
def _load_last_sent_pp_key(self) -> Optional[Dict]:
    """Load and validate the persisted lastSentPrePrepare store.

    Returns:
        The stored dict mapping instance id (a digit string) to a
        [view_no, pp_seq_no] pair, or None when nothing is persisted.

    Raises:
        TypeError: when the stored value does not match the expected
            format.
    """
    if LAST_SENT_PRE_PREPARE not in self.node.nodeStatusDB:
        logger.info("{} did not find stored lastSentPrePrepare"
                    .format(self.node))
        return None
    serialized_value = self.node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)
    logger.debug("{} found stored lastSentPrePrepare value {}"
                 .format(self.node, serialized_value))
    stored = node_status_db_serializer.deserialize(serialized_value)
    if not stored or not isinstance(stored, dict):
        raise TypeError("stored pp_store has wrong format")
    for inst_id, pair_3pc in stored.items():
        # Guard the isinstance check first: a non-str key (e.g. an int
        # restored by the serializer) would otherwise raise AttributeError
        # on .isdigit() instead of the TypeError the rest of this
        # validation consistently uses.
        if not isinstance(inst_id, str) or not inst_id.isdigit():
            raise TypeError("inst_id must be of int type")
        if len(pair_3pc) != 2:
            raise TypeError("extra data found")
        if not isinstance(pair_3pc[0], int):
            raise TypeError("view_no must be of int type")
        if not isinstance(pair_3pc[1], int):
            raise TypeError("pp_seq_no must be of int type")
    return stored
def unpack_pp_key(value: bytes) -> Dict:
    """Deserialize a raw last-sent PrePrepare store entry into a dict."""
    unpacked = node_status_db_serializer.deserialize(value)
    return unpacked
def test_backup_primary_restores_pp_seq_no_if_view_is_same(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf,
        tdir, allPluginsPath, chkFreqPatched, view_no):
    """After a restart within the same view, a backup primary replica must
    restore lastPrePrepareSeqNo, last_ordered_3pc and its watermarks from
    the persisted last sent PrePrepare key, and keep ordering new batches.
    """
    # Get a node with a backup primary replica
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    # NOTE(review): presumably accounts for one batch already ordered when
    # starting in a view > 0 — confirm against the fixture setup.
    batches_count = 0 if view_no == 0 else 1
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=7, num_batches=num_batches,
                               timeout=tconf.Max3PCBatchWait)
    batches_count += num_batches

    looper.run(
        eventually(lambda r: assertExp(r.last_ordered_3pc == (view_no, batches_count)),
                   replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Check view no of the node and lastPrePrepareSeqNo of the replica
    assert node.viewNo == view_no
    assert replica.lastPrePrepareSeqNo == batches_count

    # Ensure that the node has stored the last sent PrePrepare key
    # (stored format: {inst_id as str: [view_no, pp_seq_no]})
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB
    last_sent_pre_prepare_key = \
        node_status_db_serializer.deserialize(
            node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE))
    assert last_sent_pre_prepare_key == {
        str(backup_inst_id): [view_no, batches_count]
    }

    # Restart the node containing the replica
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node.name,
                                            stopNode=True)
    looper.removeProdable(node)
    txnPoolNodeSet.remove(node)

    node = start_stopped_node(node, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(node)

    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet)

    replica = node.replicas[backup_inst_id]

    # Verify that after the successful propagate primary procedure the replica
    # (which must still be the primary in its instance) has restored
    # lastPrePrepareSeqNo and adjusted last_ordered_3pc and shifted
    # the watermarks correspondingly
    assert node.viewNo == view_no
    assert replica.isPrimary
    assert replica.lastPrePrepareSeqNo == batches_count
    assert replica.last_ordered_3pc == (view_no, batches_count)
    assert replica.h == batches_count
    assert replica.H == batches_count + LOG_SIZE

    # Verify also that the stored last sent PrePrepare key has not been erased
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=1, num_batches=1,
                               timeout=tconf.Max3PCBatchWait)
    batches_count += 1

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (view_no, batches_count)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
def test_backup_primary_restores_pp_seq_no_if_view_is_same(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf,
        tdir, allPluginsPath, chkFreqPatched, view_no):
    """After a restart within the same view, a backup primary replica must
    restore lastPrePrepareSeqNo, last_ordered_3pc and its watermarks from
    the persisted last sent PrePrepare key, and keep ordering new batches.
    """
    # Get a node with a backup primary replica
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=7, num_batches=7,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (view_no, 7)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Check view no of the node and lastPrePrepareSeqNo of the replica
    assert node.viewNo == view_no
    assert replica.lastPrePrepareSeqNo == 7

    # Ensure that the node has stored the last sent PrePrepare key
    # (stored as the serialized fields of a PrePrepareKey)
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB
    last_sent_pre_prepare_key = \
        PrePrepareKey(**node_status_db_serializer.deserialize(
            node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)))
    assert last_sent_pre_prepare_key == PrePrepareKey(inst_id=backup_inst_id,
                                                      view_no=view_no,
                                                      pp_seq_no=7)

    # Restart the node containing the replica
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node.name,
                                            stopNode=True)
    looper.removeProdable(node)
    txnPoolNodeSet.remove(node)

    node = start_stopped_node(node, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(node)

    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet)

    replica = node.replicas[backup_inst_id]

    # Verify that after the successful propagate primary procedure the replica
    # (which must still be the primary in its instance) has restored
    # lastPrePrepareSeqNo and adjusted last_ordered_3pc and shifted
    # the watermarks correspondingly
    assert node.viewNo == view_no
    assert replica.isPrimary
    assert replica.lastPrePrepareSeqNo == 7
    assert replica.last_ordered_3pc == (view_no, 7)
    assert replica.h == 7
    assert replica.H == 7 + LOG_SIZE

    # Verify also that the stored last sent PrePrepare key has not been erased
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=1, num_batches=1,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (view_no, 8)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
def unpack_pp_key(value: bytes) -> PrePrepareKey:
    """Deserialize a raw stored value and rebuild the PrePrepareKey."""
    fields = node_status_db_serializer.deserialize(value)
    return PrePrepareKey(**fields)