def test_serialization_of_submessages_to_dict():
    """A submessage embedded in a MessageRep must serialize to exactly the
    same dict form it would have as a standalone message, and round-trip
    back to an equal LedgerStatus."""
    ledger_status = LedgerStatus(
        1, 10, None, None, "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm",
        CURRENT_PROTOCOL_VERSION)
    reply = MessageRep(**{
        f.MSG_TYPE.nm: "LEDGER_STATUS",
        f.PARAMS.nm: {
            "ledger_id": 1,
            f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION
        },
        f.MSG.nm: ledger_status,
    })

    as_standalone = ZStack.serializeMsg(ledger_status).decode()
    as_reply = ZStack.serializeMsg(reply).decode()

    # The embedded LedgerStatus must appear verbatim inside the reply's
    # serialized form, i.e. it is serialized exactly as a common message.
    assert as_standalone in as_reply

    # Both forms must deserialize back into the very same LedgerStatus.
    restored = LedgerStatus(**ZStack.deserializeMsg(as_standalone))
    restored_sub = LedgerStatus(
        **ZStack.deserializeMsg(as_reply)[f.MSG.nm])
    assert ledger_status == restored
    assert reply.msg == restored_sub
    assert ledger_status == restored_sub
def testZStackSendRecvHugeDataUnderLimit(set_info_log_level, tdir, looper,
                                         tconf):
    """A message whose serialized size is exactly MSG_LEN_LIMIT bytes must
    still be delivered to the receiving stack's handler."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)

    # The JSON serializer renders the payload as {'k':'vvv...vvv'}; pad the
    # value so the serialized form is exactly tconf.MSG_LEN_LIMIT bytes.
    payload = {'k': 'v' * (tconf.MSG_LEN_LIMIT - len("{'k':''}"))}
    received = [False]

    def recvHandlerAlpha(wrpMsg):
        pass

    def recvHandlerBeta(wrpMsg):
        rmsg, frm = wrpMsg
        received[0] = True
        assert frm == 'Alpha'
        assert rmsg == payload

    alpha = ZStack(names[0],
                   ha=genHa(),
                   basedirpath=tdir,
                   msgHandler=recvHandlerAlpha,
                   restricted=True,
                   config=adict(**tconf.__dict__),
                   msgRejectHandler=None)
    beta = ZStack(names[1],
                  ha=genHa(),
                  basedirpath=tdir,
                  msgHandler=recvHandlerBeta,
                  restricted=True,
                  config=adict(**tconf.__dict__),
                  msgRejectHandler=None)

    # Sanity-check the padding arithmetic before actually sending.
    assert len(alpha.serializeMsg(payload)) == tconf.MSG_LEN_LIMIT

    prepStacks(looper, *(alpha, beta), connect=True, useKeys=True)
    stat = alpha.send(payload, beta.name)
    assert stat[0] is True

    looper.runFor(5)
    assert received[0] is True
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare built for a maximum-size batch (Max3PCBatchSize empty
    requests) must serialize to no more than MSG_LEN_LIMIT bytes."""
    config = getConfig()
    requests = [Request(signatures={})] * config.Max3PCBatchSize
    req_idr = [req.digest for req in requests]
    digest = Replica.batchDigest(requests)
    state_root = Base58Serializer().serialize(BLANK_ROOT)
    txn_root = Ledger.hashToStr(CompactMerkleTree().root_hash)

    pp = PrePrepare(
        0, 0, 0, get_utc_epoch(), req_idr, init_discarded(), digest, 0,
        state_root, txn_root, 0, True)

    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def _serialize_deserialize(self, msg):
    """Round-trip *msg* through the batched wire format and, for every
    message type except MessageRep, assert the rebuilt message has the
    same dict representation as the original.  Returns the rebuilt message."""
    wire_form = ZStack.serializeMsg(Batched().prepForSending(msg))
    rebuilt = node_message_factory.get_instance(
        **ZStack.deserializeMsg(wire_form))
    if not isinstance(msg, MessageRep):
        original_dict = MessageProcessor().toDict(msg)
        rebuilt_dict = MessageProcessor().toDict(rebuilt)
        assert original_dict == rebuilt_dict, \
            "\n {} \n {}".format(original_dict, rebuilt_dict)
    return rebuilt
def _serialize_deserialize(self, msg):
    """Round-trip *msg* through the batched wire format and assert that the
    rebuilt message has the same dict representation as the original, except
    for the message types listed below.  Returns the rebuilt message."""
    wire_form = ZStack.serializeMsg(Batched().prepForSending(msg))
    rebuilt = node_message_factory.get_instance(
        **ZStack.deserializeMsg(wire_form))
    # TODO: Figure out why BatchIDs are not deserialized back
    skipped_types = (MessageRep, OldViewPrePrepareRequest,
                     OldViewPrePrepareReply)
    if not isinstance(msg, skipped_types):
        original_dict = MessageProcessor().toDict(msg)
        rebuilt_dict = MessageProcessor().toDict(rebuilt)
        assert original_dict == rebuilt_dict, \
            "\n {} \n {}".format(original_dict, rebuilt_dict)
    return rebuilt
def test_process_pre_prepare_validation_old_schema_no_audit(
        replica_with_requests, pre_prepare, mock_schema_audit_txn_root):
    """A PrePrepare round-tripped under a schema lacking the audit txn root
    field (old schema, patched by the fixture) must still pass
    processPrePrepare validation."""
    pp_fields = ZStack.deserializeMsg(ZStack.serializeMsg(pre_prepare))
    # The fixture is expected to have removed the field from the schema.
    assert f.AUDIT_TXN_ROOT_HASH.nm not in PrePrepare.schema
    replica_with_requests.processPrePrepare(
        PrePrepare(**pp_fields), replica_with_requests.primaryName)
def test_process_pre_prepare_validation_old_schema_no_pool(
        replica_with_requests, pre_prepare, mock_schema_pool_state_root):
    """A PrePrepare round-tripped under a schema lacking the pool state root
    field (old schema, patched by the fixture) must still pass
    processPrePrepare validation."""
    pp_fields = ZStack.deserializeMsg(ZStack.serializeMsg(pre_prepare))
    # The fixture is expected to have removed the field from the schema.
    assert f.POOL_STATE_ROOT_HASH.nm not in PrePrepare.schema
    replica_with_requests.processPrePrepare(
        PrePrepare(**pp_fields), replica_with_requests.primaryName)
def test_that_service_fields_not_being_serialized():
    """Validator service fields ('typename', 'schema', 'optional',
    'nullable') must not leak into the serialized message."""
    message = LedgerStatus(
        1, 10, None, None, "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm")
    round_tripped = ZStack.deserializeMsg(ZStack.serializeMsg(message))
    service_fields = {'typename', 'schema', 'optional', 'nullable'}
    # None of the service fields may appear among the deserialized keys.
    assert service_fields - set(round_tripped) == service_fields
def testZStackRecvHugeDataOverLimit(set_info_log_level, tdir, looper, tconf):
    """A message one byte over MSG_LEN_LIMIT must be rejected by the
    receiving stack and never reach its message handler."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)

    # The JSON serializer renders the payload as {'k':'vvv...vvv'}; pad the
    # value so the serialized form is exactly MSG_LEN_LIMIT + 1 bytes.
    payload = {'k': 'v' * (tconf.MSG_LEN_LIMIT - len("{'k':''}") + 1)}
    handled = [False, False]  # [delivered to handler, rejected]

    def recvHandlerAlpha(wrpMsg):
        pass

    def recvHandlerBeta(wrpMsg):
        rmsg, frm = wrpMsg
        handled[0] = True
        assert frm is not None
        assert rmsg is not None

    def rejectHandlerBeta(reason, frm):
        handled[1] = True
        assert 'exceeded allowed limit of {}'.format(
            tconf.MSG_LEN_LIMIT) in reason
        assert frm == 'Alpha'

    alpha = ZStack(names[0],
                   ha=genHa(),
                   basedirpath=tdir,
                   msgHandler=recvHandlerAlpha,
                   restricted=True,
                   config=adict(**tconf.__dict__),
                   msgRejectHandler=None)
    beta = ZStack(names[1],
                  ha=genHa(),
                  basedirpath=tdir,
                  msgHandler=recvHandlerBeta,
                  restricted=True,
                  config=adict(**tconf.__dict__),
                  msgRejectHandler=rejectHandlerBeta)

    bytemsg = alpha.serializeMsg(payload)
    # Sanity-check the padding arithmetic before sending.
    assert len(bytemsg) == (tconf.MSG_LEN_LIMIT + 1)

    prepStacks(looper, *(alpha, beta), connect=True, useKeys=True)
    # Write directly to the socket to bypass alpha's own length checks —
    # we want beta's receive-side limit to do the rejecting.
    stat = alpha._remotes['Beta'].socket.send(bytemsg, copy=False, track=True)
    assert stat

    looper.runFor(5)
    assert handled[0] is False  # the handler must never see the message
    assert handled[1] is True   # ...but the reject handler must fire
def test_that_service_fields_not_being_serialized():
    """Validator service fields ('typename', 'schema', 'optional',
    'nullable') must not leak into the serialized message."""
    message = LedgerStatus(
        1, 10, None, None, "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm",
        CURRENT_PROTOCOL_VERSION)
    round_tripped = ZStack.deserializeMsg(ZStack.serializeMsg(message))
    service_fields = {'typename', 'schema', 'optional', 'nullable'}
    # None of the service fields may appear among the deserialized keys.
    assert service_fields - set(round_tripped) == service_fields
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare covering a full batch of Max3PCBatchSize requests must
    fit within the configured MSG_LEN_LIMIT once serialized."""
    config = getConfig()

    # Build the largest batch the config allows out of empty requests.
    requests = [Request(signatures={})] * config.Max3PCBatchSize
    req_idr = [req.digest for req in requests]

    pp = PrePrepare(0,
                    0,
                    0,
                    get_utc_epoch(),
                    req_idr,
                    init_discarded(),
                    Replica.batchDigest(requests),
                    0,
                    Base58Serializer().serialize(BLANK_ROOT),
                    Ledger.hashToStr(CompactMerkleTree().root_hash),
                    0,
                    True)

    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def test_serialization_of_submessages_to_dict():
    """A LedgerStatus wrapped in a MessageRep must serialize to the exact
    dict it would produce standalone, and deserialize back to an equal
    object from either form."""
    ledger_status = LedgerStatus(
        1, 10, None, None, "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm",
        CURRENT_PROTOCOL_VERSION)
    reply = MessageRep(**{
        f.MSG_TYPE.nm: "LEDGER_STATUS",
        f.PARAMS.nm: {"ledger_id": 1,
                      f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION},
        f.MSG.nm: ledger_status,
    })

    as_standalone = ZStack.serializeMsg(ledger_status).decode()
    as_reply = ZStack.serializeMsg(reply).decode()

    # The submessage's serialized form must appear verbatim inside the
    # reply, i.e. it is serialized exactly as a common message would be.
    assert as_standalone in as_reply

    # Both forms must round-trip back into an equal LedgerStatus.
    restored = LedgerStatus(**ZStack.deserializeMsg(as_standalone))
    restored_sub = LedgerStatus(
        **ZStack.deserializeMsg(as_reply)[f.MSG.nm])
    assert ledger_status == restored
    assert reply.msg == restored_sub
    assert ledger_status == restored_sub
def test_process_pre_prepare_validation_old_schema(fake_replica, pre_prepare,
                                                   pool_state_root,
                                                   fake_state_root_hash):
    """A PrePrepare round-tripped under the old schema (without
    POOL_STATE_ROOT_HASH) must still pass processPrePrepare validation.

    Fix: the original saved the pristine schema in a variable misleadingly
    named ``new_schema`` and restored it only on the success path — any
    failing assertion left the class-level ``PrePrepare.schema`` patched,
    corrupting every subsequent test in the session.  The patch is now
    undone in a ``finally`` block.
    """
    serialized_pp = ZStack.serializeMsg(pre_prepare)
    deserialized_pp = ZStack.deserializeMsg(serialized_pp)

    # Snapshot the full schema so it can always be restored afterwards.
    original_schema = copy(PrePrepare.schema)
    try:
        # Simulate the old schema by dropping the pool state root field.
        PrePrepare.schema = tuple(y for y in PrePrepare.schema
                                  if y[0] != f.POOL_STATE_ROOT_HASH.nm)
        assert f.POOL_STATE_ROOT_HASH.nm not in PrePrepare.schema

        pp = PrePrepare(**deserialized_pp)

        # ledger id indexes into [pool_state_root, fake_state_root_hash].
        state_roots = [pool_state_root, fake_state_root_hash]
        fake_replica.stateRootHash = \
            lambda ledger, to_str=False: state_roots[ledger]

        def reportSuspiciousNodeEx(ex):
            # Any suspicion raised during processing fails the test.
            assert False, ex

        fake_replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
        fake_replica.processPrePrepare(pp, fake_replica.primaryName)
    finally:
        # Undo the class-level schema patch even if an assertion failed.
        PrePrepare.schema = original_schema