def testMessageQuota(set_info_log_level, tdir, looper):
    """Every message alpha.send() accepts must eventually reach the
    listener-only beta stack, despite the per-poll listener quota."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    alpha_printer = Printer(names[0])
    collector = CollectingMsgsHandler()
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=alpha_printer.print, restricted=True)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=collector.handler, restricted=True,
                  onlyListener=True)
    prepStacks(looper, alpha, beta, connect=True, useKeys=True)

    # Send well over the listener quota; track only messages send() accepted.
    sent = []
    for _ in range(150 * beta.listenerQuota):
        raw = json.dumps({'random': randomSeed().decode()}).encode()
        if alpha.send(raw, beta.name):
            sent.append(json.loads(raw.decode()))

    def checkAllReceived():
        assert len(sent) == len(collector.receivedMessages)
        assert sent == collector.receivedMessages

    looper.run(eventually(checkAllReceived, retryWait=0.5, timeout=5))
def testZStackSendRecvHugeDataUnderLimit(set_info_log_level, tdir, looper, tconf):
    """A message that serializes to exactly MSG_LEN_LIMIT bytes must be delivered."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    # The json serializer produces {'k':'vvv...vvv'}; pad the value so the
    # serialized form is exactly tconf.MSG_LEN_LIMIT bytes long.
    payload = {'k': 'v' * (tconf.MSG_LEN_LIMIT - len("{'k':''}"))}
    delivered = [False]

    def recvHandlerAlpha(wrpMsg):
        pass

    def recvHandlerBeta(wrpMsg):
        rmsg, frm = wrpMsg
        delivered[0] = True
        assert frm == 'Alpha'
        assert rmsg == payload

    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=recvHandlerAlpha, restricted=True,
                   config=adict(**tconf.__dict__), msgRejectHandler=None)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=recvHandlerBeta, restricted=True,
                  config=adict(**tconf.__dict__), msgRejectHandler=None)
    # Sanity-check the padding arithmetic before sending.
    assert len(alpha.serializeMsg(payload)) == tconf.MSG_LEN_LIMIT

    prepStacks(looper, alpha, beta, connect=True, useKeys=True)
    stat = alpha.send(payload, beta.name)
    assert stat[0] is True
    looper.runFor(5)
    assert delivered[0] is True
def testMessageQuota(tdir, looper):
    """The listener-only beta stack must receive every message sent by
    alpha, despite the per-poll listener quota."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    alpha_printer = Printer(names[0])
    received = []
    beta_handler = makeHandler(received)
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=alpha_printer.print, restricted=True)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=beta_handler, restricted=True,
                  onlyListener=True)
    prepStacks(looper, alpha, beta, connect=True, useKeys=True)

    sent = []
    for _ in range(100 * beta.listenerQuota):
        raw = json.dumps({'random': randomSeed().decode()}).encode()
        sent.append(json.loads(raw.decode()))
        alpha.send(raw, beta.name)

    looper.runFor(2)
    assert sent == received
def test_serialization_of_submessages_to_dict():
    """A submessage (LedgerStatus) embedded in a MessageRep serializes to
    the same dict as the stand-alone message and round-trips back to an
    equal object."""
    message = LedgerStatus(1, 10, None, None,
                           "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm",
                           CURRENT_PROTOCOL_VERSION)
    message_rep = MessageRep(**{
        f.MSG_TYPE.nm: "LEDGER_STATUS",
        f.PARAMS.nm: {"ledger_id": 1,
                      f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION},
        f.MSG.nm: message,
    })
    plain_json = ZStack.serializeMsg(message).decode()
    wrapped_json = ZStack.serializeMsg(message_rep).decode()

    # The embedded LedgerStatus serializes exactly like the plain one.
    assert plain_json in wrapped_json

    # Both forms deserialize back into equal LedgerStatus objects.
    from_plain = LedgerStatus(**ZStack.deserializeMsg(plain_json))
    from_wrapped = LedgerStatus(**ZStack.deserializeMsg(wrapped_json)[f.MSG.nm])
    assert message == from_plain
    assert message_rep.msg == from_wrapped
    assert message == from_wrapped
def testManyMessages(tdir, looper, set_info_log_level):
    """Beta must eventually count every one of 100k messages streamed
    from Alpha by a MessageSender."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    alpha_printer = Printer(names[0])
    counter = CounterMsgsHandler()
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=alpha_printer.print, restricted=True)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=counter.handler, restricted=True)
    prepStacks(looper, alpha, beta, connect=True, useKeys=True)
    looper.runFor(1)

    total = 100000
    sender = MessageSender(total, alpha, beta.name)
    looper.add(sender)

    def checkAllReceived():
        assert sender.sentMsgCount == total
        assert counter.receivedMsgCount == total

    looper.run(eventually(checkAllReceived, retryWait=1, timeout=60))
def testMessageQuota(tdir, looper):
    """Messages accepted by alpha.send() must all be collected by the
    listener-only beta stack, despite the per-poll listener quota."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    printer = Printer(names[0])
    handler = CollectingMsgsHandler()
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=printer.print, restricted=True)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=handler.handler, restricted=True,
                  onlyListener=True)
    prepStacks(looper, alpha, beta, connect=True, useKeys=True)

    accepted = []
    total = 150 * beta.listenerQuota
    for _ in range(total):
        payload = json.dumps({'random': randomSeed().decode()}).encode()
        # Only count messages that the sending side actually accepted.
        if alpha.send(payload, beta.name):
            accepted.append(json.loads(payload.decode()))

    def checkAllReceived():
        assert len(accepted) == len(handler.receivedMessages)
        assert accepted == handler.receivedMessages

    looper.run(eventually(checkAllReceived, retryWait=0.5, timeout=5))
def _serialize_deserialize(self, msg):
    """Round-trip ``msg`` through Batched/ZStack serialization, assert the
    restored message equals the original (MessageRep excluded), and
    return the restored message."""
    wire = ZStack.serializeMsg(Batched().prepForSending(msg))
    restored = node_message_factory.get_instance(**ZStack.deserializeMsg(wire))
    if not isinstance(msg, MessageRep):
        before = MessageProcessor().toDict(msg)
        after = MessageProcessor().toDict(restored)
        assert before == after, "\n {} \n {}".format(before, after)
    return restored
def test_process_pre_prepare_validation_old_schema_no_pool(
        replica_with_requests, pre_prepare, mock_schema_pool_state_root):
    """A PrePrepare serialized without pool_state_root (old schema, per
    the mock fixture) must still pass processPrePrepare validation."""
    deserialized = ZStack.deserializeMsg(ZStack.serializeMsg(pre_prepare))
    assert f.POOL_STATE_ROOT_HASH.nm not in PrePrepare.schema
    restored = PrePrepare(**deserialized)
    replica_with_requests.processPrePrepare(restored,
                                            replica_with_requests.primaryName)
def _serialize_deserialize(self, msg):
    """Round-trip ``msg`` through Batched/ZStack serialization, assert the
    restored message equals the original (some message types excluded),
    and return the restored message."""
    wire = ZStack.serializeMsg(Batched().prepForSending(msg))
    restored = node_message_factory.get_instance(**ZStack.deserializeMsg(wire))
    # TODO: Figure out why BatchIDs are not deserialized back
    if not isinstance(msg, (MessageRep,
                            OldViewPrePrepareRequest,
                            OldViewPrePrepareReply)):
        before = MessageProcessor().toDict(msg)
        after = MessageProcessor().toDict(restored)
        assert before == after, "\n {} \n {}".format(before, after)
    return restored
def test_process_pre_prepare_validation_old_schema_no_audit(
        replica_with_requests, pre_prepare, mock_schema_audit_txn_root):
    """A PrePrepare serialized without audit_txn_root (old schema, per
    the mock fixture) must still pass processPrePrepare validation."""
    deserialized = ZStack.deserializeMsg(ZStack.serializeMsg(pre_prepare))
    assert f.AUDIT_TXN_ROOT_HASH.nm not in PrePrepare.schema
    restored = PrePrepare(**deserialized)
    replica_with_requests.processPrePrepare(restored,
                                            replica_with_requests.primaryName)
def learnKeysFromOthers(baseDir, nodeName, otherNodes):
    """Install the verification and public keys of every other node's
    nodestack and clientstack into ``nodeName``'s key directories."""
    home = ZStack.homeDirPath(baseDir, nodeName)
    verif_dir = ZStack.verifDirPath(home)
    pub_dir = ZStack.publicDirPath(home)
    for path in (home, verif_dir, pub_dir):
        os.makedirs(path, exist_ok=True)
    for node in otherNodes:
        for stack in (node.nodestack, node.clientstack):
            createCertsFromKeys(verif_dir, stack.name, stack.verKey)
            createCertsFromKeys(pub_dir, stack.name, stack.publicKey)
def test_that_service_fields_not_being_serialized():
    """
    Checks that service fields of validators, like 'typename' and 'schema',
    are excluded from the serialized message.
    """
    message = LedgerStatus(1, 10, None, None,
                           "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm")
    serialized = ZStack.serializeMsg(message)
    deserialized = ZStack.deserializeMsg(serialized)
    # None of the validator bookkeeping attributes may appear among the
    # keys of the deserialized message.
    service_fields = {'typename', 'schema', 'optional', 'nullable'}
    assert service_fields - set(deserialized) == service_fields
def testZStackRecvHugeDataOverLimit(set_info_log_level, tdir, looper, tconf):
    """A message one byte over MSG_LEN_LIMIT must be rejected on the
    receiving side, not delivered to the message handler."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    # The json serializer produces {'k':'vvv...vvv'}; pad the value so the
    # serialized form is exactly tconf.MSG_LEN_LIMIT + 1 bytes long.
    payload = {'k': 'v' * (tconf.MSG_LEN_LIMIT - len("{'k':''}") + 1)}
    flags = [False, False]  # [0]: delivered to handler, [1]: rejected

    def recvHandlerAlpha(wrpMsg):
        pass

    def recvHandlerBeta(wrpMsg):
        rmsg, frm = wrpMsg
        flags[0] = True
        assert frm is not None
        assert rmsg is not None

    def rejectHandlerBeta(reason, frm):
        flags[1] = True
        assert 'exceeded allowed limit of {}'.format(
            tconf.MSG_LEN_LIMIT) in reason
        assert frm == 'Alpha'

    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=recvHandlerAlpha, restricted=True,
                   config=adict(**tconf.__dict__), msgRejectHandler=None)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=recvHandlerBeta, restricted=True,
                  config=adict(**tconf.__dict__),
                  msgRejectHandler=rejectHandlerBeta)

    wire = alpha.serializeMsg(payload)
    assert len(wire) == (tconf.MSG_LEN_LIMIT + 1)
    prepStacks(looper, alpha, beta, connect=True, useKeys=True)
    # Write to the raw zmq socket so alpha's own outbound checks cannot
    # interfere; the rejection must happen on beta's side.
    tracker = alpha._remotes['Beta'].socket.send(wire, copy=False, track=True)
    assert tracker
    looper.runFor(5)
    assert flags[0] is False
    assert flags[1] is True
def test_zstack_creates_keys_with_secure_permissions(tdir):
    """initLocalKeys must create secret key files with mode 600 and
    public key files with mode 644."""
    seed = b'0' * 32
    stack_name = 'aStack'
    paths = get_zstack_key_paths(stack_name, tdir)
    ZStack.initLocalKeys(stack_name, tdir, seed)
    for secret_path in paths['secret']:
        assert get_file_permission_mask(secret_path) == '600'
    for public_path in paths['public']:
        assert get_file_permission_mask(public_path) == '644'
def test_that_service_fields_not_being_serialized():
    """
    Checks that service fields of validators, like 'typename' and 'schema',
    are excluded from the serialized message.
    """
    message = LedgerStatus(
        1, 10, None, None, "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm",
        CURRENT_PROTOCOL_VERSION)
    serialized = ZStack.serializeMsg(message)
    deserialized = ZStack.deserializeMsg(serialized)
    # None of the validator bookkeeping attributes may appear among the
    # keys of the deserialized message.
    service_fields = {'typename', 'schema', 'optional', 'nullable'}
    assert service_fields - set(deserialized) == service_fields
def test_queue_size_limit_set(tdir, tconf):
    """The listener's high-water mark defaults to 0 (unlimited) and is
    set from the queue_size constructor argument when given."""
    unbounded = ZStack("Alpha", ha=genHa(), basedirpath=tdir, msgHandler=None,
                       restricted=False, seed=randomSeed(), config=tconf)
    unbounded.start()
    assert unbounded.listener.get_hwm() == 0
    unbounded.stop()

    queue_size = 100
    bounded = ZStack("Alpha", ha=genHa(), basedirpath=tdir, msgHandler=None,
                     restricted=False, seed=randomSeed(), config=tconf,
                     queue_size=queue_size)
    bounded.start()
    assert bounded.listener.get_hwm() == queue_size
    bounded.stop()
def testRestricted2ZStackCommunication(tdir, looper):
    """
    Create 2 ZStacks and make them send and receive messages.
    Both stacks allow communication only when keys are shared.
    """
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    printer_a, printer_b = Printer(names[0]), Printer(names[1])
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=printer_a.print, restricted=True)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=printer_b.print, restricted=True)
    prepStacks(looper, alpha, beta, connect=True, useKeys=True)

    alpha.send({'greetings': 'hi'}, beta.name)
    beta.send({'greetings': 'hello'}, alpha.name)

    looper.run(eventually(chkPrinted, printer_a, {'greetings': 'hello'}))
    looper.run(eventually(chkPrinted, printer_b, {'greetings': 'hi'}))
def get_zstack_key_paths(stack_name, common_path):
    """Return a dict with the 'secret' and 'public' key file path tuples
    that a ZStack named ``stack_name`` keeps under ``common_path``."""
    home_dir = ZStack.homeDirPath(common_path, stack_name)
    secret_dirs = (ZStack.sigDirPath(home_dir),
                   ZStack.secretDirPath(home_dir))
    public_dirs = (ZStack.verifDirPath(home_dir),
                   ZStack.publicDirPath(home_dir))
    return dict(
        secret=tuple(os.path.join(d, stack_name) + '.key_secret'
                     for d in secret_dirs),
        public=tuple(os.path.join(d, stack_name) + '.key'
                     for d in public_dirs),
    )
def genKeys(baseDir, names):
    """Generate certificates under ``baseDir`` and mirror the key
    directories into a per-name subdirectory for each stack name."""
    generate_certificates(baseDir, *names, clean=True)
    for name in names:
        target = os.path.join(baseDir, name)
        os.makedirs(target, exist_ok=True)
        for key_dir in ZStack.keyDirNames():
            copy_tree(os.path.join(baseDir, key_dir),
                      os.path.join(target, key_dir))
def create_and_prep_stacks(names, tdir, looper):
    """Create one restricted ZStack per name, each wired to its own
    Printer handler, connect them all, and return (stacks, printers)."""
    genKeys(tdir, names)
    printers = [Printer(n) for n in names]
    stacks = []
    for printer, name in zip(printers, names):
        stacks.append(ZStack(name, ha=genHa(), basedirpath=tdir,
                             msgHandler=printer.print, restricted=True))
    prepStacks(looper, *stacks, connect=True, useKeys=True)
    return stacks, printers
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare built from a maximum-size batch must serialize to no
    more than MSG_LEN_LIMIT bytes."""
    config = getConfig()
    batch = [Request(signatures={})] * config.Max3PCBatchSize
    req_idr = [req.digest for req in batch]
    digest = Replica.batchDigest(batch)
    state_root = Base58Serializer().serialize(BLANK_ROOT)
    txn_root = Ledger.hashToStr(CompactMerkleTree().root_hash)
    pp = PrePrepare(0, 0, 0, get_utc_epoch(), req_idr, init_discarded(),
                    digest, 0, state_root, txn_root, 0, True)
    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def create_stack(name, handler=None):
    # Build an unrestricted stack with a fresh random seed; tdir and
    # tconf come from the enclosing scope.
    return ZStack(name,
                  ha=genHa(),
                  basedirpath=tdir,
                  msgHandler=handler,
                  restricted=False,
                  seed=randomSeed(),
                  config=tconf)
def dummyZStack(tdir, tconf):
    """Build a throwaway 'Alpha' ZStack whose handler just prints."""
    stack_name = 'Alpha'
    printer = Printer(stack_name)
    return ZStack(stack_name, ha=genHa(), basedirpath=tdir,
                  msgHandler=printer.print, seed=randomSeed(), config=tconf)
def create_beta(tdir, name):
    """Return a restricted ZStack plus the counting handler wired into it."""
    counter = CounterMsgsHandler()
    stack = ZStack(name, ha=genHa(), basedirpath=tdir,
                   msgHandler=counter.handler, restricted=True)
    return stack, counter
def create_alpha(tdir, name):
    """Return a restricted ZStack whose handler prints received messages."""
    printer = Printer(name)
    return ZStack(name, ha=genHa(), basedirpath=tdir,
                  msgHandler=printer.print, restricted=True)
def test_no_size_limit_for_outbound_msgs(looper, tdir, msg):
    """An outbound message is delivered even after alpha's own
    message-length validator is tightened to MSG_LEN_LIMIT."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=None, restricted=False)
    # Tighten alpha's message-length validator to the configured limit.
    alpha.msgLenVal.max_allowed = MSG_LEN_LIMIT
    counter = CounterMsgsHandler()
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=counter.handler, restricted=False)
    prepStacks(looper, alpha, beta)

    def check_received(expected):
        assert counter.receivedMsgCount == expected

    alpha.send(msg)
    looper.run(eventually(check_received, 1, retryWait=1, timeout=10))
def create_and_prep_stacks(names, basedir, looper, conf):
    """Create one restricted ZStack per name, each with its own Printer
    handler and its own copy of the config, connect them all, and return
    (stacks, printers)."""
    genKeys(basedir, names)
    printers = [Printer(n) for n in names]
    # adict copies the config module so one stack can later be given a
    # different config without affecting the others.
    stacks = [ZStack(name, ha=genHa(), basedirpath=basedir,
                     msgHandler=printer.print, restricted=True,
                     config=adict(**conf.__dict__))
              for printer, name in zip(printers, names)]
    prepStacks(looper, *stacks, connect=True, useKeys=True)
    return stacks, printers
def test_process_pre_prepare_validation_old_schema(fake_replica, pre_prepare,
                                                   pool_state_root,
                                                   fake_state_root_hash):
    """Check that a PrePrepare built without the pool_state_root field
    (old schema) still passes processPrePrepare validation.

    NOTE: temporarily removes POOL_STATE_ROOT_HASH from the class-level
    PrePrepare.schema and restores the saved copy at the end, so later
    tests see the full schema again.
    """
    serialized_pp = ZStack.serializeMsg(pre_prepare)
    deserialized_pp = ZStack.deserializeMsg(serialized_pp)
    # Save a copy of the current (full) schema before stripping the field.
    new_schema = copy(PrePrepare.schema)
    PrePrepare.schema = tuple(y for y in PrePrepare.schema
                              if y[0] != f.POOL_STATE_ROOT_HASH.nm)
    assert f.POOL_STATE_ROOT_HASH.nm not in PrePrepare.schema
    pp = PrePrepare(**deserialized_pp)
    # Stub out state-root lookup: index 0 -> pool root, 1 -> fake root.
    state_roots = [pool_state_root, fake_state_root_hash]
    fake_replica.stateRootHash = lambda ledger, to_str=False: state_roots[
        ledger]

    def reportSuspiciousNodeEx(ex):
        # Any suspicion raised during processing means validation failed.
        assert False, ex

    fake_replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
    fake_replica.processPrePrepare(pp, fake_replica.primaryName)
    # Restore the saved full schema for subsequent tests.
    PrePrepare.schema = new_schema
def create_stacks(tdir, looper):
    """Create Alpha (printing handler) and Beta (counting handler), start
    both without connecting them, and return (alpha, beta)."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    alpha_printer = Printer(names[0])
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=alpha_printer.print, restricted=True)
    prepStacks(looper, alpha, connect=False)
    counter = CounterMsgsHandler()
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=counter.handler, restricted=True)
    prepStacks(looper, beta, connect=False)
    return alpha, beta
def create_alpha(tdir, looper, name):
    """Start a restricted stack with a printing handler and patched
    ping/pong, without connecting it to anything."""
    printer = Printer(name)
    stack = ZStack(name, ha=genHa(), basedirpath=tdir,
                   msgHandler=printer.print, restricted=True)
    prepStacks(looper, stack, connect=False)
    patch_ping_pong(stack)
    return stack
def create_beta(tdir, looper, name, start_stack=True):
    """Create a restricted stack with a counting handler; when
    ``start_stack`` is True, start it (unconnected) and patch ping/pong.
    Returns (stack, handler)."""
    counter = CounterMsgsHandler()
    stack = ZStack(name, ha=genHa(), basedirpath=tdir,
                   msgHandler=counter.handler, restricted=True)
    if start_stack:
        prepStacks(looper, stack, connect=False)
        patch_ping_pong(stack)
    return stack, counter
def test_serialization_of_submessages_to_dict():
    """A message embedded in a MessageRep serializes exactly like the
    stand-alone message and deserializes back to an equal object."""
    status = LedgerStatus(
        1, 10, None, None, "AwgQhPR9cgRubttBGjRruCRMLhZFBffbejbPipj7WBBm",
        CURRENT_PROTOCOL_VERSION)
    reply = MessageRep(**{
        f.MSG_TYPE.nm: "LEDGER_STATUS",
        f.PARAMS.nm: {"ledger_id": 1,
                      f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION},
        f.MSG.nm: status,
    })
    status_json = ZStack.serializeMsg(status).decode()
    reply_json = ZStack.serializeMsg(reply).decode()

    # The submessage serializes to the same dict as a stand-alone message.
    assert status_json in reply_json

    # Both forms deserialize into equal LedgerStatus objects.
    from_plain = LedgerStatus(**ZStack.deserializeMsg(status_json))
    from_reply = LedgerStatus(**ZStack.deserializeMsg(reply_json)[f.MSG.nm])
    assert status == from_plain
    assert reply.msg == from_reply
    assert status == from_reply
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare for a maximum-size batch must fit within MSG_LEN_LIMIT
    when serialized."""
    config = getConfig()
    batch = [Request(signatures={})] * config.Max3PCBatchSize
    pp = PrePrepare(0, 0, 0,
                    get_utc_epoch(),
                    [req.digest for req in batch],
                    init_discarded(),
                    Replica.batchDigest(batch),
                    0,
                    Base58Serializer().serialize(BLANK_ROOT),
                    Ledger.hashToStr(CompactMerkleTree().root_hash),
                    0,
                    True)
    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def testUnrestricted2ZStackCommunication(tdir, looper, tconf):
    """
    Create 2 ZStacks and make them send and receive messages.
    Both stacks allow communication even when keys are not shared.
    """
    names = ['Alpha', 'Beta']
    printer_a, printer_b = Printer(names[0]), Printer(names[1])
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=printer_a.print, restricted=False,
                   seed=randomSeed(), config=tconf)
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=printer_b.print, restricted=False,
                  seed=randomSeed(), config=tconf)
    prepStacks(looper, alpha, beta, connect=True, useKeys=True)

    alpha.send({'greetings': 'hi'}, beta.name)
    beta.send({'greetings': 'hello'}, alpha.name)

    looper.run(eventually(chkPrinted, printer_a, {'greetings': 'hello'}))
    looper.run(eventually(chkPrinted, printer_b, {'greetings': 'hi'}))
def checkRemoteExists(frm: ZStack,
                      to: str,  # remoteName
                      state: Optional[RemoteState] = None):
    """Assert that stack ``frm`` has a remote named ``to`` and that the
    remote is in the expected ``state``."""
    remote = frm.getRemote(to)
    checkState(state, remote, "{}'s remote {}".format(frm.name, to))
def test_stashing_unknown_remote_msgs(looper, tdir, tconf):
    """Messages arriving from a sender the receiving stack has not
    registered as a remote must be stashed (with the sender's zmq
    identity) in _stashed_unknown_remote_msgs."""
    names = ['Alpha', 'Beta']
    genKeys(tdir, names)
    alpha = ZStack(names[0], ha=genHa(), basedirpath=tdir,
                   msgHandler=None, restricted=False)
    beta_msg_handler = CounterMsgsHandler()
    beta = ZStack(names[1], ha=genHa(), basedirpath=tdir,
                  msgHandler=beta_msg_handler.handler, restricted=False)
    # Start both stacks without connecting them to each other.
    prepStacks(looper, alpha, beta, connect=False)
    assert not alpha.hasRemote(beta.name)
    assert not alpha.isConnectedTo(beta.name)
    assert not beta.hasRemote(alpha.name)
    assert not beta.isConnectedTo(alpha.name)

    # Connect only alpha -> beta, so beta never learns alpha as a remote.
    alpha.connect(name=beta.name, ha=beta.ha, verKeyRaw=beta.verKeyRaw,
                  publicKeyRaw=beta.publicKeyRaw)
    alpha.getRemote(beta.name, beta.ha).setConnected()
    assert alpha.hasRemote(beta.name)
    assert alpha.isConnectedTo(beta.name)
    assert not beta.hasRemote(alpha.name)
    assert not beta.isConnectedTo(alpha.name)

    def check_unknown_remote_msg():
        # Alpha is connected, so nothing is queued on its side ...
        assert len(alpha._stashed_to_disconnected) == 0
        # ... while beta stashes each message, in send order, paired with
        # the zmq identity of alpha's socket.
        assert len(beta._stashed_unknown_remote_msgs) == len(sent_msgs)
        for index, item in enumerate(sent_msgs):
            assert item == beta._stashed_unknown_remote_msgs[index][0]
            assert alpha.remotes[
                'Beta'].socket.IDENTITY == beta._stashed_unknown_remote_msgs[
                index][1]

    # Mirror beta's bounded stash size so the comparison above stays valid
    # even if more messages are sent than the queue can hold.
    sent_msgs = deque(maxlen=tconf.ZMQ_STASH_UNKNOWN_REMOTE_MSGS_QUEUE_SIZE)
    msg = 'message num: {}'
    for i in range(5):
        _msg = msg.format(i)
        alpha.send(_msg)
        sent_msgs.append(_msg)

    looper.run(
        eventually(check_unknown_remote_msg, retryWait=1, timeout=60))