def __init__(self, stackParams: dict, msgHandler: Callable,
             registry: Dict[str, HA], seed=None, sighex: str=None,
             config=None):
    config = config or getConfig()
    Batched.__init__(self)
    KITZStack.__init__(self, stackParams, msgHandler, registry=registry,
                       seed=seed, sighex=sighex, config=config)
    MessageProcessor.__init__(self, allowDictOnly=False)
def __init__(self, stackParams: dict, msgHandler: Callable,
             registry: Dict[str, HA], seed=None, sighex: str=None):
    Batched.__init__(self)
    # TODO: Just to get around the restriction of port numbers changed on
    # Azure. Remove this soon to relax port numbers only but not IP.
    stackParams["mutable"] = stackParams.get("mutable", True)
    # `config` is assumed here to be a module-level getConfig() result,
    # since it is not a parameter of this variant.
    stackParams["messageTimeout"] = config.RAETMessageTimeout
    KITRStack.__init__(self, stackParams, msgHandler, registry, sighex)
    MessageProcessor.__init__(self, allowDictOnly=True)
def __init__(self, stackParams: dict, msgHandler: Callable,
             registry: Dict[str, HA], seed=None, sighex: str=None,
             config=None, metrics=NullMetricsCollector()):
    config = config or getConfig()
    Batched.__init__(self, config=config, metrics=metrics)
    KITZStack.__init__(self, stackParams, msgHandler, registry=registry,
                       seed=seed, sighex=sighex, config=config,
                       metrics=metrics,
                       mt_incoming_size=MetricType.INCOMING_NODE_MESSAGE_SIZE,
                       mt_outgoing_size=MetricType.OUTGOING_NODE_MESSAGE_SIZE)
    MessageProcessor.__init__(self, allowDictOnly=False)
def __init__(self, stackParams: dict, msgHandler: Callable,
             registry: Dict[str, HA], seed=None, sighex: str = None):
    Batched.__init__(self)
    # `config` is assumed here to be a module-level getConfig() result,
    # since it is not a parameter of this variant.
    KITZStack.__init__(self, stackParams, msgHandler, registry=registry,
                       seed=seed, sighex=sighex,
                       listenerQuota=config.LISTENER_MESSAGE_QUOTA,
                       remoteQuota=config.REMOTES_MESSAGE_QUOTA)
    MessageProcessor.__init__(self, allowDictOnly=False)
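# All of the constructor variants above take the same core arguments: a
# stackParams dict, a message handler, and a registry of node name -> HA.
# Below is a minimal sketch of what those arguments might look like; the
# import path, the stackParams keys, and all names/addresses are assumptions
# made for illustration, not taken from the snippets above.
from stp_core.types import HA  # assumed location of the (host, port) named tuple

registry = {
    "Alpha": HA("127.0.0.1", 9701),  # illustrative node addresses
    "Beta": HA("127.0.0.1", 9703),
}
stackParams = {
    "name": "Alpha",               # assumed keys; real stacks may require more
    "ha": HA("127.0.0.1", 9701),
}
# A concrete stack class mixing Batched with a KIT stack would then be built as:
#     stack = SomeNodeStack(stackParams, msgHandler=print, registry=registry)
# (SomeNodeStack is a hypothetical name for one of the classes owning the
# __init__ variants above.)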
def _serialize_deserialize(self, msg):
    serialized_msg = Batched().prepForSending(msg)
    serialized_msg = ZStack.serializeMsg(serialized_msg)
    new_msg = node_message_factory.get_instance(
        **ZStack.deserializeMsg(serialized_msg))
    if not isinstance(msg, MessageRep):
        assert MessageProcessor().toDict(msg) == MessageProcessor().toDict(new_msg), \
            "\n {} \n {}".format(MessageProcessor().toDict(msg),
                                 MessageProcessor().toDict(new_msg))
    return new_msg
def _serialize_deserialize(self, msg):
    serialized_msg = Batched().prepForSending(msg)
    serialized_msg = ZStack.serializeMsg(serialized_msg)
    new_msg = node_message_factory.get_instance(
        **ZStack.deserializeMsg(serialized_msg))
    # TODO: Figure out why BatchIDs are not deserialized back
    if not isinstance(msg, (MessageRep, OldViewPrePrepareRequest,
                            OldViewPrePrepareReply)):
        assert MessageProcessor().toDict(msg) == MessageProcessor().toDict(new_msg), \
            "\n {} \n {}".format(MessageProcessor().toDict(msg),
                                 MessageProcessor().toDict(new_msg))
    return new_msg
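# The serialization round trip used by both helpers above can be read as a
# standalone sketch. `_roundtrip` is a hypothetical name; it assumes the same
# names (Batched, ZStack, node_message_factory) are importable exactly as in
# the snippets above.
def _roundtrip(msg):
    # Prepare the message for sending as the Batched stack would, then
    # serialize it to the wire format used by ZStack.
    wire = ZStack.serializeMsg(Batched().prepForSending(msg))
    # Decode the wire bytes back into a dict of fields and rebuild a typed
    # message object from them.
    return node_message_factory.get_instance(**ZStack.deserializeMsg(wire))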
def batched(message_size_limit):
    b = Batched(FakeSomething(MSG_LEN_LIMIT=message_size_limit))
    b.sign_and_serialize = lambda msg, signer: msg
    return b
def batched(message_size_limit):
    b = Batched(FakeSomething(MSG_LEN_LIMIT=message_size_limit,
                              TRANSPORT_BATCH_ENABLED=True))
    b.sign_and_serialize = lambda msg, signer: msg
    return b
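# If the `batched` helpers above are registered as pytest fixtures (an
# assumption, since no decorator is shown), a test would receive the prepared
# stack as an argument. The test name and payload below are illustrative only.
def test_sign_and_serialize_is_stubbed(batched):
    msg = {"op": "PING"}  # illustrative payload
    # The helper replaces sign_and_serialize with an identity stub, so the
    # message comes back unchanged regardless of the signer.
    assert batched.sign_and_serialize(msg, signer=None) is msg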