# Note: these tests rely on `randomString` and `split_messages_on_batches`
# from the project under test; only `json` is from the standard library.
import json


def test_batch_size_limitations():
    msg_limit = 100
    msgs = [{1: randomString(10)}] * 100
    res = split_messages_on_batches(msgs, json.dumps, lambda l: l <= msg_limit)
    for r in res:
        batch, length = r
        assert len(batch) <= msg_limit
def test_split_messages_by_size():
    str_len = 10
    msg_count = 100
    msg_limit = len(json.dumps([{1: randomString(str_len)}]))
    msgs = [{1: randomString(str_len)} for i in range(msg_count)]
    res = split_messages_on_batches(msgs, json.dumps, lambda l: l <= msg_limit)
    assert len(res) == msg_count
def test_split_messages_on_batches():
    str_len = 10
    max_depth = 2 ** 9
    msg_limit = len(json.dumps([{1: randomString(str_len)}]))
    msgs = [{1: randomString(str_len)} for i in range(max_depth)]
    res = split_messages_on_batches(msgs, json.dumps, lambda l: l <= msg_limit)
    assert res is not None
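The tests above pin down the contract of split_messages_on_batches: it returns a list of (serialized_batch, length) pairs, each of which passes the length check, or None when even a single message exceeds the limit. Below is a minimal sketch of that contract using recursive halving; the parameter names mirror the calls in the tests, but the body is an assumption, not the project's actual implementation (which, judging by max_depth in the test above, also bounds recursion depth).

def split_messages_on_batches_sketch(msgs, make_batch_func, is_batch_len_acceptable):
    # Try to fit all messages into a single serialized batch first.
    batch = make_batch_func(msgs)
    if is_batch_len_acceptable(len(batch)):
        return [(batch, len(batch))]
    if len(msgs) == 1:
        # A single message already exceeds the limit: batching is impossible.
        return None
    # Otherwise split the list in half and batch each half recursively.
    half = len(msgs) // 2
    left = split_messages_on_batches_sketch(msgs[:half], make_batch_func, is_batch_len_acceptable)
    right = split_messages_on_batches_sketch(msgs[half:], make_batch_func, is_batch_len_acceptable)
    if left is None or right is None:
        return None
    return left + right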
Example #4
    def flushOutBoxes(self) -> None:
        """
        Clear the outBoxes and transmit batched messages to remotes.
        """
        removedRemotes = []
        for rid, msgs in self.outBoxes.items():
            try:
                dest = self.remotes[rid].name
            except KeyError:
                removedRemotes.append(rid)
                continue
            if msgs:
                if self._should_batch(msgs):
                    logger.trace(
                        "{} batching {} msgs to {} into fewer transmissions".
                        format(self, len(msgs), dest))
                    logger.trace("    messages: {}".format(msgs))
                    batches = split_messages_on_batches(list(msgs),
                                                        self._make_batch,
                                                        self._test_batch_len,
                                                        )
                    msgs.clear()
                    if batches:
                        for batch, size in batches:
                            logger.trace("{} sending payload to {}: {}".format(
                                self, dest, batch))
                            self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, size)
                            # Setting timeout to never expire
                            self.transmit(
                                batch,
                                rid,
                                timeout=self.messageTimeout,
                                serialized=True)
                    else:
                        logger.error("{} cannot create batch(es) for {}".format(self, dest))
                else:
                    while msgs:
                        msg = msgs.popleft()
                        logger.trace(
                            "{} sending msg {} to {}".format(self, msg, dest))
                        self.metrics.add_event(MetricsName.TRANSPORT_BATCH_SIZE, 1)
                        # Setting timeout to never expire
                        self.transmit(msg, rid, timeout=self.messageTimeout,
                                      serialized=True)

        for rid in removedRemotes:
            logger.warning("{}{} has removed rid {}"
                           .format(CONNECTION_PREFIX, self,
                                   z85_to_friendly(rid)),
                           extra={"cli": False})
            msgs = self.outBoxes[rid]
            if msgs:
                self.discard(msgs,
                             "{}rid {} no longer available"
                             .format(CONNECTION_PREFIX,
                                     z85_to_friendly(rid)),
                             logMethod=logger.debug)
            del self.outBoxes[rid]
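flushOutBoxes relies on three hooks of the enclosing stack: _should_batch to decide whether the pending messages are worth batching, _make_batch to serialize a list of messages into one payload, and _test_batch_len to check that payload's length against the transport limit. The following is a hypothetical sketch of such hooks (the constant and method bodies are assumptions, not the project's code), shown only to make the calls above concrete.

import json

MAX_BATCH_PAYLOAD = 128 * 1024  # hypothetical transport limit, in bytes

class BatchingHooksSketch:
    def _should_batch(self, msgs):
        # Only batch when more than one message is queued for the remote.
        return len(msgs) > 1

    def _make_batch(self, msgs):
        # Serialize the pending messages into a single payload string.
        return json.dumps(list(msgs))

    def _test_batch_len(self, length):
        # Accept a batch only if its serialized length fits the limit.
        return length <= MAX_BATCH_PAYLOAD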
Example #5
    def flushOutBoxes(self) -> None:
        """
        Clear the outBoxes and transmit batched messages to remotes.
        """
        removedRemotes = []
        for rid, msgs in self.outBoxes.items():
            try:
                dest = self.remotes[rid].name
            except KeyError:
                removedRemotes.append(rid)
                continue
            if msgs:
                if self._should_batch(msgs):
                    logger.debug(
                        "{} batching {} msgs to {} into fewer transmissions".
                        format(self, len(msgs), dest))
                    logger.trace("    messages: {}".format(msgs))
                    batches = split_messages_on_batches(
                        list(msgs),
                        self._make_batch,
                        self._test_batch_len,
                    )
                    msgs.clear()
                    if batches:
                        for batch in batches:
                            logger.trace("{} sending payload to {}: {}".format(
                                self, dest, batch))
                            # Setting timeout to never expire
                            self.transmit(batch,
                                          rid,
                                          timeout=self.messageTimeout,
                                          serialized=True)
                    else:
                        logger.warning("Cannot create batch(es) for {}".format(
                            self, dest))
                else:
                    while msgs:
                        msg = msgs.popleft()
                        # Setting timeout to never expire
                        self.transmit(msg,
                                      rid,
                                      timeout=self.messageTimeout,
                                      serialized=True)
                        logger.trace("{} sending msg {} to {}".format(
                            self, msg, dest))

        for rid in removedRemotes:
            logger.warning("{}{} rid {} has been removed".format(
                CONNECTION_PREFIX, self, rid),
                           extra={"cli": False})
            msgs = self.outBoxes[rid]
            if msgs:
                self.discard(msgs,
                             "{}rid {} no longer available".format(
                                 CONNECTION_PREFIX, rid),
                             logMethod=logger.debug)
            del self.outBoxes[rid]
def test_no_batch_if_msg_size_more_then_limit():
    msg_limit = 100
    msgs = [{1: randomString(101)}]
    res = split_messages_on_batches(msgs, json.dumps, lambda l: l <= msg_limit)
    assert res is None
def test_no_split_if_msg_size_less_then_limit():
    msg_limit = 100
    msgs = [{1: randomString(10)}]
    res = split_messages_on_batches(msgs, json.dumps, lambda l: l <= msg_limit)
    assert len(res) == 1
def split_ut(msgs):
    return split_messages_on_batches(msgs, make_batch_func,
                                     check_batch_len_func)