Example #1
def test_batch_done_unsuccessfully():
    client = create_client()
    message = create_message()
    batch1 = mock.Mock(spec=client._batch_class)
    batch2 = mock.Mock(spec=client._batch_class)
    batch3 = mock.Mock(spec=client._batch_class)

    sequencer = ordered_sequencer.OrderedSequencer(client, "topic_name",
                                                   _ORDERING_KEY)
    sequencer._set_batches([batch1, batch2, batch3])

    # Make the batch fail.
    sequencer._batch_done_callback(success=False)

    # Sequencer should remain as a sentinel to indicate this ordering key is
    # paused. Therefore, don't call the cleanup callback.
    assert not sequencer.is_finished()

    # Cancel the remaining batches.
    assert batch2.cancel.call_count == 1
    assert batch3.cancel.call_count == 1

    # Remove all the batches.
    assert len(sequencer._get_batches()) == 0

    # Verify that the sequencer is paused. Publishing while paused returns a
    # future with an exception.
    future = sequencer.publish(message)
    assert future.exception().ordering_key == _ORDERING_KEY
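The failed batch leaves the sequencer paused rather than finished. The sketch below shows, in the same style as the test above, how publishing could resume afterwards; it assumes the sequencer exposes an unpause() method that clears the paused state, and it reuses the create_client()/create_message() helpers from these examples.

def test_unpause_after_failed_batch():
    client = create_client()
    message = create_message()
    batch = mock.Mock(spec=client._batch_class)

    sequencer = ordered_sequencer.OrderedSequencer(client, "topic_name",
                                                   _ORDERING_KEY)
    sequencer._set_batch(batch)

    # Failing the batch pauses the sequencer for this ordering key.
    sequencer._batch_done_callback(success=False)

    # Assumption: unpause() flips the sequencer back to accepting messages,
    # so publish() returns a normal future instead of one carrying a
    # paused-ordering-key exception.
    sequencer.unpause()
    future = sequencer.publish(message)
    assert future is not None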
Example #2
    def _get_or_create_sequencer(self, topic, ordering_key):
        """ Get an existing sequencer or create a new one given the (topic,
            ordering_key) pair.
        """
        sequencer_key = (topic, ordering_key)
        sequencer = self._sequencers.get(sequencer_key)
        if sequencer is None:
            if ordering_key == "":
                sequencer = unordered_sequencer.UnorderedSequencer(self, topic)
            else:
                sequencer = ordered_sequencer.OrderedSequencer(
                    self, topic, ordering_key)
            self._sequencers[sequencer_key] = sequencer

        return sequencer
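A companion test in the style of the surrounding examples can pin down the caching behaviour this method implements. This is a sketch assuming create_client() returns the publisher client that owns _get_or_create_sequencer and that unordered_sequencer is importable alongside ordered_sequencer.

def test_get_or_create_sequencer_caches_per_key():
    client = create_client()

    # The same (topic, ordering_key) pair returns the same sequencer.
    seq_a = client._get_or_create_sequencer("topic_name", _ORDERING_KEY)
    seq_b = client._get_or_create_sequencer("topic_name", _ORDERING_KEY)
    assert seq_a is seq_b

    # A different ordering key gets its own OrderedSequencer.
    seq_c = client._get_or_create_sequencer("topic_name", "other-key")
    assert seq_c is not seq_a
    assert isinstance(seq_c, ordered_sequencer.OrderedSequencer)

    # An empty ordering key yields an UnorderedSequencer instead.
    seq_d = client._get_or_create_sequencer("topic_name", "")
    assert isinstance(seq_d, unordered_sequencer.UnorderedSequencer)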
Example #3
def test_batch_done_successfully():
    client = create_client()
    batch = mock.Mock(spec=client._batch_class)

    sequencer = ordered_sequencer.OrderedSequencer(client, "topic_name", _ORDERING_KEY)
    sequencer._set_batch(batch)

    sequencer._batch_done_callback(success=True)

    # One batch is done, so the OrderedSequencer has no more work, and should
    # return true for is_finished().
    assert sequencer.is_finished()

    # No batches remain in the batches list.
    assert len(sequencer._get_batches()) == 0
Example #4
def test_publish_after_finish():
    client = create_client()
    batch = mock.Mock(spec=client._batch_class)

    sequencer = ordered_sequencer.OrderedSequencer(client, "topic_name", _ORDERING_KEY)
    sequencer._set_batch(batch)

    sequencer._batch_done_callback(success=True)

    # One batch is done, so the OrderedSequencer has no more work, and should
    # return true for is_finished().
    assert sequencer.is_finished()

    message = create_message()
    # It's legal to publish after being finished.
    sequencer.publish(message)

    # Go back to accepting-messages mode.
    assert not sequencer.is_finished()
Example #5
def test_batch_done_successfully_one_batch_remains():
    client = create_client()
    batch1 = mock.Mock(spec=client._batch_class)
    batch2 = mock.Mock(spec=client._batch_class)

    sequencer = ordered_sequencer.OrderedSequencer(client, "topic_name", _ORDERING_KEY)
    sequencer._set_batches([batch1, batch2])

    sequencer._batch_done_callback(success=True)

    # One batch is done, but the OrderedSequencer has more work, so is_finished()
    # should return false.
    assert not sequencer.is_finished()

    # Second batch should not be committed, since it may still be able to
    # accept messages.
    assert batch2.commit.call_count == 0

    # Only the second batch remains in the batches list.
    assert len(sequencer._get_batches()) == 1
Example #6
def test_batch_done_successfully_many_batches_remain():
    client = create_client()
    batch1 = mock.Mock(spec=client._batch_class)
    batch2 = mock.Mock(spec=client._batch_class)
    batch3 = mock.Mock(spec=client._batch_class)

    sequencer = ordered_sequencer.OrderedSequencer(client, "topic_name", _ORDERING_KEY)
    sequencer._set_batches([batch1, batch2, batch3])

    sequencer._batch_done_callback(success=True)

    # One batch is done, but the OrderedSequencer has more work, so
    # is_finished() should return false.
    assert not sequencer.is_finished()

    # Second batch should be committed since it is full. We know it's full
    # because there exists a third batch. Batches are created only if the
    # previous one can't accept messages any more / is full.
    assert batch2.commit.call_count == 1

    # Both the second and third batches remain in the batches list.
    assert len(sequencer._get_batches()) == 2
Example #7
def create_ordered_sequencer(client):
    return ordered_sequencer.OrderedSequencer(client, "topic_name",
                                              _ORDERING_KEY)
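The examples also lean on create_client(), create_message(), and the _ORDERING_KEY constant, which are not shown here. A minimal sketch of what they might look like, assuming the tests target google-cloud-pubsub's publisher client (the real helpers may differ):

from unittest import mock

from google.auth import credentials
from google.cloud.pubsub_v1 import publisher, types

# Any non-empty key exercises the ordered code path; the value is illustrative.
_ORDERING_KEY = "ordering_key_1"


def create_client():
    # Mock credentials keep the client from doing real auth or network work;
    # the examples only exercise in-memory sequencer bookkeeping.
    creds = mock.Mock(spec=credentials.Credentials)
    return publisher.Client(credentials=creds)


def create_message():
    # A small, well-formed message so batch size checks always pass.
    return types.PubsubMessage(data=b"foo", attributes={"bar": "baz"})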