Example #1
def test_publish_new_batch_needed(creds):
    client = publisher.Client(credentials=creds)

    # Use mocks in lieu of the actual batch class.
    batch1 = mock.Mock(spec=client._batch_class)
    batch2 = mock.Mock(spec=client._batch_class)

    # Set up the first mock to reject all messages (its publish() returns
    # None) and the second to accept them (its publish() returns a future).
    future = mock.sentinel.future
    future.add_done_callback = mock.Mock(spec=["__call__"])
    batch1.publish.return_value = None
    batch2.publish.return_value = future

    topic = "topic/path"
    client._set_batch(topic, batch1)

    # Actually mock the batch class now.
    batch_class = mock.Mock(spec=(), return_value=batch2)
    client._set_batch_class(batch_class)

    # Publish a message.
    future = client.publish(topic, b"foo", bar=b"baz")
    assert future is mock.sentinel.future

    # Check the mocks.
    batch_class.assert_called_once_with(
        client=mock.ANY,
        topic=topic,
        settings=client.batch_settings,
        batch_done_callback=None,
        commit_when_full=True,
        commit_retry=gapic_v1.method.DEFAULT,
    )
    message_pb = gapic_types.PubsubMessage(data=b"foo", attributes={"bar": "baz"})
    batch1.publish.assert_called_once_with(message_pb)
    batch2.publish.assert_called_once_with(message_pb)
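The test above also assumes module-level imports and a creds fixture that this listing omits. A minimal sketch of that context (the anonymous-credentials fixture is an assumption, not the verbatim test module):

from unittest import mock

import pytest
from google.api_core import gapic_v1
from google.auth import credentials
from google.cloud.pubsub_v1 import publisher
from google.pubsub_v1 import types as gapic_types


@pytest.fixture
def creds():
    # Anonymous credentials let the client be built without real auth.
    return credentials.AnonymousCredentials()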
Example #2
def test_batch_done_callback_called_on_publish_failure():
    batch_done_callback_tracker = BatchDoneCallbackTracker()
    batch = create_batch(batch_done_callback=batch_done_callback_tracker)

    # Ensure messages exist.
    message = gapic_types.PubsubMessage(data=b"foobarbaz")
    batch.publish(message)

    # One response for one published message.
    publish_response = gapic_types.PublishResponse(message_ids=["a"])

    # Induce a publish RPC failure.
    error = google.api_core.exceptions.InternalServerError("uh oh")

    with mock.patch.object(
            type(batch.client.api),
            "publish",
            return_value=publish_response,
            # side_effect takes precedence, so the mocked publish raises.
            side_effect=error,
    ):
        batch._commit()

    assert batch_done_callback_tracker.called
    assert not batch_done_callback_tracker.success
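create_batch and BatchDoneCallbackTracker are helpers defined elsewhere in the batch test module. A plausible sketch of both, assuming a thread-based Batch and a client that never reaches a real backend (names and defaults may differ from the actual module):

from google.auth import credentials
from google.cloud.pubsub_v1 import publisher, types
from google.cloud.pubsub_v1.publisher import _batch


class BatchDoneCallbackTracker(object):
    """Record whether the batch-done callback ran and whether it succeeded."""

    def __init__(self):
        self.called = False
        self.success = None

    def __call__(self, success):
        self.called = True
        self.success = success


def create_batch(batch_done_callback=None, **batch_settings):
    # Wire a Batch to a throwaway client; batch_settings feed BatchSettings.
    client = publisher.Client(credentials=credentials.AnonymousCredentials())
    settings = types.BatchSettings(**batch_settings)
    return _batch.thread.Batch(
        client, "topic_name", settings, batch_done_callback=batch_done_callback
    )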
Example #3
def test__on_response_with_ordering_keys():
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(received_messages=[
        gapic_types.ReceivedMessage(
            ack_id="fack",
            message=gapic_types.PubsubMessage(
                data=b"foo", message_id="1", ordering_key=""),
        ),
        gapic_types.ReceivedMessage(
            ack_id="back",
            message=gapic_types.PubsubMessage(
                data=b"bar", message_id="2", ordering_key="key1"),
        ),
        gapic_types.ReceivedMessage(
            ack_id="zack",
            message=gapic_types.PubsubMessage(
                data=b"baz", message_id="3", ordering_key="key1"),
        ),
    ])

    # Set up the leaser with zero initial messages so that lease management
    # behavior is not exercised by this test.
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)

    # Actually run the method and verify that modack and schedule are called
    # in the expected way.
    manager._on_response(response)

    # All messages should be added to the lease management and have their ACK
    # deadline extended, even those not dispatched to callbacks.
    dispatcher.modify_ack_deadline.assert_called_once_with([
        requests.ModAckRequest("fack", 10),
        requests.ModAckRequest("back", 10),
        requests.ModAckRequest("zack", 10),
    ])

    # The first two messages should be scheduled; the third should be put on
    # hold because it is blocked by the completion of the second, which has
    # the same ordering key.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"

    call_args = schedule_calls[1][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "2"

    # Message 3 should have been put on hold.
    assert manager._messages_on_hold.size == 1
    # No messages available because message 2 (with "key1") has not completed yet.
    assert manager._messages_on_hold.get() is None

    # Complete message 2 (with "key1").
    manager.activate_ordering_keys(["key1"])

    # Completing message 2 should release message 3.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 3
    call_args = schedule_calls[2][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "3"

    # No messages available in the queue.
    assert manager._messages_on_hold.get() is None
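make_running_manager and fake_leaser_add are helpers from the streaming pull manager test module: the former returns a manager whose consumer, dispatcher, leaser, heartbeater, and scheduler are replaced with autospec mocks (hence the six-way unpacking above), and the latter patches a simplified add() onto the leaser mock. A sketch of the leaser helper, assuming a flat per-message size:

def fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):
    # Replace leaser.add() with a fake that only tracks the message count
    # and total bytes, charging every message a flat assumed_msg_size bytes.
    def fake_add(items):
        leaser.message_count += len(items)
        leaser.bytes += len(items) * assumed_msg_size

    leaser.message_count = init_msg_count
    leaser.bytes = init_msg_count * assumed_msg_size
    leaser.add = fake_add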
Example #4
def test_threads_posting_large_messages_do_not_starve():
    settings = types.PublishFlowControl(
        message_limit=100,
        byte_limit=110,
        limit_exceeded_behavior=types.LimitExceededBehavior.BLOCK,
    )
    flow_controller = FlowController(settings)

    # A message that comes close to the entire byte limit.
    large_msg = grpc_types.PubsubMessage(data=b"x" * 100)

    adding_initial_done = threading.Event()
    adding_large_done = threading.Event()
    adding_busy_done = threading.Event()
    releasing_busy_done = threading.Event()
    releasing_large_done = threading.Event()

    # Occupy some of the flow capacity, then try to add a large message. Releasing
    # enough messages should eventually allow the large message to come through, even
    # if more messages are added after it (those should wait for the large message).
    initial_messages = [grpc_types.PubsubMessage(data=b"x" * 10)] * 5
    _run_in_daemon(flow_controller.add, initial_messages, adding_initial_done)
    assert adding_initial_done.wait(timeout=0.1)

    _run_in_daemon(flow_controller.add, [large_msg], adding_large_done)

    # Keep adding more messages after the large one.
    messages = [grpc_types.PubsubMessage(data=b"x" * 10)] * 10
    _run_in_daemon(flow_controller.add,
                   messages,
                   adding_busy_done,
                   action_pause=0.1)

    # At the same time, gradually keep releasing the messages - the freed up
    # capacity should be consumed by the large message, not by the other small
    # messages being added after it.
    _run_in_daemon(flow_controller.release,
                   messages,
                   releasing_busy_done,
                   action_pause=0.1)

    # Sanity check - releasing should have completed by now.
    if not releasing_busy_done.wait(timeout=1.1):
        pytest.fail(
            "Releasing messages blocked or errored.")  # pragma: NO COVER

    # Enough messages released, the large message should have come through in
    # the meantime.
    if not adding_large_done.wait(timeout=0.1):
        pytest.fail(
            "A thread adding a large message starved.")  # pragma: NO COVER

    if adding_busy_done.wait(timeout=0.1):
        pytest.fail("Adding multiple small messages did not block.")  # pragma: NO COVER

    # Releasing the large message should unblock adding the remaining "busy" messages
    # that have not been added yet.
    _run_in_daemon(flow_controller.release, [large_msg], releasing_large_done)
    if not releasing_large_done.wait(timeout=0.1):
        pytest.fail(
            "Releasing a message blocked or errored.")  # pragma: NO COVER

    if not adding_busy_done.wait(timeout=1.0):
        pytest.fail("Adding messages blocked or errored.")  # pragma: NO COVER
Example #5
def test_blocking_on_overflow_until_free_capacity():
    settings = types.PublishFlowControl(
        message_limit=1,
        byte_limit=150,
        limit_exceeded_behavior=types.LimitExceededBehavior.BLOCK,
    )
    flow_controller = FlowController(settings)

    msg1 = grpc_types.PubsubMessage(data=b"x" * 100)
    msg2 = grpc_types.PubsubMessage(data=b"y" * 100)
    msg3 = grpc_types.PubsubMessage(data=b"z" * 100)
    msg4 = grpc_types.PubsubMessage(data=b"w" * 100)

    # If there is a concurrency bug in FlowController, we do not want to block
    # the main thread running the tests, thus we delegate all add/release
    # operations to daemon threads and check the outcome (blocked/not blocked)
    # through Events.
    adding_1_done = threading.Event()
    adding_2_done = threading.Event()
    adding_3_done = threading.Event()
    adding_4_done = threading.Event()
    releasing_1_done = threading.Event()
    releasing_x_done = threading.Event()

    # Adding a message with free capacity should not block.
    _run_in_daemon(flow_controller.add, [msg1], adding_1_done)
    if not adding_1_done.wait(timeout=0.1):
        pytest.fail(  # pragma: NO COVER
            "Adding a message with enough flow capacity blocked or errored.")

    # Adding messages when there is not enough capacity should block, even if
    # added through multiple threads.
    _run_in_daemon(flow_controller.add, [msg2], adding_2_done)
    if adding_2_done.wait(timeout=0.1):
        pytest.fail(
            "Adding a message on overflow did not block.")  # pragma: NO COVER

    _run_in_daemon(flow_controller.add, [msg3], adding_3_done)
    if adding_3_done.wait(timeout=0.1):
        pytest.fail(
            "Adding a message on overflow did not block.")  # pragma: NO COVER

    _run_in_daemon(flow_controller.add, [msg4], adding_4_done)
    if adding_4_done.wait(timeout=0.1):
        pytest.fail(
            "Adding a message on overflow did not block.")  # pragma: NO COVER

    # After releasing one message, there should be room for a new message, which
    # should result in unblocking one of the waiting threads.
    _run_in_daemon(flow_controller.release, [msg1], releasing_1_done)
    if not releasing_1_done.wait(timeout=0.1):
        pytest.fail(
            "Releasing a message blocked or errored.")  # pragma: NO COVER

    done_status = [
        adding_2_done.wait(timeout=0.1),
        adding_3_done.wait(timeout=0.1),
        adding_4_done.wait(timeout=0.1),
    ]

    # In sum() we use the fact that True==1 and False==0, and that Event.wait()
    # returns False only if it times out, i.e. its internal flag has not been set.
    done_count = sum(done_status)
    assert done_count == 1, "Exactly one thread should have been unblocked."

    # Release another message and verify that yet another thread gets unblocked.
    added_msg = [msg2, msg3, msg4][done_status.index(True)]
    _run_in_daemon(flow_controller.release, [added_msg], releasing_x_done)

    if not releasing_x_done.wait(timeout=0.1):
        pytest.fail(
            "Releasing messages blocked or errored.")  # pragma: NO COVER

    released_count = sum((
        adding_2_done.wait(timeout=0.1),
        adding_3_done.wait(timeout=0.1),
        adding_4_done.wait(timeout=0.1),
    ))
    assert released_count == 2, "Exactly two threads should have been unblocked."
Example #6
def test_len():
    batch = create_batch(status=BatchStatus.ACCEPTING_MESSAGES)
    assert len(batch) == 0
    batch.publish(gapic_types.PubsubMessage(data=b"foo"))
    assert len(batch) == 1
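create_batch is the same kind of helper sketched under Example #2, except the real test module's version evidently also accepts a status argument. BatchStatus is the publisher's batch lifecycle enum; the assumed import is:

from google.cloud.pubsub_v1.publisher._batch.base import BatchStatus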
Example #7
def make_message(ack_id, ordering_key):
    proto_msg = gapic_types.PubsubMessage(data=b"Q", ordering_key=ordering_key)
    return message.Message(proto_msg._pb, ack_id, 0, queue.Queue())


def create_message():
    return gapic_types.PubsubMessage(data=b"foo", attributes={"bar": "baz"})