Example #1
def test_multiple_groups_pipelines(stub_broker, stub_worker, result_backend):
    # Given a result backend
    # And a broker with the results middleware
    stub_broker.add_middleware(Results(backend=result_backend))

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_work():
        return 1

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_sum(results):
        return sum(results)

    # And these actors are declared
    stub_broker.declare_actor(do_work)
    stub_broker.declare_actor(do_sum)

    pipe = pipeline(
        [group([do_work.message(), do_work.message()]), group([do_sum.message(), do_sum.message()]), do_sum.message()]
    ).run()

    result = pipe.result.get(block=True)

    assert 4 == result
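These tests share a common pytest setup. A minimal sketch of the fixtures they assume (stub_broker, stub_worker, result_backend), modeled on remoulade's stub broker and stub results backend; module paths, fixture bodies, and Worker arguments are assumptions and may differ between remoulade versions:

import pytest

import remoulade
from remoulade import Worker
from remoulade.brokers.stub import StubBroker
from remoulade.results.backends import StubBackend


@pytest.fixture
def stub_broker():
    # A fresh in-memory broker per test, registered as the global broker.
    broker = StubBroker()
    remoulade.set_broker(broker)
    yield broker
    broker.close()


@pytest.fixture
def stub_worker(stub_broker):
    # A worker consuming from the stub broker in background threads.
    worker = Worker(stub_broker, worker_timeout=100)
    worker.start()
    yield worker
    worker.stop()


@pytest.fixture
def result_backend():
    # In-memory results backend; each test wraps it in the Results middleware.
    return StubBackend()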
Example #2
def test_pipelines_with_groups(stub_broker, stub_worker, result_backend):
    # Given a result backend
    # And a broker with the results middleware
    stub_broker.add_middleware(Results(backend=result_backend))

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_work(a):
        return a

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_sum(results):
        return sum(results)

    # And these actors are declared
    stub_broker.declare_actor(do_work)
    stub_broker.declare_actor(do_sum)

    # When I pipe some messages intended for that actor together and run the pipeline
    g = group([do_work.message(12), do_work.message(15)])
    pipe = g | do_sum.message()

    pipe.build()
    assert result_backend.get_group_message_ids(g.group_id) == list(
        g.message_ids)

    pipe.run()

    result = pipe.result.get(block=True)

    assert 12 + 15 == result

    stub_broker.join(do_work.queue_name)
    stub_worker.join()

    # the group results have been forgotten
    assert list(g.results.get()) == [None, None]

    # the message_ids have been forgotten
    with pytest.raises(MessageIdsMissing):
        result_backend.get_group_message_ids(g.group_id)

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def add(a, b):
        return a + b

    stub_broker.declare_actor(add)

    pipe = do_work.message(13) | group([add.message(12), add.message(15)])
    pipe.run()

    result = pipe.result.get(block=True)

    assert [13 + 12, 13 + 15] == list(result)
Example #3
def test_cannot_cancel_on_error_if_no_cancel(stub_broker):
    # Given an actor
    @remoulade.actor()
    def do_work():
        return 42

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    with pytest.raises(NoCancelBackend):
        group((do_work.message() for _ in range(4)), cancel_on_error=True)
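Example #3 fails by design: building a group with cancel_on_error=True raises NoCancelBackend unless a cancel backend is configured. A minimal sketch of wiring one in, modeled on the Cancel(backend=cancel_backend) middleware used by the later cancellation examples; the import path for Cancel and how you obtain cancel_backend are assumptions that depend on your remoulade version:

from remoulade.cancel import Cancel  # assumed import path for the Cancel middleware


def enable_cancellation(broker, cancel_backend):
    # Once the Cancel middleware is registered, groups built with
    # cancel_on_error=True no longer raise NoCancelBackend.
    broker.add_middleware(Cancel(backend=cancel_backend))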
Example #4
def test_composition_id_override(stub_broker, do_work):
    group_messages = group(
        [pipeline([do_work.message(), do_work.message()])]
    ).build(options={"composition_id": "id"})
    assert group_messages[0].options["composition_id"] == "id"
    assert group_messages[0].options["pipe_target"][0]["options"]["composition_id"] == "id"
    pipeline_messages = pipeline(
        [group([do_work.message(), do_work.message()])]
    ).build(composition_id="id")
    assert all(message.options["composition_id"] == "id" for message in pipeline_messages)
Example #5
def test_inner_groups_forbidden(stub_broker, stub_worker, result_backend):
    # Given that I have a result backend
    stub_broker.add_middleware(Results(backend=result_backend))

    # And I have an actor
    @remoulade.actor()
    def do_work():
        return 1

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    # groups of groups are forbidden
    with pytest.raises(ValueError):
        group(group(do_work.message() for _ in range(2)) for _ in range(3))
Example #6
def test_retry_if_increment_group_completion_fail(stub_broker, stub_worker):
    with patch.object(
            StubBackend,
            "increment_group_completion") as mock_increment_group_completion:
        mock_increment_group_completion.side_effect = Exception(
            "Cannot increment")
        middleware = Results(backend=StubBackend())
        stub_broker.add_middleware(middleware)

        attempts = []

        # And an actor that stores results
        @remoulade.actor(store_results=True)
        def do_work(*args):
            attempts.append(1)

        # And this actor is declared
        stub_broker.declare_actor(do_work)

        # When I send that actor a message
        (group([do_work.message(), do_work.message()])
         | do_work.message()).run()

        # And wait for a result
        stub_broker.join(do_work.queue_name)
        stub_worker.join()

        # The actor has been tried 8 times (4 attempts for each do_work in the group; the final do_work never runs)
        assert len(attempts) == 8
Example #7
def test_compositions_are_canceled_on_actor_failure(stub_broker, stub_worker, cancel_backend):
    # Given a cancel backend
    # And a broker with the cancel middleware
    stub_broker.add_middleware(Cancel(backend=cancel_backend))

    # And an actor who doesn't fail
    @remoulade.actor
    def do_work(arg=None):
        return 1

    # And an actor who fails
    @remoulade.actor
    def do_fail(arg):
        raise Exception

    # And those actors are declared
    remoulade.declare_actors([do_work, do_fail])

    g = group([do_work.message(), do_fail.message() | do_work.message()], cancel_on_error=True)

    # When I group a few jobs together and run it
    g.run()

    stub_broker.join(do_fail.queue_name)
    stub_worker.join()

    # The whole composition should have been canceled
    assert cancel_backend.is_canceled("", g.group_id)
Example #8
def test_complex_pipelines(stub_broker, stub_worker, result_backend):
    # Given a result backend
    # And a broker with the results middleware
    stub_broker.add_middleware(Results(backend=result_backend))

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_work():
        return 1

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def add(a):
        return 1 + a

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_sum(results):
        return sum(results)

    # And these actors are declared
    stub_broker.declare_actor(do_work)
    stub_broker.declare_actor(do_sum)
    stub_broker.declare_actor(add)

    pipe = do_work.message_with_options(pipe_ignore=True) | add.message() | add.message()  # returns 3 (1 -> 2 -> 3)
    g = group([pipe, add.message(), add.message(), do_work.message_with_options(pipe_ignore=True)])  # returns [3, 2, 2, 1]
    final_pipe = do_work.message() | g | do_sum.message() | add.message()  # returns 9
    final_pipe.run()

    result = final_pipe.result.get(block=True)

    assert 9 == result
Example #9
def test_groups_expose_completion_stats(stub_broker, stub_worker, result_backend):
    # Given that I have a result backend
    stub_broker.add_middleware(Results(backend=result_backend))

    # And an actor that waits some amount of time
    condition = Condition()

    @remoulade.actor(store_results=True)
    def wait(n):
        time.sleep(n)
        with condition:
            condition.notify_all()
            return n

    # And this actor is declared
    stub_broker.declare_actor(wait)

    # When I group messages of varying durations together and run the group
    g = group(wait.message(n) for n in range(1, 4))
    g.run()

    # Then every time a job in the group completes, the completed_count should increase
    for count in range(1, len(g) + 1):
        with condition:
            condition.wait(5)
            time.sleep(0.1)  # give the worker time to set the result
            assert g.results.completed_count == count

    # Finally, completed should be true
    assert g.results.completed
Example #10
def test_group_forget(stub_broker, result_backend, stub_worker, block):
    # Given a result backend
    stub_broker.add_middleware(Results(backend=result_backend))

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_work():
        return 42

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    # And I've run a group
    messages = [do_work.message() for _ in range(5)]
    g = group(messages)
    g.run()

    # If not blocking, wait for the group to complete first
    if not block:
        stub_broker.join(do_work.queue_name)
        stub_worker.join()

    # If I forget the results
    results = g.results.get(block=block, forget=True)
    assert list(results) == [42] * 5

    # All messages have been forgotten
    results = g.results.get()
    assert list(results) == [None] * 5
Example #11
def test_groups_execute_jobs_in_parallel(stub_broker, stub_worker, result_backend):
    # Given that I have a result backend
    stub_broker.add_middleware(Results(backend=result_backend))

    # And I have an actor that sleeps for 100ms
    @remoulade.actor(store_results=True)
    def wait():
        time.sleep(0.1)

    # And this actor is declared
    stub_broker.declare_actor(wait)

    # When I group multiple of these actors together and run them
    t = time.monotonic()
    g = group([wait.message() for _ in range(5)])
    g.run()

    # group message_ids are not stored if not needed
    with pytest.raises(MessageIdsMissing):
        result_backend.get_group_message_ids(g.group_id)

    # And wait on the group to complete
    results = list(g.results.get(block=True))

    # Then the total elapsed time should be less than 500ms
    assert time.monotonic() - t <= 0.5

    # And I should get back as many results as there were jobs in the group
    assert len(results) == len(g)

    # And the group should be completed
    assert g.results.completed
    assert isinstance(g.results, CollectionResults)
Example #12
def test_composition_can_be_canceled(stub_broker, stub_worker, cancel_backend):
    # Given a cancel backend
    # And a broker with the cancel middleware
    stub_broker.add_middleware(Cancel(backend=cancel_backend))

    calls_count = 0

    # And an actor
    @remoulade.actor()
    def do_work():
        nonlocal calls_count
        calls_count += 1
        raise ValueError()

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    # And a composition
    g = group([do_work.message() | do_work.message() for _ in range(2)])

    # If the composition is canceled
    cancel_backend.cancel([g.group_id])

    g.run()

    stub_broker.join(do_work.queue_name)
    stub_worker.join()

    # Its messages should not have run
    assert calls_count == 0
Example #13
def test_group_are_canceled_on_actor_failure(stub_broker, stub_worker, cancel_backend):
    # Given a cancel backend
    # And a broker with the cancel middleware
    stub_broker.add_middleware(Cancel(backend=cancel_backend))

    has_been_called = []

    # And an actor
    @remoulade.actor()
    def do_work():
        has_been_called.append(1)
        raise ValueError()

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    g = group((do_work.message() for _ in range(4)), cancel_on_error=True)

    # When I group a few jobs together and run it
    g.run()

    stub_broker.join(do_work.queue_name)
    stub_worker.join()

    # All messages should have been canceled
    assert all(cancel_backend.is_canceled(child.message_id, g.group_id) for child in g.children)
Example #14
def test_save_group_id_in_message(self, stub_broker, state_middleware, do_work):
    msg = do_work.message()
    group_id = group([msg]).run().group_id
    state = state_middleware.backend.get_state(msg.message_id)
    assert state.message_id == msg.message_id
    assert state.group_id == group_id
Example #15
def test_cancel_pipeline_or_groups(stub_broker, stub_worker, cancel_backend, with_pipeline):
    # Given a cancel backend
    # And a broker with the cancel middleware
    stub_broker.add_middleware(Cancel(backend=cancel_backend))

    has_been_called = []

    # And an actor
    @remoulade.actor()
    def do_work():
        has_been_called.append(1)
        raise ValueError()

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    if with_pipeline:
        g = pipeline((do_work.message() for _ in range(4)))
    else:
        g = group((do_work.message() for _ in range(4)))

    g.cancel()
    g.run()

    stub_broker.join(do_work.queue_name)
    stub_worker.join()

    # All messages should have been canceled
    assert all(cancel_backend.is_canceled(child.message_id, None) for child in g.children)
    assert len(has_been_called) == 0
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("uri", nargs="+", help="A website URI.")

    arguments = parser.parse_args()
    jobs = group(request.message(uri) | count_words.message() for uri in arguments.uri).run()
    for uri, count in zip(arguments.uri, jobs.results.get(block=True)):
        print(" * {uri} has {count} words".format(uri=uri, count=count))

    return 0
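Example #16 enqueues one pipeline per URI but does not show the two actors it relies on. A hypothetical sketch of what request and count_words might look like (the actor names come from the example; the bodies, the use of the requests library, and the store_results flag are assumptions, and a broker with the Results middleware is assumed to be configured):

import requests

import remoulade


# Assumed actor: fetch the page body for a URI and pipe it onward.
@remoulade.actor
def request(uri):
    return requests.get(uri).text


# Assumed actor: count the words in the fetched text; its result is what
# jobs.results.get(block=True) returns for each pipeline in the group.
@remoulade.actor(store_results=True)
def count_words(text):
    return len(text.split())


remoulade.declare_actors([request, count_words])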
Example #17
def test_pipeline_with_groups_and_pipe_ignore(stub_broker, stub_worker,
                                              result_backend):
    # Given a result backend
    # And a broker with the results middleware
    stub_broker.add_middleware(Results(backend=result_backend))

    # Given an actor that does not store results
    @remoulade.actor()
    def do_work():
        return 1

    @remoulade.actor(store_results=True)
    def do_other_work():
        return 2

    # And these actors are declared
    stub_broker.declare_actor(do_work)
    stub_broker.declare_actor(do_other_work)

    # When I pipe a group with another actor
    pipe = group([do_work.message(), do_work.message()]) | do_other_work.message_with_options(pipe_ignore=True)
    pipe.run()

    # I don't get any error as long as the second actor has pipe_ignore=True
    result = pipe.result.get(block=True)

    assert 2 == result

    # But if it doesn't, the pipeline cannot finish
    pipe = group([do_work.message(), do_work.message()]) | do_other_work.message()
    pipe.run()

    stub_broker.join(do_work.queue_name)
    stub_worker.join()

    with pytest.raises(ResultMissing):
        pipe.result.get()
Example #18
def test_local_broker_with_groups(local_broker, local_result_backend):
    local_broker.add_middleware(Results(backend=local_result_backend))

    # Given that I have an actor that stores its results
    @remoulade.actor(store_results=True)
    def add(a, b):
        return a + b

    # And this actor is declared
    local_broker.declare_actor(add)

    # When I run a group
    g = group([add.message(1, 2), add.message(3, 4), add.message(4, 5)])
    g.run()

    assert list(g.results.get()) == [3, 7, 9]
Example #19
def test_pipelines_store_results_error(stub_broker, result_backend,
                                       stub_worker, store_results):
    # And a broker with the results middleware
    stub_broker.add_middleware(Results(backend=result_backend))

    # Given an actor that fails
    @remoulade.actor(store_results=store_results)
    def do_work_fail():
        raise ValueError()

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_work():
        return 42

    # And these actors are declared
    stub_broker.declare_actor(do_work_fail)
    stub_broker.declare_actor(do_work)

    # And I've run a pipeline
    g = group([do_work.message(), do_work.message(), do_work.message()])
    pipe = do_work_fail.message() | do_work.message() | g | do_work.message()
    pipe.run()

    stub_broker.join(do_work.queue_name)
    stub_worker.join()

    # I get an error
    if store_results:
        with pytest.raises(ErrorStored) as e:
            pipe.children[0].result.get()
        assert str(e.value) == "ValueError()"

    for i in [1, 3]:
        with pytest.raises(ErrorStored) as e:
            pipe.children[i].result.get()
        assert str(e.value).startswith("ParentFailed")

    for child in g.children:
        with pytest.raises(ErrorStored) as e:
            child.result.get()
        assert str(e.value).startswith("ParentFailed")
Example #20
def test_groups_can_time_out(stub_broker, stub_worker, result_backend):
    # Given that I have a result backend
    stub_broker.add_middleware(Results(backend=result_backend))

    # And I have an actor that sleeps for 300ms
    @remoulade.actor(store_results=True)
    def wait():
        time.sleep(0.3)

    # And this actor is declared
    stub_broker.declare_actor(wait)

    # When I group a few jobs together and run it
    g = group(wait.message() for _ in range(2))
    g.run()

    # And wait for the group to complete with a timeout
    # Then a ResultTimeout error should be raised
    with pytest.raises(ResultTimeout):
        g.results.wait(timeout=100)

    # And the group should not be completed
    assert not g.results.completed
Example #21
def test_group_wait_forget(stub_broker, result_backend, stub_worker):
    # Given a result backend
    # And a broker with the results middleware
    stub_broker.add_middleware(Results(backend=result_backend))

    # Given an actor that stores results
    @remoulade.actor(store_results=True)
    def do_work():
        return 42

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    # And I've run a group
    messages = [do_work.message() for _ in range(5)]
    g = group(messages)
    g.run()

    # If I forget the results
    g.results.wait(forget=True)

    # All messages have been forgotten
    assert list(g.results.get()) == [None] * 5
Example #22
def test_compositions_are_canceled_on_message_cancel(stub_broker, cancel_backend, state_middleware, api_client):
    # Given a cancel backend
    # And a broker with the cancel middleware
    stub_broker.add_middleware(Cancel(backend=cancel_backend))

    # And an actor
    @remoulade.actor
    def do_work(arg=None):
        return 1

    # And this actor is declared
    stub_broker.declare_actor(do_work)

    message_to_cancel = do_work.message()

    # And a group that I enqueue
    g = group([message_to_cancel | do_work.message(), do_work.message()], cancel_on_error=True)
    g.run()

    # When I cancel a message of this group
    api_client.post("messages/cancel/" + message_to_cancel.message_id)

    # The whole composition should be canceled
    assert cancel_backend.is_canceled("", g.group_id)