Example #1
def test_workflow_map():
    workflow = ATestDefinitionMap
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    nb_parts = ATestDefinitionMap.nb_parts

    # All the futures returned by the map are passed to wait().
    # The executor should then schedule all of them.
    decisions, _ = executor.replay(Response(history=history))
    for i in range(nb_parts):
        check_task_scheduled_decision(decisions[i], increment)

    # Let's add all tasks of the map to the history to simulate their
    # completion.
    decision_id = history.last_id
    for i in range(nb_parts):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state='completed',
            input={'args': i},
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    # All tasks are finished, the executor should complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(
        result=json_dumps([i + 1 for i in range(nb_parts)]))

    assert decisions[0] == workflow_completed
Example #2
def load_workflow(domain,
                  workflow_name,
                  task_list=None,
                  repair_with=None,
                  force_activities=None):
    """
    Load a workflow.
    :param domain:
    :type domain: str
    :param workflow_name:
    :type workflow_name: str
    :param task_list:
    :type task_list: Optional[str]
    :param repair_with:
    :type repair_with: Optional[simpleflow.history.History]
    :param force_activities:
    :type force_activities: Optional[str]
    :return: Executor for this workflow
    :rtype: Executor
    """
    module_name, object_name = workflow_name.rsplit('.', 1)
    module = __import__(module_name, fromlist=['*'])

    workflow = getattr(module, object_name)
    return Executor(swf.models.Domain(domain),
                    workflow,
                    task_list,
                    repair_with=repair_with,
                    force_activities=force_activities)
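
A hedged usage sketch for the loader above (the domain name and dotted workflow path are placeholders, not part of the original snippet):

# Hypothetical call: "TestDomain" and "examples.basic.BasicWorkflow" are
# made-up names used only for illustration.
executor = load_workflow("TestDomain",
                         "examples.basic.BasicWorkflow",
                         task_list="custom-task-list")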
Example #3
def test_more_than_1000_open_activities_scheduled():
    workflow = TestDefinitionMoreThanMaxOpenActivities
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    # The first time, the executor should schedule
    # ``constants.MAX_OPEN_ACTIVITY_COUNT`` decisions.
    # No timer because we wait for at least an activity to complete.
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT // constants.MAX_DECISIONS):
        decisions, _ = executor.replay(history)
        assert len(decisions) == constants.MAX_DECISIONS

    decision_id = history.last_id
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.test_dataflow.increment-{}'.format(
                i + 1),
            last_state='scheduled',
            result=i + 1)
    (history
        .add_decision_task_scheduled()
        .add_decision_task_started())

    decisions, _ = executor.replay(history)
    assert executor._open_activity_count == constants.MAX_OPEN_ACTIVITY_COUNT
    assert len(decisions) == 0
Example #4
def test_more_than_1000_open_activities_scheduled_and_running():
    def get_random_state():
        import random
        return random.choice(['scheduled', 'started'])

    workflow = ATestDefinitionMoreThanMaxOpenActivities
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    # The first time, the executor should schedule
    # ``constants.MAX_OPEN_ACTIVITY_COUNT`` decisions.
    # No timer because we wait for at least an activity to complete.
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT //
                   constants.MAX_DECISIONS):
        decisions, _ = executor.replay(Response(history=history))
        assert len(decisions) == constants.MAX_DECISIONS

    decision_id = history.last_id
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state=get_random_state(),
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 0
Example #5
def test_on_failure_callback():
    workflow = TestOnFailureDefinition
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    history.add_activity_task(
        raise_error,
        decision_id=history.last_id,
        activity_id='activity-tests.test_dataflow.raise_error-1',
        last_state='failed',
        reason='error')

    (history
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # The executor should fail the workflow and extract the reason from the
    # exception raised in the workflow definition.
    decisions, _ = executor.replay(history)

    assert executor._workflow.failed is True

    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(
        reason='Workflow execution failed: FAIL')

    assert decisions[0] == workflow_failed
Example #6
def test_activity_task_timeout():
    workflow = TestDefinition
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)
    decision_id = history.last_id
    (history
        .add_activity_task(
            increment,
            activity_id='activity-tests.test_dataflow.increment-1',
            decision_id=decision_id,
            last_state='timed_out',
            timeout_type='START_TO_CLOSE'))

    decisions, _ = executor.replay(history)
    # The task timed out and there is no retry.
    assert len(decisions) == 1

    reason = (
        "Cannot replay the workflow: MultipleExceptions("
        "('futures failed', [TimeoutError(START_TO_CLOSE)]))"
    )
    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(reason=reason)

    decision = decisions[0]
    assert decision.type == 'FailWorkflowExecution'
    assert decision['failWorkflowExecutionDecisionAttributes']['reason'] == reason
Example #7
def test_workflow_with_repair_and_force_activities():
    workflow = ATestDefinitionWithInput
    history = builder.History(workflow, input={'args': [4]})

    # Now let's build the history to repair
    previous_history = builder.History(workflow, input={'args': [4]})
    decision_id = previous_history.last_id
    (previous_history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={'args': 4},
        result=57)  # obviously wrong but helps see if things work
     )
    to_repair = History(previous_history)
    to_repair.parse()

    executor = Executor(DOMAIN,
                        workflow,
                        repair_with=to_repair,
                        force_activities="increment|something_else")

    # Since *increment* matches force_activities, the executor should schedule
    # it again on a real (non-FAKE) task list instead of reusing the repair history.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 1
    assert decisions[0]['decisionType'] == 'ScheduleActivityTask'
    attrs = decisions[0]['scheduleActivityTaskDecisionAttributes']
    assert not attrs['taskList']['name'].startswith("FAKE-")
    check_task_scheduled_decision(decisions[0], increment)
Example #8
def test_multiple_scheduled_activities():
    """
    When ``Future.exception`` was made blocking while the future is not
    finished, :py:meth:`swf.executor.Executor.resume` did not check
    ``future.finished`` before ``future.exception is None``. This made the
    call to ``.resume()`` block on the first scheduled task it encountered
    instead of returning it.
    This issue was fixed in commit 6398aa8.
    With the wrong behaviour, the call to ``executor.replay()`` would not
    schedule the ``double`` task even after the task represented by *b*
    (``self.submit(increment, 2)``) had completed.
    """
    workflow = ATestMultipleScheduledActivitiesDefinition
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    decision_id = history.last_id
    (history.add_activity_task_scheduled(
        increment,
        decision_id=decision_id,
        activity_id='activity-tests.data.activities.increment-1',
        input={'args': 1})
     # The right behaviour is to schedule the ``double`` task when *b* is in
     # state finished.
     .add_activity_task(
         increment,
         decision_id=decision_id,
         activity_id='activity-tests.data.activities.increment-2',
         last_state='completed',
         input={'args': 2},
         result='3'))

    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)
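
A minimal sketch of the ordering issue described in the docstring above, using a stand-in future class rather than simpleflow's real ``Future`` (the class and function names below are hypothetical):

class FakeFuture(object):
    """Stand-in for a future whose ``exception`` blocks until it finishes."""

    def __init__(self, finished, exception=None):
        self.finished = finished
        self._exception = exception

    @property
    def exception(self):
        if not self.finished:
            # The real property would block until the task completes.
            raise RuntimeError("would block until the task completes")
        return self._exception


def resume_sketch(future):
    # Fixed order: only read ``exception`` once ``finished`` is True; an
    # unfinished future is returned for scheduling instead of being waited on.
    if future.finished and future.exception is None:
        return "use the result"
    return "schedule the task"


assert resume_sketch(FakeFuture(finished=False)) == "schedule the task"
assert resume_sketch(FakeFuture(finished=True)) == "use the result"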
Example #9
def test_workflow_with_after_closed():
    workflow = ATestDefinitionWithAfterClosed
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    assert not hasattr(executor.workflow, 'b')
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 4
        },
        result=5).add_decision_task_scheduled().add_decision_task_started())

    # *increment* has completed and its result is now available. The executor
    # should complete the workflow and set its result accordingly.
    assert not hasattr(executor.workflow, 'b')
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(5))

    assert decisions[0] == workflow_completed
    assert executor.workflow.b == 5
Example #10
def test_workflow_failed_from_definition():
    workflow = ATestDefinitionFailWorkflow
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    # Let's directly add the task in state ``failed`` to make the executor fail
    # the workflow.
    history.add_activity_task(
        raise_error,
        decision_id=history.last_id,
        activity_id='activity-tests.data.activities.raise_error-1',
        last_state='failed',
        result=json_dumps(None))

    (history.add_decision_task_scheduled().add_decision_task_started())

    # Now the workflow definition calls ``Workflow.fail('error')`` that should
    # fail the whole workflow.
    decisions, _ = executor.replay(Response(history=history))

    assert executor.workflow.failed is True

    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(reason='Workflow execution failed: error')

    assert decisions[0] == workflow_failed
Example #11
def test_workflow_with_input():
    workflow = ATestDefinitionWithInput
    executor = Executor(DOMAIN, workflow)

    result = 5
    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history
        .add_activity_task(
            increment,
            decision_id=decision_id,
            last_state='completed',
            activity_id='activity-tests.data.activities.increment-1',
            input={'args': 1},
            result=result)
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # As there is only a single task, the executor should now complete the
    # workflow and set its result accordingly.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(result))

    assert decisions[0] == workflow_completed
Example #12
def test_workflow_activity_raises_on_failure():
    workflow = ATestDefinitionActivityRaisesOnFailure
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    history.add_activity_task(
        raise_on_failure,
        decision_id=history.last_id,
        activity_id='activity-tests.data.activities.raise_on_failure-1',
        last_state='failed',
        reason='error')

    (history.add_decision_task_scheduled().add_decision_task_started())

    # The executor should fail the workflow and extract the reason from the
    # exception raised in the workflow definition.
    decisions, _ = executor.replay(Response(history=history))

    assert executor.workflow.failed is True

    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(reason='Workflow execution error in task '
                         'activity-tests.data.activities.raise_on_failure: '
                         '"error"')

    assert decisions[0] == workflow_failed
Example #13
    def test_submit_resolves_priority(self):
        response = Decider(DOMAIN, "test-task-list").poll()
        executor = Executor(DOMAIN, ExampleWorkflow)
        decisions, _ = executor.replay(response)

        expect(decisions).to.have.length_of(5)

        def get_task_priority(decision):
            return decision["scheduleActivityTaskDecisionAttributes"].get(
                "taskPriority")

        # default priority for the whole workflow
        expect(get_task_priority(decisions[0])).to.equal("12")

        # priority passed explicitly
        expect(get_task_priority(decisions[1])).to.equal("5")

        # priority == None
        expect(get_task_priority(decisions[2])).to.be.none

        # priority set at decorator level
        expect(get_task_priority(decisions[3])).to.equal("32")

        # priority set at decorator level but overridden in self.submit()
        expect(get_task_priority(decisions[4])).to.equal("30")
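
The assertions above encode a precedence order for the task priority. As a rough plain-Python illustration (not the simpleflow API; the helper and its argument names are hypothetical), the resolution the test expects looks like this:

_UNSET = object()  # sentinel meaning "no priority passed at submit() time"


def resolve_task_priority(submit_priority=_UNSET,
                          activity_priority=None,
                          workflow_priority=None):
    # Hypothetical helper mirroring the precedence asserted above: a value
    # passed explicitly at submit() time wins (even None), then the priority
    # declared on the activity, then the workflow-wide default.
    if submit_priority is not _UNSET:
        return submit_priority
    if activity_priority is not None:
        return activity_priority
    return workflow_priority


assert resolve_task_priority(workflow_priority="12") == "12"        # decisions[0]
assert resolve_task_priority("5", workflow_priority="12") == "5"    # decisions[1]
assert resolve_task_priority(None, workflow_priority="12") is None  # decisions[2]
assert resolve_task_priority(activity_priority="32",
                             workflow_priority="12") == "32"        # decisions[3]
assert resolve_task_priority("30", activity_priority="32",
                             workflow_priority="12") == "30"        # decisions[4]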
Example #14
def test_workflow_with_two_tasks_same_future():
    workflow = ATestDefinitionTwoTasksSameFuture
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # ``b.result`` and ``c.result`` require the execution of ``double(a)`` and
    # ``increment(a)``. Both depend on the execution of ``increment(1)``, so
    # the executor should schedule ``increment(1)``.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # Now ``a.result`` is available and the executor should schedule the
    # execution of ``double(a)`` and ``increment(a)`` at the same time.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)
    check_task_scheduled_decision(decisions[1], increment)

    # Let's add both tasks to the history to simulate their completion.
    decision_id = history.last_id
    (history
        .add_activity_task(
            double,
            decision_id=decision_id,
            last_state='completed',
            activity_id='activity-tests.data.activities.double-1',
            input={'args': 2},
            result=4)
        .add_activity_task(
            increment,
            decision_id=decision_id,
            last_state='completed',
            activity_id='activity-tests.data.activities.increment-2',
            input={'args': 2},
            result=3)
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # Both tasks completed, hence the executor should complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps((4, 3)))

    assert decisions[0] == workflow_completed
Example #15
def test_workflow_with_before_replay():
    workflow = ATestDefinitionWithBeforeReplay
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    assert not hasattr(executor.workflow, 'a')
    decisions, _ = executor.replay(Response(history=history))
    assert executor.workflow.a == 4
Example #16
def test_more_than_1000_open_activities_partial_max():
    workflow = ATestDefinitionMoreThanMaxOpenActivities
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)
    decisions, _ = executor.replay(Response(history=history))

    first_decision_id = history.last_id
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT - 2):
        history.add_activity_task(
            increment,
            decision_id=first_decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state='scheduled',
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    assert executor._open_activity_count == constants.MAX_OPEN_ACTIVITY_COUNT
    assert len(decisions) == 2

    history.add_decision_task_completed()
    for i in range(2):
        id_ = constants.MAX_OPEN_ACTIVITY_COUNT - 2 + i + 1
        history.add_activity_task(
            increment,
            decision_id=history.last_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                id_),
            last_state='scheduled',
            result=id_,
        )

    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    assert executor._open_activity_count == constants.MAX_OPEN_ACTIVITY_COUNT
    assert len(decisions) == 0

    history.add_decision_task_completed()

    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT - 2):
        scheduled_id = first_decision_id + i + 1
        history.add_activity_task_started(scheduled_id)
        history.add_activity_task_completed(
            scheduled_id,
            started=history.last_id,
        )

    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    # 2 already scheduled + 5 to schedule now
    assert executor._open_activity_count == 7
    assert len(decisions) == 5
Example #17
    def test_get_event_details(self):
        history = builder.History(ExampleWorkflow, input={})
        signal_input = {'x': 42, 'foo': 'bar', '__propagate': False}
        marker_details = {'baz': 'bae'}
        history.add_signal('a_signal', signal_input)
        history.add_marker('a_marker', marker_details)
        history.add_timer_started('a_timer', 1, decision_id=2)
        history.add_timer_fired('a_timer')

        executor = Executor(DOMAIN, ExampleWorkflow)
        executor.replay(Response(history=history, execution=None))

        details = executor.get_event_details('signal', 'a_signal')
        del details['timestamp']
        expect(details).to.equal({
            'type': 'signal',
            'state': 'signaled',
            'name': 'a_signal',
            'input': signal_input,
            'event_id': 4,
            'external_initiated_event_id': 0,
            'external_run_id': None,
            'external_workflow_id': None,
        })

        details = executor.get_event_details('signal', 'another_signal')
        expect(details).to.be.none

        details = executor.get_event_details('marker', 'a_marker')
        del details['timestamp']
        expect(details).to.equal({
            'type': 'marker',
            'state': 'recorded',
            'name': 'a_marker',
            'details': marker_details,
            'event_id': 5,
        })
        details = executor.get_event_details('marker', 'another_marker')
        expect(details).to.be.none

        details = executor.get_event_details('timer', 'a_timer')
        del details['started_event_timestamp']
        del details['fired_event_timestamp']
        expect(details).to.equal({
            'type': 'timer',
            'state': 'fired',
            'id': 'a_timer',
            'decision_task_completed_event_id': 2,
            'start_to_fire_timeout': 1,
            'started_event_id': 6,
            'fired_event_id': 7,
            'control': None,
        })
        details = executor.get_event_details('timer', 'another_timer')
        expect(details).to.be.none
Example #18
def test_workflow_with_after_run():
    workflow = TestDefinitionWithAfterRun
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow,
                              input={'args': (4,)})

    # The executor should only schedule the *increment* task.
    assert not hasattr(executor._workflow, 'b')
    decisions, _ = executor.replay(history)
    assert executor._workflow.b == 5
Example #19
def test_workflow_with_after_replay():
    workflow = ATestDefinitionWithAfterReplay
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    assert not hasattr(executor.workflow, 'b')
    decisions, _ = executor.replay(Response(history=history))
    assert executor.workflow.b == 5
    # Check that workflow is not marked as finished
    assert not hasattr(executor.workflow, 'c')
Example #20
def test_workflow_retry_activity_failed_again():
    workflow = TestDefinitionRetryActivity
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # There is a single task, hence the executor should schedule it first.
    decisions, _ = executor.replay(history)
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state.
    decision_id = history.last_id
    (history
        .add_activity_task(
            increment_retry,
            decision_id=decision_id,
            last_state='failed',
            activity_id='activity-tests.test_dataflow.increment_retry-1')
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # As the retry value is one, the executor should retry i.e. schedule the
    # task again.
    decisions, _ = executor.replay(history)
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state again.
    decision_id = history.last_id
    (history
        .add_activity_task(
            increment_retry,
            decision_id=decision_id,
            last_state='failed',
            activity_id='activity-tests.test_dataflow.increment_retry-1')
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # There are no more retries. The executor should set `Future.exception`,
    # which propagates and makes the executor fail the workflow.
    decisions, _ = executor.replay(history)

    reason = (
        "Cannot replay the workflow: TaskFailed("
        "('activity-tests.test_dataflow.increment_retry-1', 'REASON', 'DETAILS'))"
    )

    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(reason=reason)

    decision = decisions[0]
    assert decision.type == 'FailWorkflowExecution'
    assert decision['failWorkflowExecutionDecisionAttributes']['reason'] == reason
Example #21
def test_workflow_with_two_tasks_not_completed():
    """
    This test checks how the executor behaves when a task is still running.

    """
    workflow = TestDefinitionWithInput
    executor = Executor(DOMAIN, workflow)

    arg = 4
    result = 5
    history = builder.History(workflow,
                              input={'args': (arg,)})

    # The executor should schedule *increment*.
    decisions, _ = executor.replay(history)
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task in state ``started`` to the history.
    decision_id = history.last_id
    scheduled_id = decision_id + 1
    (history
        .add_activity_task(increment,
                           decision_id=decision_id,
                           last_state='started',
                           activity_id='activity-tests.test_dataflow.increment-1',
                           input={'args': 1},
                           result=5)
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # The executor cannot schedule any other task, it returns an empty
    # decision.
    decisions, _ = executor.replay(history)
    assert len(decisions) == 0

    # Let's now set the task as ``completed`` in the history.
    decision_id = history.last_id
    (history
        .add_activity_task_completed(scheduled=scheduled_id,
                                     started=scheduled_id + 1,
                                     result=result)
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # As there is a single task and it is now finished, the executor should
    # complete the workflow.
    decisions, _ = executor.replay(history)
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json.dumps(result))

    assert decisions[0] == workflow_completed
Example #22
def test_workflow_with_same_task_called_two_times():
    """
    This test checks how the executor behaves when the same task is executed
    two times with a different argument.

    """
    workflow = TestDefinitionSameTask
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # As the second task depends on the first, the executor should only
    # schedule the first task.
    decisions, _ = executor.replay(history)
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history
        .add_activity_task(increment,
                           decision_id=decision_id,
                           last_state='completed',
                           activity_id='activity-tests.test_dataflow.increment-1',
                           input={'args': 1},
                           result=2)
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # The first task is finished, the executor should schedule the second one.
    decision_id = history.last_id
    decisions, _ = executor.replay(history)
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history
        .add_activity_task(increment,
                           decision_id=decision_id,
                           last_state='completed',
                           activity_id='activity-tests.test_dataflow.increment-2',
                           input={'args': 2},
                           result=3)
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # The executor should now complete the workflow.
    decisions, _ = executor.replay(history)
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json.dumps(3))

    assert decisions[0] == workflow_completed
Example #23
def load_workflow(domain,
                  workflow_name,
                  task_list=None,
                  repair_with=None,
                  force_activities=None):
    module_name, object_name = workflow_name.rsplit('.', 1)
    module = __import__(module_name, fromlist=['*'])

    workflow = getattr(module, object_name)
    return Executor(swf.models.Domain(domain),
                    workflow,
                    task_list,
                    repair_with=repair_with,
                    force_activities=force_activities)
Example #24
def load_workflow_executor(
    domain,
    workflow_name,
    task_list=None,
    repair_with=None,
    force_activities=None,
    repair_workflow_id=None,
    repair_run_id=None,
):
    """
    Load a workflow executor.

    :param domain:
    :type domain: str | swf.models.Domain
    :param workflow_name:
    :type workflow_name: str
    :param task_list:
    :type task_list: Optional[str]
    :param repair_with:
    :type repair_with: Optional[simpleflow.history.History]
    :param force_activities:
    :type force_activities: Optional[str]
    :param repair_workflow_id: workflow ID to repair
    :type repair_workflow_id: Optional[str]
    :param repair_run_id: run ID to repair
    :type repair_run_id: Optional[str]
    :return: Executor for this workflow
    :rtype: Executor
    """
    logger.debug(
        'load_workflow_executor(workflow_name="{}")'.format(workflow_name))
    module_name, object_name = workflow_name.rsplit(".", 1)
    module = __import__(module_name, fromlist=["*"])

    workflow = getattr(module, object_name)

    # TODO: find the cause of this differentiated behaviour
    if not isinstance(domain, swf.models.Domain):
        domain = swf.models.Domain(domain)

    return Executor(
        domain,
        workflow,
        task_list,
        repair_with=repair_with,
        force_activities=force_activities,
        repair_workflow_id=repair_workflow_id,
        repair_run_id=repair_run_id,
    )
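
Note that this loader accepts either a domain name or an already-built ``swf.models.Domain``; the string form is coerced inside the function. A hedged usage sketch (the names are placeholders):

# "TestDomain" and "examples.basic.BasicWorkflow" are made-up identifiers.
executor_a = load_workflow_executor("TestDomain",
                                    "examples.basic.BasicWorkflow")
executor_b = load_workflow_executor(swf.models.Domain("TestDomain"),
                                    "examples.basic.BasicWorkflow")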
Example #25
def test_workflow_with_two_tasks():
    workflow = ATestDefinition
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # *double* requires the result of *increment*, held by the *a* future.
    # Hence the executor schedules *increment*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # Now ``a.result`` contains the result of *increment*, which has finished.
    # The line ``return b.result`` requires the computation of *double* with
    # ``a.result``, so the executor should schedule *double*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        double,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.double-1',
        input={
            'args': 2
        },
        result=4).add_decision_task_scheduled().add_decision_task_started())

    # *double* has completed and ``b.result`` is now available. The executor
    # should complete the workflow and set its result to ``b.result``.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed
Example #26
def test_activity_task_timeout_retry():
    workflow = ATestDefinitionRetryActivity
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        activity_id='activity-tests.data.activities.increment_retry-1',
        decision_id=decision_id,
        last_state='timed_out',
        timeout_type='START_TO_CLOSE'))

    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 1
    check_task_scheduled_decision(decisions[0], increment_retry)
Example #27
def test_workflow_with_more_than_max_decisions():
    workflow = TestDefinitionMoreThanMaxDecisions
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    # The first time, the executor should schedule ``constants.MAX_DECISIONS``
    # decisions and a timer to force the scheduling of the remaining tasks.
    decisions, _ = executor.replay(history)
    assert len(decisions) == constants.MAX_DECISIONS
    assert decisions[-1].type == 'StartTimer'

    decision_id = history.last_id
    for i in range(constants.MAX_DECISIONS):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.test_dataflow.increment-{}'.format(
                i + 1),
            last_state='completed',
            result=i + 1)
    (history
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # Once the first batch of ``constants.MAX_DECISIONS`` tasks is finished,
    # the executor should schedule the 20 remaining ones.
    decisions, _ = executor.replay(history)
    assert len(decisions) == 20

    for i in range(constants.MAX_DECISIONS - 1, constants.MAX_DECISIONS + 20):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.test_dataflow.increment-{}'.format(
                i + 1),
            last_state='completed',
            result=i + 1)
    (history
        .add_decision_task_scheduled()
        .add_decision_task_started())

    # All tasks are finished, the executor should complete the workflow.
    decisions, _ = executor.replay(history)
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result='null')

    assert decisions[0] == workflow_completed
Example #28
def test_workflow_reuse_same_future():
    workflow = ATestDefinitionSameFuture
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # *double* depends on *increment*, so the executor should only schedule
    # *increment* at first.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        input={
            'args': 1
        },
        activity_id='activity-tests.data.activities.increment-1',
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # *increment* is finished, the executor should schedule *double*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        double,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.double-1',
        input={
            'args': 2
        },
        result=4).add_decision_task_scheduled().add_decision_task_started())

    # The executor should now complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed
Example #29
def test_activity_not_found_schedule_failed():
    workflow = TestDefinition
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)
    decision_id = history.last_id
    (history
        .add_activity_task_schedule_failed(
            activity_id='activity-tests.test_dataflow.increment-1',
            decision_id=decision_id,
            activity_type={
                'name': increment.name,
                'version': increment.version
            },
            cause='ACTIVITY_TYPE_DOES_NOT_EXIST'))

    decisions, _ = executor.replay(history)
    check_task_scheduled_decision(decisions[0], increment)
Example #30
def test_workflow_with_child_workflow():
    workflow = ATestDefinitionChildWorkflow
    executor = Executor(DOMAIN, workflow)

    # FIXME the original test only contains args and checks both keys are present,
    # FIXME but their order is unspecified from one execution to the next.
    input = {'args': (1, ), 'kwargs': {}}
    history = builder.History(workflow, input=input)

    # The executor should schedule the execution of a child workflow.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 1
    assert decisions == [{
        'startChildWorkflowExecutionDecisionAttributes': {
            'workflowId': 'workflow-test_workflow-1',
            'taskList': {
                'name': 'test_task_list'
            },
            'executionStartToCloseTimeout': '3600',
            'input': json_dumps(input),
            'workflowType': {
                'version': 'test_version',
                'name': 'test_workflow'
            },
            'taskStartToCloseTimeout': '300'
        },
        'decisionType': 'StartChildWorkflowExecution'
    }]

    # Let's add the child workflow to the history to simulate its completion.
    (history.add_decision_task().add_child_workflow(
        workflow,
        workflow_id='workflow-test_workflow-1',
        task_list=ATestWorkflow.task_list,
        input='"{\\"args\\": [1], \\"kwargs\\": {}}"',
        result='4'))

    # Now the child workflow is finished and the executor should complete the
    # workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed