Example #1
def test_workflow_with_after_closed():
    workflow = ATestDefinitionWithAfterClosed
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    assert not hasattr(executor.workflow, 'b')
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 4
        },
        result=5).add_decision_task_scheduled().add_decision_task_started())

    # *increment* has completed and its result is now available. The executor
    # should complete the workflow and set its result accordingly.
    assert not hasattr(executor.workflow, 'b')
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(5))

    assert decisions[0] == workflow_completed
    assert executor.workflow.b == 5
Example #2
def test_workflow_with_input():
    workflow = ATestDefinitionWithInput
    executor = Executor(DOMAIN, workflow)

    result = 5
    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=result).add_decision_task_scheduled().add_decision_task_started(
        ))

    # As there is only a single task, the executor should now complete the
    # workflow and set its result accordingly.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(result))

    assert decisions[0] == workflow_completed
Example #3
def test_workflow_map():
    workflow = ATestDefinitionMap
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    nb_parts = ATestDefinitionMap.nb_parts

    # All the futures returned by the map are passed to wait().
    # The executor should then schedule all of them.
    decisions, _ = executor.replay(Response(history=history))
    for i in range(nb_parts):
        check_task_scheduled_decision(decisions[i], increment)

    # Let's add all tasks of the map to the history to simulate their
    # completion.
    decision_id = history.last_id
    for i in range(nb_parts):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state='completed',
            input={'args': i},
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    # All tasks are finished, the executor should complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(
        result=json_dumps([i + 1 for i in range(nb_parts)]))

    assert decisions[0] == workflow_completed
Example #4
def test_more_than_1000_open_activities_scheduled_and_running():
    def get_random_state():
        import random
        return random.choice(['scheduled', 'started'])

    workflow = ATestDefinitionMoreThanMaxOpenActivities
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    # The first time, the executor should schedule
    # ``constants.MAX_OPEN_ACTIVITY_COUNT`` decisions.
    # No timer, because we wait for at least one activity to complete.
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT //
                   constants.MAX_DECISIONS):
        decisions, _ = executor.replay(Response(history=history))
        assert len(decisions) == constants.MAX_DECISIONS

    decision_id = history.last_id
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state=get_random_state(),
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 0
Example #5
def test_workflow_with_two_tasks_same_future():
    workflow = ATestDefinitionTwoTasksSameFuture
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # ``b.result`` and ``c.result`` require the execution of ``double(a)`` and
    # ``increment(a)``. They both depend on the execution of ``increment(1)``,
    # so the executor should schedule ``increment(1)``.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # Now ``a.result`` is available and the executor should schedule the
    # execution of ``double(a)`` and ``increment(a)`` at the same time.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)
    check_task_scheduled_decision(decisions[1], increment)

    # Let's add both tasks to the history to simulate their completion.
    decision_id = history.last_id
    (history.add_activity_task(
        double,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.double-1',
        input={
            'args': 2
        },
        result=4).add_activity_task(
            increment,
            decision_id=decision_id,
            last_state='completed',
            activity_id='activity-tests.data.activities.increment-2',
            input={
                'args': 2
            },
            result=3).add_decision_task_scheduled().add_decision_task_started(
            ))

    # Both tasks completed, hence the executor should complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps((4, 3)))

    assert decisions[0] == workflow_completed
Example #6
def test_more_than_1000_open_activities_partial_max():
    workflow = ATestDefinitionMoreThanMaxOpenActivities
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)
    decisions, _ = executor.replay(Response(history=history))

    first_decision_id = history.last_id
    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT - 2):
        history.add_activity_task(
            increment,
            decision_id=first_decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state='scheduled',
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    assert executor._open_activity_count == constants.MAX_OPEN_ACTIVITY_COUNT
    assert len(decisions) == 2

    history.add_decision_task_completed()
    for i in range(2):
        id_ = constants.MAX_OPEN_ACTIVITY_COUNT - 2 + i + 1
        history.add_activity_task(
            increment,
            decision_id=history.last_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                id_),
            last_state='scheduled',
            result=id_,
        )

    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    assert executor._open_activity_count == constants.MAX_OPEN_ACTIVITY_COUNT
    assert len(decisions) == 0

    history.add_decision_task_completed()

    for i in range(constants.MAX_OPEN_ACTIVITY_COUNT - 2):
        scheduled_id = first_decision_id + i + 1
        history.add_activity_task_started(scheduled_id)
        history.add_activity_task_completed(
            scheduled_id,
            started=history.last_id,
        )

    (history.add_decision_task_scheduled().add_decision_task_started())

    decisions, _ = executor.replay(Response(history=history))
    # 2 already scheduled + 5 to schedule now
    assert executor._open_activity_count == 7
    assert len(decisions) == 5
Example #7
def test_workflow_with_same_task_called_two_times():
    """
    This test checks how the executor behaves when the same task is executed
    two times with a different argument.
    """
    workflow = ATestDefinitionSameTask
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # As the second task depends on the first, the executor should only
    # schedule the first task.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # The first task is finished, the executor should schedule the second one.
    decision_id = history.last_id
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-2',
        input={
            'args': 2
        },
        result=3).add_decision_task_scheduled().add_decision_task_started())

    # The executor should now complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(3))

    assert decisions[0] == workflow_completed
Example #8
def test_workflow_with_two_tasks():
    workflow = ATestDefinition
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # *double* requires the result of *increment*, held by the *a* future.
    # Hence the executor schedules *increment*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # Now ``a.result`` contains the result of *increment*, which is finished.
    # The line ``return b.result`` requires the computation of *double* with
    # ``a.result``, so the executor should schedule *double*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        double,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.double-1',
        input={
            'args': 2
        },
        result=4).add_decision_task_scheduled().add_decision_task_started())

    # *double* has completed and ``b.result`` is now available. The executor
    # should complete the workflow and set its result to ``b.result``.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed
Example #9
def test_workflow_with_repair_and_force_activities():
    workflow = ATestDefinitionWithInput
    history = builder.History(workflow, input={'args': [4]})

    # Now let's build the history to repair
    previous_history = builder.History(workflow, input={'args': [4]})
    decision_id = previous_history.last_id
    (previous_history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment-1',
        input={'args': 4},
        result=57)  # obviously wrong but helps see if things work
     )
    to_repair = History(previous_history)
    to_repair.parse()

    executor = Executor(DOMAIN,
                        workflow,
                        repair_with=to_repair,
                        force_activities="increment|something_else")

    # The activity matches ``force_activities``, so the executor should
    # schedule *increment* again on a real (non-FAKE) task list instead of
    # replaying it from the previous history.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 1
    assert decisions[0]['decisionType'] == 'ScheduleActivityTask'
    attrs = decisions[0]['scheduleActivityTaskDecisionAttributes']
    assert not attrs['taskList']['name'].startswith("FAKE-")
    check_task_scheduled_decision(decisions[0], increment)
Example #10
def test_workflow_failed_from_definition():
    workflow = ATestDefinitionFailWorkflow
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    # Let's directly add the task in state ``failed`` to make the executor fail
    # the workflow.
    history.add_activity_task(
        raise_error,
        decision_id=history.last_id,
        activity_id='activity-tests.data.activities.raise_error-1',
        last_state='failed',
        result=json_dumps(None))

    (history.add_decision_task_scheduled().add_decision_task_started())

    # Now the workflow definition calls ``Workflow.fail('error')`` that should
    # fail the whole workflow.
    decisions, _ = executor.replay(Response(history=history))

    assert executor.workflow.failed is True

    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(reason='Workflow execution failed: error')

    assert decisions[0] == workflow_failed
Example #11
def test_workflow_activity_raises_on_failure():
    workflow = ATestDefinitionActivityRaisesOnFailure
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    history.add_activity_task(
        raise_on_failure,
        decision_id=history.last_id,
        activity_id='activity-tests.data.activities.raise_on_failure-1',
        last_state='failed',
        reason='error')

    (history.add_decision_task_scheduled().add_decision_task_started())

    # The executor should fail the workflow and extract the reason from the
    # exception raised in the workflow definition.
    decisions, _ = executor.replay(Response(history=history))

    assert executor.workflow.failed is True

    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(reason='Workflow execution error in task '
                         'activity-tests.data.activities.raise_on_failure: '
                         '"error"')

    assert decisions[0] == workflow_failed
Example #12
def test_multiple_scheduled_activities():
    """
    When ``Future.exception`` was made blocking if the future is not finished,
    :py:meth:`swf.executor.Executor.resume` did not check ``future.finished``
    before ``future.exception is None``. It mades the call to ``.resume()`` to
    block for the first scheduled task it encountered instead of returning it.
    This issue was fixed in commit 6398aa8.
    With the wrong behaviour, the call to ``executor.replay()`` would not
    schedule the ``double`` task even after the task represented by *b*
    (``self.submit(increment, 2)``) has completed.
    """
    workflow = ATestMultipleScheduledActivitiesDefinition
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    decision_id = history.last_id
    (history.add_activity_task_scheduled(
        increment,
        decision_id=decision_id,
        activity_id='activity-tests.data.activities.increment-1',
        input={'args': 1})
     # The right behaviour is to schedule the ``double`` task when *b* is in
     # state finished.
     .add_activity_task(
         increment,
         decision_id=decision_id,
         activity_id='activity-tests.data.activities.increment-2',
         last_state='completed',
         input={'args': 2},
         result='3'))

    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)
Example #13
def test_workflow_with_two_tasks_not_completed():
    """
    This test checks how the executor behaves when a task is still running.
    """
    workflow = ATestDefinitionWithInput
    executor = Executor(DOMAIN, workflow)

    arg = 4
    result = 5
    history = builder.History(workflow, input={'args': (arg, )})

    # The executor should schedule *increment*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task in state ``started`` to the history.
    decision_id = history.last_id
    scheduled_id = decision_id + 1
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='started',
        activity_id='activity-tests.data.activities.increment-1',
        input={
            'args': 1
        },
        result=5).add_decision_task_scheduled().add_decision_task_started())

    # The executor cannot schedule any other task; it should return an empty
    # list of decisions.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 0

    # Let's now set the task as ``completed`` in the history.
    decision_id = history.last_id
    (history.add_activity_task_completed(scheduled=scheduled_id,
                                         started=scheduled_id + 1,
                                         result=result).
     add_decision_task_scheduled().add_decision_task_started())

    # As there is a single task and it is now finished, the executor should
    # complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(result))

    assert decisions[0] == workflow_completed
Example #14
def test_workflow_reuse_same_future():
    workflow = ATestDefinitionSameFuture
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # *double* depends on *increment*, so the executor should only schedule
    # *increment* at first.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='completed',
        input={
            'args': 1
        },
        activity_id='activity-tests.data.activities.increment-1',
        result=2).add_decision_task_scheduled().add_decision_task_started())

    # *increment* is finished, the executor should schedule *double*.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], double)

    # Let's add the task to the history to simulate its completion.
    decision_id = history.last_id
    (history.add_activity_task(
        double,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.double-1',
        input={
            'args': 2
        },
        result=4).add_decision_task_scheduled().add_decision_task_started())

    # The executor should now complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed
Example #15
def test_workflow_with_child_workflow():
    workflow = ATestDefinitionChildWorkflow
    executor = Executor(DOMAIN, workflow)

    # FIXME the original test only contains args, and checks that both keys are present.
    # FIXME But their order is unspecified from one execution to the next
    input = {'args': (1, ), 'kwargs': {}}
    history = builder.History(workflow, input=input)

    # The executor should schedule the execution of a child workflow.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 1
    assert decisions == [{
        'startChildWorkflowExecutionDecisionAttributes': {
            'workflowId': 'workflow-test_workflow-1',
            'taskList': {
                'name': 'test_task_list'
            },
            'executionStartToCloseTimeout': '3600',
            'input': json_dumps(input),
            'workflowType': {
                'version': 'test_version',
                'name': 'test_workflow'
            },
            'taskStartToCloseTimeout': '300'
        },
        'decisionType': 'StartChildWorkflowExecution'
    }]

    # Let's add the child workflow to the history to simulate its completion.
    (history.add_decision_task().add_child_workflow(
        workflow,
        workflow_id='workflow-test_workflow-1',
        task_list=ATestWorkflow.task_list,
        input='"{\\"args\\": [1], \\"kwargs\\": {}}"',
        result='4'))

    # Now the child workflow is finished and the executor should complete the
    # workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(4))

    assert decisions[0] == workflow_completed
Example #16
def test_workflow_with_more_than_max_decisions():
    workflow = ATestDefinitionMoreThanMaxDecisions
    executor = Executor(DOMAIN, workflow)
    history = builder.History(workflow)

    # The first time, the executor should schedule ``constants.MAX_DECISIONS``
    # decisions and a timer to force the scheduling of the remaining tasks.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == constants.MAX_DECISIONS
    assert decisions[-1].type == 'StartTimer'

    decision_id = history.last_id
    for i in range(constants.MAX_DECISIONS):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state='completed',
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    # Once the first batch of ``constants.MAX_DECISIONS`` tasks is finished,
    # the executor should schedule the 5 remaining ones.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 5

    for i in range(constants.MAX_DECISIONS - 1, constants.MAX_DECISIONS + 5):
        history.add_activity_task(
            increment,
            decision_id=decision_id,
            activity_id='activity-tests.data.activities.increment-{}'.format(
                i + 1),
            last_state='completed',
            result=i + 1)
    (history.add_decision_task_scheduled().add_decision_task_started())

    # All tasks are finished, the executor should complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result='null')

    assert decisions[0] == workflow_completed
Example #17
 def fake_poll(self):
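     # ``self.poll_data`` is expected to hold a base64-encoded JSON payload
     # standing in for the raw response of a real SWF "poll for activity task"
     # call.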
     polled_activity_data = json.loads(b64decode(self.poll_data))
     activity_task = BaseActivityTask.from_poll(
         self.domain, self.task_list, polled_activity_data,
     )
     return Response(
         task_token=activity_task.task_token,
         activity_task=activity_task,
         raw_response=polled_activity_data,
     )
Example #18
def test_workflow_with_before_replay():
    workflow = ATestDefinitionWithBeforeReplay
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    assert not hasattr(executor.workflow, 'a')
    decisions, _ = executor.replay(Response(history=history))
    assert executor.workflow.a == 4
Example #19
    def test_get_event_details(self):
        history = builder.History(ExampleWorkflow, input={})
        signal_input = {'x': 42, 'foo': 'bar', '__propagate': False}
        marker_details = {'baz': 'bae'}
        history.add_signal('a_signal', signal_input)
        history.add_marker('a_marker', marker_details)
        history.add_timer_started('a_timer', 1, decision_id=2)
        history.add_timer_fired('a_timer')

        executor = Executor(DOMAIN, ExampleWorkflow)
        executor.replay(Response(history=history, execution=None))

        details = executor.get_event_details('signal', 'a_signal')
        del details['timestamp']
        expect(details).to.equal({
            'type': 'signal',
            'state': 'signaled',
            'name': 'a_signal',
            'input': signal_input,
            'event_id': 4,
            'external_initiated_event_id': 0,
            'external_run_id': None,
            'external_workflow_id': None,
        })

        details = executor.get_event_details('signal', 'another_signal')
        expect(details).to.be.none

        details = executor.get_event_details('marker', 'a_marker')
        del details['timestamp']
        expect(details).to.equal({
            'type': 'marker',
            'state': 'recorded',
            'name': 'a_marker',
            'details': marker_details,
            'event_id': 5,
        })
        details = executor.get_event_details('marker', 'another_marker')
        expect(details).to.be.none

        details = executor.get_event_details('timer', 'a_timer')
        del details['started_event_timestamp']
        del details['fired_event_timestamp']
        expect(details).to.equal({
            'type': 'timer',
            'state': 'fired',
            'id': 'a_timer',
            'decision_task_completed_event_id': 2,
            'start_to_fire_timeout': 1,
            'started_event_id': 6,
            'fired_event_id': 7,
            'control': None,
        })
        details = executor.get_event_details('timer', 'another_timer')
        expect(details).to.be.none
Example #20
def test_workflow_retry_activity_failed_again():
    workflow = ATestDefinitionRetryActivity
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # There is a single task, hence the executor should schedule it first.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='failed',
        activity_id='activity-tests.data.activities.increment_retry-1').
     add_decision_task_scheduled().add_decision_task_started())

    # As the retry value is one, the executor should retry, i.e. schedule the
    # task again.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state again.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='failed',
        activity_id='activity-tests.data.activities.increment_retry-1').
     add_decision_task_scheduled().add_decision_task_started())

    # There are no retries left. The executor should set ``Future.exception``
    # and complete the workflow, as there is no further task.
    decisions, _ = executor.replay(Response(history=history))

    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    # ``a.result`` is ``None`` because it was not set.
    workflow_completed.complete(result=json_dumps(None))

    assert decisions[0] == workflow_completed
Example #21
def test_workflow_retry_activity():
    workflow = ATestDefinitionRetryActivity
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)

    # There is a single task, hence the executor should schedule it first.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``failed`` state.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='failed',
        activity_id='activity-tests.data.activities.increment_retry-1').
     add_decision_task_scheduled().add_decision_task_started())

    # As the retry value is one, the executor should retry, i.e. schedule the
    # task again.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment_retry)

    # Let's add the task in ``completed`` state.
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        decision_id=decision_id,
        last_state='completed',
        activity_id='activity-tests.data.activities.increment_retry-1',
        input={
            'args': 7
        },
        result=8).add_decision_task_scheduled().add_decision_task_started())

    # Now the task is finished and the executor should complete the workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json_dumps(8))

    assert decisions[0] == workflow_completed
Example #22
    def poll(self, task_list=None, identity=None):
        """Polls for an activity task to process from current
        actor's instance defined ``task_list``

        if no activity task was polled, raises a PollTimeout
        exception.

        :param  task_list: task list the Actor should watch for tasks on
        :type   task_list: string

        :param  identity: Identity of the worker making the request,
                          which is recorded in the ActivityTaskStarted
                          event in the workflow history. This enables
                          diagnostic tracing when problems arise.
                          The form of this identity is user defined.
        :type   identity: string

        :raises: PollTimeout

        :returns: task token, polled activity task
        :rtype: (str, ActivityTask)
        """
        logging_context.reset()
        task_list = task_list or self.task_list
        identity = identity or self._identity

        try:
            task = self.connection.poll_for_activity_task(
                self.domain.name, task_list, identity=format.identity(identity),
            )
        except boto.exception.SWFResponseError as e:
            message = self.get_error_message(e)
            if e.error_code == "UnknownResourceFault":
                raise DoesNotExistError(
                    "Unable to poll activity task", message,
                )

            raise ResponseError(message)

        if not task.get("taskToken"):
            raise PollTimeout("Activity Worker poll timed out")

        logging_context.set("workflow_id", task["workflowExecution"]["workflowId"])
        logging_context.set("task_type", "activity")
        logging_context.set("event_id", task["startedEventId"])
        logging_context.set("activity_id", task["activityId"])

        activity_task = ActivityTask.from_poll(self.domain, self.task_list, task,)

        return Response(
            task_token=activity_task.task_token,
            activity_task=activity_task,
            raw_response=task,
        )
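A minimal usage sketch for the ``poll()`` method above; the ``ActivityPoller`` class name, domain, task list, and ``handle()`` helper are illustrative assumptions, not part of the original code:

    # Hypothetical caller of the poll() shown above.
    poller = ActivityPoller(domain, task_list="my-task-list")
    try:
        response = poller.poll(identity="worker-1")
    except PollTimeout:
        pass  # the long poll expired without a task; callers usually poll again
    else:
        handle(response.activity_task)  # hypothetical task handler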
Example #23
def test_workflow_with_child_workflow():
    workflow = TestDefinitionChildWorkflow
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={'args': (1, )})

    # The executor should schedule the execution of a child workflow.
    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 1
    assert decisions == [{
        'startChildWorkflowExecutionDecisionAttributes': {
            'workflowId': 'workflow-test_workflow-1',
            'taskList': {
                'name': 'test_task_list'
            },
            'executionStartToCloseTimeout': '3600',
            'input': '{"args": [1], "kwargs": {}}',
            'workflowType': {
                'version': 'test_version',
                'name': 'test_workflow'
            },
            'taskStartToCloseTimeout': '300'
        },
        'decisionType': 'StartChildWorkflowExecution'
    }]

    # Let's add the child workflow to the history to simulate its completion.
    (history.add_decision_task().add_child_workflow(
        workflow,
        workflow_id='workflow-test_workflow-1',
        task_list=TestWorkflow.task_list,
        input='"{\\"args\\": [1], \\"kwargs\\": {}}"',
        result='4'))

    # Now the child workflow is finished and the executor should complete the
    # workflow.
    decisions, _ = executor.replay(Response(history=history))
    workflow_completed = swf.models.decision.WorkflowExecutionDecision()
    workflow_completed.complete(result=json.dumps(4))

    assert decisions[0] == workflow_completed
Example #24
def test_workflow_with_after_replay():
    workflow = ATestDefinitionWithAfterReplay
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={'args': (4, )})

    # The executor should only schedule the *increment* task.
    assert not hasattr(executor.workflow, 'b')
    decisions, _ = executor.replay(Response(history=history))
    assert executor.workflow.b == 5
    # Check that workflow is not marked as finished
    assert not hasattr(executor.workflow, 'c')
Example #25
def test_activity_task_timeout_retry():
    workflow = ATestDefinitionRetryActivity
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)
    decision_id = history.last_id
    (history.add_activity_task(
        increment_retry,
        activity_id='activity-tests.data.activities.increment_retry-1',
        decision_id=decision_id,
        last_state='timed_out',
        timeout_type='START_TO_CLOSE'))

    decisions, _ = executor.replay(Response(history=history))
    assert len(decisions) == 1
    check_task_scheduled_decision(decisions[0], increment_retry)
Example #26
def test_activity_task_timeout_raises():
    workflow = ATestDefinitionActivityRaisesOnFailure
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)
    decision_id = history.last_id
    (history.add_activity_task(
        raise_on_failure,
        activity_id='activity-tests.data.activities.raise_on_failure-1',
        decision_id=decision_id,
        last_state='timed_out',
        timeout_type='START_TO_CLOSE'))

    decisions, _ = executor.replay(Response(history=history))
    workflow_failed = swf.models.decision.WorkflowExecutionDecision()
    workflow_failed.fail(reason='Workflow execution error in task '
                         'activity-tests.data.activities.raise_on_failure: '
                         '"TimeoutError(START_TO_CLOSE)"')

    assert decisions[0] == workflow_failed
Example #27
def test_activity_not_found_schedule_failed():
    conn = boto.connect_swf()
    conn.register_domain("TestDomain", "50")

    workflow = ATestDefinition
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)
    decision_id = history.last_id
    (history.add_activity_task_schedule_failed(
        activity_id='activity-tests.data.activities.increment-1',
        decision_id=decision_id,
        activity_type={
            'name': increment.name,
            'version': increment.version
        },
        cause='ACTIVITY_TYPE_DOES_NOT_EXIST'))

    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)
Example #28
def test_activity_not_found_schedule_failed_already_exists():
    workflow = ATestDefinition
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow)
    decision_id = history.last_id
    (history.add_activity_task_schedule_failed(
        activity_id='activity-tests.data.activities.increment-1',
        decision_id=decision_id,
        activity_type={
            'name': increment.name,
            'version': increment.version
        },
        cause='ACTIVITY_TYPE_DOES_NOT_EXIST'))

    with patch('swf.models.ActivityType.save',
               raise_already_exists(increment)):
        decisions, _ = executor.replay(Response(history=history))

    check_task_scheduled_decision(decisions[0], increment)
Example #29
def test_workflow_with_repair_if_task_failed():
    workflow = ATestDefinitionWithInput
    history = builder.History(workflow, input={'args': [4]})

    # Now let's build the history to repair
    previous_history = builder.History(workflow, input={'args': [4]})
    decision_id = previous_history.last_id
    (previous_history.add_activity_task(
        increment,
        decision_id=decision_id,
        last_state='failed',
        activity_id='activity-tests.data.activities.increment-1',
        input={'args': 4},
        result=57)  # obviously wrong but helps see if things work
     )
    to_repair = History(previous_history)
    to_repair.parse()

    executor = Executor(DOMAIN, workflow, repair_with=to_repair)

    # The previous task failed, so the executor should not replay it from the
    # previous history; it should schedule *increment* again.
    decisions, _ = executor.replay(Response(history=history))
    check_task_scheduled_decision(decisions[0], increment)
Example #30
def test_task_naming():
    workflow = ATestTaskNaming
    executor = Executor(DOMAIN, workflow)

    history = builder.History(workflow, input={})

    decisions, _ = executor.replay(Response(history=history))
    expected = [
        # non idempotent task, should increment
        "activity-tests.data.activities.increment-1",
        # non idempotent task, should increment again
        "activity-tests.data.activities.increment-2",
        # idempotent task, with arg 1
        "activity-tests.data.activities.triple-bdc09455c37471e0ba7397350413a5e6",
        # idempotent task, with arg 2
        "activity-tests.data.activities.triple-12036b25db61ae6cadf7a003ff523029",
        # idempotent task, with arg 2 too => same task id
        "activity-tests.data.activities.triple-12036b25db61ae6cadf7a003ff523029",
        # class-based task, non idempotent
        "activity-tests.data.activities.Tetra-1",
    ]
    for i in range(0, len(expected)):
        decision = decisions[i]['scheduleActivityTaskDecisionAttributes']
        assert decision['activityId'] == expected[i]