def test_task_state_creation():
    task_state = Task('Task', resource='arn:aws:lambda:us-east-1:1234567890:function:StartLambda')
    task_state.add_retry(Retry(error_equals=['ErrorA', 'ErrorB'], interval_seconds=1, max_attempts=2, backoff_rate=2))
    task_state.add_retry(Retry(error_equals=['ErrorC'], interval_seconds=5))
    task_state.add_catch(Catch(error_equals=['States.ALL'], next_step=Pass('End State')))
    assert task_state.type == 'Task'
    assert len(task_state.retries) == 2
    assert len(task_state.catches) == 1
    assert task_state.to_dict() == {
        'Type': 'Task',
        'Resource': 'arn:aws:lambda:us-east-1:1234567890:function:StartLambda',
        'Retry': [
            {
                'ErrorEquals': ['ErrorA', 'ErrorB'],
                'IntervalSeconds': 1,
                'BackoffRate': 2,
                'MaxAttempts': 2
            },
            {
                'ErrorEquals': ['ErrorC'],
                'IntervalSeconds': 5
            }
        ],
        'Catch': [
            {
                'ErrorEquals': ['States.ALL'],
                'Next': 'End State'
            }
        ],
        'End': True
    }
def test_catch_fail_for_unsupported_state():
    c1 = Choice('My Choice')

    with pytest.raises(ValueError):
        c1.add_catch(
            Catch(error_equals=["States.NoChoiceMatched"],
                  next_step=Fail("ChoiceFailed")))
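

# A minimal sketch (not from the original tests) of the usual alternative:
# since Choice states don't accept catchers, unmatched input is routed through
# the choice's default branch instead. This assumes the library mirrors the AWS
# Step Functions Data Science SDK, where Choice exposes default_choice().
def route_unmatched_choice_to_failure():
    choice = Choice('My Choice')
    choice.default_choice(next_step=Fail("ChoiceFailed"))
    return choice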
    def _integrate_notification_in_workflow(self,
                                            chain_of_tasks: Chain) -> Chain:
        """If a notification is defined we configure an SNS with email
        subscription to alert the user if the stepfunctions workflow failed or
        succeeded.

        :param chain_of_tasks: the workflow definition that contains all the steps we want to execute.
        :return: if notification is set, we adapt the workflow to include an SnsPublishStep on failure or on success.
        If notification is not set, we return the workflow as we received it.
        """
        if self.notification:
            logger.debug(
                "A notification is configured; adding notification steps for "
                "when the stepfunctions workflow fails or succeeds."
            )
            failure_notification = SnsPublishStep(
                "FailureNotification",
                parameters={
                    "TopicArn": self.notification.get_topic_arn(),
                    "Message": f"Stepfunctions workflow {self.unique_name} Failed.",
                },
            )
            pass_notification = SnsPublishStep(
                "SuccessNotification",
                parameters={
                    "TopicArn": self.notification.get_topic_arn(),
                    "Message": f"Stepfunctions workflow {self.unique_name} Succeeded.",
                },
            )

            catch_error = Catch(error_equals=["States.ALL"],
                                next_step=failure_notification)
            workflow_with_notification = Parallel(state_id="notification")
            workflow_with_notification.add_branch(chain_of_tasks)
            workflow_with_notification.add_catch(catch_error)
            workflow_with_notification.next(pass_notification)
            return Chain([workflow_with_notification])
        logger.debug(
            "No notification is configured, returning the workflow definition."
        )
        return chain_of_tasks
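

# A minimal standalone sketch of the wrapping pattern used above. The imports,
# topic ARN, and Pass steps are illustrative assumptions, not part of the
# original class.
from stepfunctions.steps import Catch, Chain, Parallel, Pass, SnsPublishStep


def build_notified_workflow(topic_arn: str, unique_name: str) -> Chain:
    body = Chain([Pass("ExtractStep"), Pass("LoadStep")])
    failure = SnsPublishStep(
        "FailureNotification",
        parameters={
            "TopicArn": topic_arn,
            "Message": f"Stepfunctions workflow {unique_name} Failed.",
        },
    )
    success = SnsPublishStep(
        "SuccessNotification",
        parameters={
            "TopicArn": topic_arn,
            "Message": f"Stepfunctions workflow {unique_name} Succeeded.",
        },
    )
    # Wrapping the body in a Parallel state lets a single Catch cover every
    # step of the branch; the success notification is chained after it.
    wrapper = Parallel(state_id="notification")
    wrapper.add_branch(body)
    wrapper.add_catch(Catch(error_equals=["States.ALL"], next_step=failure))
    wrapper.next(success)
    return Chain([wrapper])

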
def test_catch_creation():
    catch = Catch(error_equals=['States.ALL'], next_step=Fail('End'))
    assert catch.to_dict() == {
        'ErrorEquals': ['States.ALL'],
        'Next': 'End'
    }
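

# The RETRY fixtures referenced by the parametrized tests below are not shown
# in this excerpt; a minimal sketch, mirroring the CATCH fixtures further down
# and the serialization shown in test_task_state_creation, could be:
RETRY = Retry(error_equals=['States.ALL'], interval_seconds=1)
RETRIES = [
    RETRY,
    Retry(error_equals=['States.Timeout'], max_attempts=3)
]
EXPECTED_RETRY = [{'ErrorEquals': ['States.ALL'], 'IntervalSeconds': 1}]
EXPECTED_RETRIES = EXPECTED_RETRY + [{
    'ErrorEquals': ['States.Timeout'],
    'MaxAttempts': 3
}]

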
@pytest.mark.parametrize("retry, expected_retry",
                         [(RETRY, EXPECTED_RETRY),
                          (RETRIES, EXPECTED_RETRIES)])
def test_task_state_constructor_with_retry_adds_retrier_to_retriers(
        retry, expected_retry):
    step = Task('Task', retry=retry)
    assert step.to_dict()['Retry'] == expected_retry


@pytest.mark.parametrize("retry, expected_retry",
                         [(RETRY, EXPECTED_RETRY),
                          (RETRIES, EXPECTED_RETRIES)])
def test_task_state_add_retry_adds_retrier_to_retriers(retry, expected_retry):
    step = Task('Task')
    step.add_retry(retry)
    assert step.to_dict()['Retry'] == expected_retry


CATCH = Catch(error_equals=['States.ALL'], next_step=Pass('End State'))
CATCHES = [
    CATCH,
    Catch(error_equals=['States.TaskFailed'], next_step=Pass('Next State'))
]
EXPECTED_CATCH = [{'ErrorEquals': ['States.ALL'], 'Next': 'End State'}]
EXPECTED_CATCHES = EXPECTED_CATCH + [{
    'ErrorEquals': ['States.TaskFailed'],
    'Next': 'Next State'
}]


@pytest.mark.parametrize("catch, expected_catch",
                         [(CATCH, EXPECTED_CATCH),
                          (CATCHES, EXPECTED_CATCHES)])
def test_parallel_state_constructor_with_catch_adds_catcher_to_catchers(
        catch, expected_catch):
    # Body reconstructed by analogy with the Task retry tests above; the
    # original was cut off in the source.
    step = Parallel('Parallel', catch=catch)
    assert step.to_dict()['Catch'] == expected_catch
    comment="Import database to s3",
    parameters={
        "JobName": "${jobName}",
        "Arguments": {
            "--s3_external_bucket": "${s3ExternalBucket}",
            "--s3_internal_bucket": "${s3InternalBucket}",
            "--enable-glue-datacatalog": "true",
            "--enable-continuous-cloudwatch-log": "true",
            "--enable-s3-parquet-optimized-committer": "true",
            "--job-bookmark-option": "job-bookmark-disable"
        }
    }
)
glue_import_to_s3_job.add_retry(sf.steps.Retry(error_equals=["Glue.AWSGlueException"]))
glue_import_to_s3_job.add_catch(Catch(
    error_equals=["States.TaskFailed"],
    next_step=fail_state
))

crawl_params = sf.steps.Pass(
    state_id='Crawl s3 Params',
    result={
        'crawlerName': "${crawlerName}"
    }
)

crawl_database_start = sf.steps.Task(
    state_id='start crawler',
    resource='${startCrawlerLambda}'
)
# NOTE: the original error list is not shown in this excerpt; 'States.ALL'
# below is a stand-in.
crawl_database_start.add_retry(
    sf.steps.Retry(error_equals=['States.ALL']))

# Separate snippet whose first lines were cut off in the source: a Lambda step
# that creates a SageMaker Autopilot job. The step name, FunctionName, and the
# 'AutopilotJobConfig' key are assumptions used to keep the snippet runnable.
create_autopilot_job_step = LambdaStep(
    'CreateAutopilotJob',
    parameters={
        'FunctionName': 'CreateAutopilotJob',
        'Payload': {
            'AutopilotJobConfig': {
                'IamRole': execution_input['IamRole'],
                'TargetColumnName': execution_input['TargetColumnName'],
                'S3OutputData': execution_input['S3OutputData'],
                'Tags': execution_input['Tags']
            }
        }
    })

create_autopilot_job_step.add_retry(
    Retry(error_equals=["States.TaskFailed"],
          interval_seconds=15,
          max_attempts=2,
          backoff_rate=4.0))
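# With max_attempts=2 the step is retried at most twice on States.TaskFailed:
# the first retry waits interval_seconds = 15 s, the second 15 s * 4.0 = 60 s.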

create_autopilot_job_step.add_catch(
    Catch(error_equals=["States.TaskFailed"], next_step=workflow_failure))

check_autopilot_job_status = LambdaStep(
    'CheckAutopilotJobStatus',
    parameters={
        'FunctionName': 'CheckAutopilotJobStatus',
        'Payload': {
            'AutopilotJobName':
            create_autopilot_job_step.output()['Payload']['AutopilotJobName']
        }
    })

check_job_wait_state = Wait(state_id="Wait", seconds=360)

check_job_choice = Choice(state_id="IsAutopilotJobComplete")
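

# The excerpt ends here. A hedged sketch (not from the original source) of how
# such a Choice is commonly wired; the JSONPath and the routing targets below
# are assumptions.
from stepfunctions.steps.choice_rule import ChoiceRule

check_job_choice.add_choice(
    rule=ChoiceRule.StringEquals(variable='$.Payload.AutopilotJobStatus',
                                 value='Failed'),
    next_step=workflow_failure)
# Anything else falls through to the wait state so the job status is polled again.
check_job_choice.default_choice(next_step=check_job_wait_state)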