Example 1
    def __init__(self, scope: Construct, id: str, functions: LambdaLib, **kwargs) -> None:
        super().__init__(scope, id)

        # Step Function
        submit_job = tasks.LambdaInvoke(self, "Submit Job",
            lambda_function=functions.send_email_approval,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path=sfn.JsonPath.DISCARD
        )

        wait_x = sfn.Wait(self, "Wait",
            time=sfn.WaitTime.duration(Duration.minutes(2))
        )

        get_status = tasks.LambdaInvoke(self, "Get Job Status",
            lambda_function=functions.check_status_dynamo,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path="$.status"
        )

        restrict_es = tasks.LambdaInvoke(self, "Restrict ES Policy",
            lambda_function=functions.restric_es_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_rds = tasks.LambdaInvoke(self, "Restrict RDS",
            lambda_function=functions.restric_rds_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_es_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_ES_PUBLIC)
        restrict_rds_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_RDS_PUBLIC)

        definition = (submit_job.next(wait_x)
                      .next(get_status)
                      .next(sfn.Choice(self, "Job Complete?")
                            .when(sfn.Condition.string_equals("$.status.Payload.status", "Rejected!"), wait_x)
                            # .when(sfn.Condition.string_equals("$.status.Payload.status", "NON_COMPLIANT"), final_task)
                            # .when(sfn.Condition.string_equals("$.status.Payload.status", "Accepted!"), final_task)
                            .otherwise(sfn.Choice(self, "Remediation Choice")
                                       .when(restrict_es_condition, restrict_es)
                                       .when(restrict_rds_condition, restrict_rds))))


        self.state_machine = sfn.StateMachine(self, "StateMachine",
            definition=definition,
            timeout=Duration.hours(2)
        )
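
The LambdaLib construct injected above is external to this snippet; a minimal sketch of the interface it would need to expose (attribute names follow the snippet, everything else is assumed) is:

from aws_cdk import aws_lambda as _lambda
from constructs import Construct

class LambdaLib(Construct):
    # Hypothetical sketch: bundles the Lambda functions the workflow expects.
    def __init__(self, scope: Construct, id: str) -> None:
        super().__init__(scope, id)
        self.send_email_approval = self._function("SendEmailApproval", "send_email_approval.handler")
        self.check_status_dynamo = self._function("CheckStatusDynamo", "check_status_dynamo.handler")
        self.restric_es_policy = self._function("RestricEsPolicy", "restric_es_policy.handler")
        self.restric_rds_policy = self._function("RestricRdsPolicy", "restric_rds_policy.handler")

    def _function(self, name: str, handler: str) -> _lambda.Function:
        # Asset path and runtime are assumptions, not from the original stack.
        return _lambda.Function(self, name,
                                runtime=_lambda.Runtime.PYTHON_3_8,
                                handler=handler,
                                code=_lambda.Code.from_asset("lambdas"))
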
Example 2
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        flip_coin_function = lambda_.Function(
            self,
            "FlipCoinFunction",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="index.handler",
            code=lambda_.Code.from_asset("./sfn/lambda/flip_coin"))

        flip_coin_invoke = tasks.LambdaInvoke(
            self, "FlipCoin", lambda_function=flip_coin_function)

        wait = stepfunctions.Wait(self,
                                  "Wait",
                                  time=stepfunctions.WaitTime.duration(
                                      core.Duration.seconds(5)))

        tails_result = stepfunctions.Pass(self, "TailsResult")
        tails_result.next(flip_coin_invoke)

        choice = stepfunctions.Choice(self,
                                      "HeadsTailsChoice") \
            .when(condition=stepfunctions.Condition.string_equals("$.Payload.result", "heads"),
                  next=stepfunctions.Succeed(self, "HeadsResult")) \
            .when(condition=stepfunctions.Condition.string_equals("$.Payload.result", "tails"),
                  next=tails_result)

        stepfunctions.StateMachine(self,
                                   "StateMachine",
                                   definition=flip_coin_invoke.next(
                                       wait.next(choice)))
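
The asset in ./sfn/lambda/flip_coin is not part of this listing. Since the HeadsTailsChoice conditions read "$.Payload.result", the handler only has to return a result key; a plausible stub (an assumption, not the original source):

# index.py (hypothetical flip_coin handler)
import random

def handler(event, context):
    # LambdaInvoke nests the return value under "Payload",
    # which is where the HeadsTailsChoice conditions look.
    return {"result": random.choice(["heads", "tails"])}

Example 3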
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 polling_delay: int = 5,
                 statemachine_timeout: int = 300,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        state_fn = StateHandlerLambda(self, "config-state-handler").function
        config_fn = AccountConfigLambda(self,
                                        "account-config-handler").function

        config_state = tasks.LambdaInvoke(self,
                                          "Set Configuring State",
                                          lambda_function=state_fn,
                                          output_path="$.Payload")

        completed_state = tasks.LambdaInvoke(self,
                                             "Set Completed State",
                                             lambda_function=state_fn,
                                             output_path="$.Payload")

        config_task = tasks.LambdaInvoke(self,
                                         "Request Account Configuration",
                                         lambda_function=config_fn,
                                         output_path="$.Payload")

        polling_task = tasks.LambdaInvoke(self,
                                          "Poll Account Configuration",
                                          lambda_function=config_fn,
                                          output_path="$.Payload")

        delay = sfn.Wait(self,
                         "Delay Polling",
                         time=sfn.WaitTime.duration(
                             core.Duration.seconds(polling_delay)))

        is_ready = sfn.Choice(self, "Account Ready?")
        acct_ready = sfn.Condition.string_equals('$.state', "READY")
        acct_pending = sfn.Condition.string_equals('$.state', "PENDING")
        success = sfn.Succeed(self, "Config Succeeded")

        failed = sfn.Fail(self,
                          "Config Failed",
                          cause="Bad value in Polling loop")
        # Polling loop: either loop back to the delay or set the completed state and finish
        is_ready.when(acct_pending, delay).when(
            acct_ready, completed_state.next(success)).otherwise(failed)
        # Main chain: the configuration request, the configuring state, a delay, then the polling loop
        config_chain = config_task.next(config_state).next(delay).next(
            polling_task).next(is_ready)

        self.state_machine = sfn.StateMachine(
            self,
            "Account-Config-StateMachine",
            definition=config_chain,
            timeout=core.Duration.seconds(statemachine_timeout))
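
AccountConfigLambda is an external construct. Both tasks that invoke it use output_path="$.Payload" and the "Account Ready?" choice inspects "$.state", so its handler is expected to return a dict carrying a state key. A self-contained stub under those assumptions:

# Hypothetical handler behind AccountConfigLambda (the real code is not shown).
def handler(event, context):
    if event.get("action") == "request":
        # Kick off configuration; the polling loop will check back later.
        return {"action": "poll", "state": "PENDING"}
    # Polling branch: report READY once configuration has finished.
    return {"action": "poll", "state": "READY"}

Example 4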
    def __init__(self, scope: core.App, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        pass_through_lambda = _lambda.Function(
            self,
            'PassThroughLambda',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='pass_through_lambda.handler')

        loop_count_lambda = _lambda.Function(
            self,
            'LoopCountLambda',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='loop_count_lambda.handler')

        start_state_machine = sfn_tasks.LambdaInvoke(
            self,
            "Start CodeBuild Lambda",
            lambda_function=pass_through_lambda,
            payload_response_only=True)

        wait_x = sfn.Wait(
            self,
            "Wait X Seconds",
            time=sfn.WaitTime.seconds_path('$.wait_time'),
        )

        get_state_machine_status = sfn_tasks.LambdaInvoke(
            self,
            "Get Build Status",
            lambda_function=loop_count_lambda,
            payload_response_only=True)

        is_complete = sfn.Choice(self, "Job Complete?")

        state_machine_failed = sfn.Fail(self,
                                        "Build Failed",
                                        cause="AWS Batch Job Failed",
                                        error="DescribeJob returned FAILED")

        state_machine_success = sfn.Pass(self, "Build Success")

        definition = start_state_machine\
            .next(wait_x)\
            .next(get_state_machine_status)\
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                      "$.status", "FAILED"), state_machine_failed)
                  .when(sfn.Condition.string_equals(
                      "$.status", "SUCCEEDED"), state_machine_success)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=core.Duration.seconds(60),
        )
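
The lambda assets are not included here. Because the task states above return the raw Lambda payload, "$.status" and "$.wait_time" resolve directly against whatever loop_count_lambda returns; a stub might be:

# loop_count_lambda.py (hypothetical stub)
def handler(event, context):
    count = event.get("loop_count", 0) + 1
    status = "SUCCEEDED" if count >= 3 else "RUNNING"
    # "wait_time" feeds the "Wait X Seconds" state on the next loop iteration.
    return {"loop_count": count, "status": status, "wait_time": 5}

Example 5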
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Step Function Starts Here

        # The first thing we need to do is see if they are asking for pineapple on a pizza
        pineapple_check_lambda = _lambda.Function(self, "pineappleCheckLambdaHandler",
                                                  runtime=_lambda.Runtime.NODEJS_12_X,
                                                  handler="orderPizza.handler",
                                                  code=_lambda.Code.from_asset("lambdas"),
                                                  )

        # Step functions are built up of steps, we need to define our first step
        order_pizza = step_fn_tasks.LambdaInvoke(self, 'Order Pizza Job',
                                                 lambda_function=pineapple_check_lambda,
                                                 input_path='$.flavour',
                                                 result_path='$.pineappleAnalysis',
                                                 payload_response_only=True
                                                 )

        # Pizza Order failure step defined
        job_failed = step_fn.Fail(self, "Sorry, We Don't add Pineapple",
                                  cause='Failed To Make Pizza',
                                  error='They asked for Pineapple')

        # If they didn't ask for pineapple, let's cook the pizza
        cook_pizza = step_fn.Pass(self, 'Lets make your pizza')

        # If they ask for a pizza with pineapple, fail. Otherwise cook the pizza
        definition = step_fn.Chain \
            .start(order_pizza) \
            .next(step_fn.Choice(self, 'With Pineapple?') \
                  .when(step_fn.Condition.boolean_equals('$.pineappleAnalysis.containsPineapple', True), job_failed) \
                  .otherwise(cook_pizza))

        state_machine = step_fn.StateMachine(self, 'StateMachine', definition=definition, timeout=core.Duration.minutes(5))

        # Dead Letter Queue Setup
        dlq = sqs.Queue(self, 'stateMachineLambdaDLQ', visibility_timeout=core.Duration.seconds(300))

        # defines an AWS Lambda resource to connect to our API Gateway
        state_machine_lambda = _lambda.Function(self, "stateMachineLambdaHandler",
                                                runtime=_lambda.Runtime.NODEJS_12_X,
                                                handler="stateMachineLambda.handler",
                                                code=_lambda.Code.from_asset("lambdas"),
                                                environment={
                                                    'statemachine_arn': state_machine.state_machine_arn
                                                }
                                                )

        state_machine.grant_start_execution(state_machine_lambda)

        # defines an API Gateway REST API resource backed by our "state_machine_lambda" function.
        api_gw.LambdaRestApi(self, 'Endpoint',
                             handler=state_machine_lambda
                             )
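
lambdas/stateMachineLambda.js is not shown; all it has to do is read statemachine_arn from the environment and start an execution. The equivalent call, sketched in Python for consistency with the rest of this listing (assumed, not the original Node.js handler):

# Hypothetical Python equivalent of stateMachineLambda.handler.
import json
import os
import boto3

sfn_client = boto3.client("stepfunctions")

def handler(event, context):
    execution = sfn_client.start_execution(
        stateMachineArn=os.environ["statemachine_arn"],
        input=event.get("body") or "{}",  # API Gateway proxy body becomes the execution input
    )
    return {"statusCode": 200,
            "body": json.dumps({"executionArn": execution["executionArn"]})}
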
Example 6
    def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        submit_job_activity = sfn.Activity(
            self, "SubmitJob"
        )
        check_job_activity = sfn.Activity(
            self, "CheckJob"
        )

        submit_job = sfn_tasks.StepFunctionsInvokeActivity(
            self, "Submit Job",
            activity=submit_job_activity,
            result_path="$.guid",
        )
        wait_x = sfn.Wait(
            self, "Wait X Seconds",
            time=sfn.WaitTime.seconds_path('$.wait_time'),
        )
        get_status = sfn_tasks.StepFunctionsInvokeActivity(
            self, "Get Job Status",
            activity=check_job_activity,
            input_path="$.guid",
            result_path="$.status",
        )
        is_complete = sfn.Choice(
            self, "Job Complete?"
        )
        job_failed = sfn.Fail(
            self, "Job Failed",
            cause="AWS Batch Job Failed",
            error="DescribeJob returned FAILED"
        )
        final_status = sfn_tasks.StepFunctionsInvokeActivity(
            self, "Get Final Job Status",
            activity=check_job_activity,
            input_path="$.guid",
        )

        definition = submit_job\
            .next(wait_x)\
            .next(get_status)\
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                      "$.status", "FAILED"), job_failed)
                  .when(sfn.Condition.string_equals(
                      "$.status", "SUCCEEDED"), final_status)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self, "StateMachine",
            definition=definition,
            timeout=cdk.Duration.seconds(30),
        )
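
Activity states only make progress when an external worker polls them. A minimal worker loop for the SubmitJob activity, using the standard boto3 calls (the worker is assumed; it is not part of the stack):

# Hypothetical worker process for the SubmitJob activity.
import json
import boto3

sfn_client = boto3.client("stepfunctions")

def work(activity_arn: str) -> None:
    while True:
        task = sfn_client.get_activity_task(activityArn=activity_arn,
                                            workerName="submit-job-worker")
        token = task.get("taskToken")
        if not token:
            continue  # long poll timed out with no work; poll again
        job_input = json.loads(task["input"])
        # ... submit the job here, then report back with the guid ...
        sfn_client.send_task_success(taskToken=token,
                                     output=json.dumps({"guid": "job-1234"}))
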
Example 7
    def __init__(self, scope: core.Construct, id: builtins.str,
                 action_name: str, resources: FsiSharedResources,
                 function: lambda_.Function) -> None:
        super().__init__(scope, id)
        self.__resources = resources

        state_machine_name = id

        # Define the state machine definition...
        invoke_function = sft.LambdaInvoke(
            self,
            'InvokeFunction',
            lambda_function=function,
            invocation_type=sft.LambdaInvocationType.REQUEST_RESPONSE,
            input_path='$.Payload',
            result_path='$.Result')

        choice = sf.Choice(self,
                           'IsComplete',
                           comment="Check if there's more to process")
        choice.when(
            sf.Condition.string_equals('$.Result.Payload.Result.RunState',
                                       'RunStatus.MORE_AVAILABLE'),
            invoke_function)
        choice.when(
            sf.Condition.string_equals('$.Result.Payload.Result.RunState',
                                       'RunStatus.COMPLETE'),
            sf.Pass(self, 'Finalize', comment='Workflow Complete'))
        choice.otherwise(
            sf.Fail(self,
                    'NotImplemented',
                    cause='Unknown Choice',
                    error='NotImplementedException'))

        definition = invoke_function.next(choice)

        # Register the definition as StateMachine...
        zone_name = self.resources.landing_zone.zone_name
        self.state_machine = sf.StateMachine(
            self,
            'StateMachine',
            state_machine_name=state_machine_name,
            state_machine_type=sf.StateMachineType.STANDARD,
            timeout=core.Duration.hours(2),
            logs=sf.LogOptions(destination=logs.LogGroup(
                self,
                'LogGroup',
                removal_policy=core.RemovalPolicy.DESTROY,
                retention=RetentionDays.TWO_WEEKS,
                log_group_name='/homenet/fsi-{}/states/{}/{}'.format(
                    zone_name, self.component_name, action_name).lower())),
            tracing_enabled=True,
            definition=definition)
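
The invoked function arrives as a constructor argument, so its contract can only be inferred: with result_path='$.Result', the choice conditions read '$.Result.Payload.Result.RunState', meaning the payload must carry a Result object. A stub matching that shape (hypothetical):

# Hypothetical stub for the injected function.
def handler(event, context):
    more_available = bool(event.get("cursor"))  # assumed pagination marker
    return {"Result": {"RunState": "RunStatus.MORE_AVAILABLE" if more_available
                       else "RunStatus.COMPLETE"}}
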
Example 8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        lambda_role = _iam.Role(
            self,
            id='lab3-om-role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))

        cloudwatch_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        cloudwatch_policy_statement.add_actions("logs:CreateLogGroup")
        cloudwatch_policy_statement.add_actions("logs:CreateLogStream")
        cloudwatch_policy_statement.add_actions("logs:PutLogEvents")
        cloudwatch_policy_statement.add_actions("logs:DescribeLogStreams")
        cloudwatch_policy_statement.add_resources("*")
        lambda_role.add_to_policy(cloudwatch_policy_statement)

        fn_lambda_approve_reject = aws_lambda.Function(
            self,
            "lab3-om-approve-reject",
            code=aws_lambda.AssetCode(
                "../lambda-functions/approve-reject-application/"),
            handler="app.handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)

        fn_lambda_verify_identity = aws_lambda.Function(
            self,
            "lab3-om-verify-identity",
            code=aws_lambda.AssetCode("../lambda-functions/verify-identity/"),
            handler="app.handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)

        fn_lambda_check_address = aws_lambda.Function(
            self,
            "lab3-om-check-address",
            code=aws_lambda.AssetCode("../lambda-functions/check-address/"),
            handler="app.handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        '''
        [INFO] This is a sample of how to define a task and integrate it with a Lambda function. You need to create the other two tasks for their respective Lambda functions.
        '''
        task_verify_identity = _tasks.LambdaInvoke(
            self,
            "Verify Identity Document",
            lambda_function=fn_lambda_verify_identity,
            output_path="$.Payload")

        task_check_address = _tasks.LambdaInvoke(
            self,
            "Check Address",
            lambda_function=fn_lambda_check_address,
            output_path="$.Payload")

        task_wait_review = _tasks.LambdaInvoke(
            self,
            "Wait for Review",
            lambda_function=fn_lambda_approve_reject,
            output_path="$.Payload")

        state_approve = _sfn.Succeed(self, "Approve Application")
        state_reject = _sfn.Succeed(self, "Reject Application")

        # Let's define the State Machine, step by step
        # First, parallel tasks for verification

        s_verification = _sfn.Parallel(self, "Verification")
        s_verification.branch(task_verify_identity)
        s_verification.branch(task_check_address)

        # Next, we add a choice state: approve automatically only when neither
        # branch flags the application for human review
        c_human_review = _sfn.Choice(self, "Human review required?")
        c_human_review.when(
            _sfn.Condition.and_(
                _sfn.Condition.boolean_equals("$[0].humanReviewRequired",
                                              False),
                _sfn.Condition.boolean_equals("$[1].humanReviewRequired",
                                              False)), state_approve)
        c_human_review.when(
            _sfn.Condition.or_(
                _sfn.Condition.boolean_equals("$[0].humanReviewRequired",
                                              True),
                _sfn.Condition.boolean_equals("$[1].humanReviewRequired",
                                              True)), task_wait_review)

        # Another choice state to check if the application passed the review
        c_review_approved = _sfn.Choice(self, "Review approved?")
        c_review_approved.when(
            _sfn.Condition.boolean_equals("$.reviewApproved", True),
            state_approve)
        c_review_approved.when(
            _sfn.Condition.boolean_equals("$.reviewApproved", False),
            state_reject)

        task_wait_review.next(c_review_approved)

        definition = s_verification.next(c_human_review)

        _sfn.StateMachine(self,
                          "lab3-statemachine",
                          definition=definition,
                          timeout=core.Duration.minutes(5))
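
The three handlers live under ../lambda-functions/ and are not reproduced here. Each Parallel branch contributes one element of the result array, which is why the choice conditions address them as $[0] and $[1]; with output_path="$.Payload" the handlers just need to return a humanReviewRequired flag. A minimal assumed shape:

# Hypothetical shape of the verify-identity / check-address handlers.
def handler(event, context):
    # One element of the Parallel result array; "Wait for Review" later
    # returns {"reviewApproved": true/false} for the second choice state.
    return {"humanReviewRequired": False}
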
Example 9
    def __init__(
        self,
        scope: Construct,
        stack_id: str,
        *,
        botocore_lambda_layer: aws_lambda_python.PythonLayerVersion,
        env_name: str,
        storage_bucket: aws_s3.Bucket,
        validation_results_table: Table,
    ) -> None:
        # pylint: disable=too-many-locals, too-many-statements

        super().__init__(scope, stack_id)

        ############################################################################################
        # PROCESSING ASSETS TABLE
        processing_assets_table = Table(
            self,
            f"{env_name}-processing-assets",
            env_name=env_name,
            parameter_name=ParameterName.PROCESSING_ASSETS_TABLE_NAME,
            sort_key=aws_dynamodb.Attribute(name="sk", type=aws_dynamodb.AttributeType.STRING),
        )

        ############################################################################################
        # BATCH JOB DEPENDENCIES
        batch_job_queue = BatchJobQueue(
            self,
            "batch-job-queue",
            env_name=env_name,
            processing_assets_table=processing_assets_table,
        ).job_queue

        s3_read_only_access_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"
        )

        ############################################################################################
        # UPDATE CATALOG UPDATE MESSAGE QUEUE

        dead_letter_queue = aws_sqs.Queue(
            self,
            "dead-letter-queue",
            visibility_timeout=LAMBDA_TIMEOUT,
        )

        self.message_queue = aws_sqs.Queue(
            self,
            "update-catalog-message-queue",
            visibility_timeout=LAMBDA_TIMEOUT,
            dead_letter_queue=aws_sqs.DeadLetterQueue(max_receive_count=3, queue=dead_letter_queue),
        )
        self.message_queue_name_parameter = aws_ssm.StringParameter(
            self,
            "update-catalog-message-queue-name",
            string_value=self.message_queue.queue_name,
            description=f"Update Catalog Message Queue Name for {env_name}",
            parameter_name=ParameterName.UPDATE_CATALOG_MESSAGE_QUEUE_NAME.value,
        )

        populate_catalog_lambda = BundledLambdaFunction(
            self,
            "populate-catalog-bundled-lambda-function",
            directory="populate_catalog",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
            botocore_lambda_layer=botocore_lambda_layer,
        )

        self.message_queue.grant_consume_messages(populate_catalog_lambda)
        populate_catalog_lambda.add_event_source(
            SqsEventSource(self.message_queue, batch_size=1)  # type: ignore[arg-type]
        )

        ############################################################################################
        # STATE MACHINE TASKS

        check_stac_metadata_task = LambdaTask(
            self,
            "check-stac-metadata-task",
            directory="check_stac_metadata",
            botocore_lambda_layer=botocore_lambda_layer,
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        assert check_stac_metadata_task.lambda_function.role
        check_stac_metadata_task.lambda_function.role.add_managed_policy(
            policy=s3_read_only_access_policy
        )

        for table in [processing_assets_table, validation_results_table]:
            table.grant_read_write_data(check_stac_metadata_task.lambda_function)
            table.grant(
                check_stac_metadata_task.lambda_function,
                "dynamodb:DescribeTable",
            )

        content_iterator_task = LambdaTask(
            self,
            "content-iterator-task",
            directory="content_iterator",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{CONTENT_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )

        check_files_checksums_directory = "check_files_checksums"
        check_files_checksums_default_payload_object = {
            f"{DATASET_ID_KEY}.$": f"$.{DATASET_ID_KEY}",
            f"{VERSION_ID_KEY}.$": f"$.{VERSION_ID_KEY}",
            f"{METADATA_URL_KEY}.$": f"$.{METADATA_URL_KEY}",
            f"{FIRST_ITEM_KEY}.$": f"$.{CONTENT_KEY}.{FIRST_ITEM_KEY}",
            f"{ASSETS_TABLE_NAME_KEY}.$": f"$.{CONTENT_KEY}.{ASSETS_TABLE_NAME_KEY}",
            f"{RESULTS_TABLE_NAME_KEY}.$": f"$.{CONTENT_KEY}.{RESULTS_TABLE_NAME_KEY}",
        }
        check_files_checksums_single_task = BatchSubmitJobTask(
            self,
            "check-files-checksums-single-task",
            env_name=env_name,
            directory=check_files_checksums_directory,
            s3_policy=s3_read_only_access_policy,
            job_queue=batch_job_queue,
            payload_object=check_files_checksums_default_payload_object,
            container_overrides_command=[
                "--dataset-id",
                f"Ref::{DATASET_ID_KEY}",
                "--version-id",
                f"Ref::{VERSION_ID_KEY}",
                "--first-item",
                f"Ref::{FIRST_ITEM_KEY}",
                "--assets-table-name",
                f"Ref::{ASSETS_TABLE_NAME_KEY}",
                "--results-table-name",
                f"Ref::{RESULTS_TABLE_NAME_KEY}",
            ],
        )
        array_size = int(
            aws_stepfunctions.JsonPath.number_at(f"$.{CONTENT_KEY}.{ITERATION_SIZE_KEY}")
        )
        check_files_checksums_array_task = BatchSubmitJobTask(
            self,
            "check-files-checksums-array-task",
            env_name=env_name,
            directory=check_files_checksums_directory,
            s3_policy=s3_read_only_access_policy,
            job_queue=batch_job_queue,
            payload_object=check_files_checksums_default_payload_object,
            container_overrides_command=[
                "--dataset-id",
                f"Ref::{DATASET_ID_KEY}",
                "--version-id",
                f"Ref::{VERSION_ID_KEY}",
                "--first-item",
                f"Ref::{FIRST_ITEM_KEY}",
                "--assets-table-name",
                f"Ref::{ASSETS_TABLE_NAME_KEY}",
                "--results-table-name",
                f"Ref::{RESULTS_TABLE_NAME_KEY}",
            ],
            array_size=array_size,
        )

        for reader in [
            content_iterator_task.lambda_function,
            check_files_checksums_single_task.job_role,
            check_files_checksums_array_task.job_role,
        ]:
            processing_assets_table.grant_read_data(reader)  # type: ignore[arg-type]
            processing_assets_table.grant(
                reader, "dynamodb:DescribeTable"  # type: ignore[arg-type]
            )

        for writer in [
            check_files_checksums_single_task.job_role,
            check_files_checksums_array_task.job_role,
        ]:
            validation_results_table.grant_read_write_data(writer)  # type: ignore[arg-type]
            validation_results_table.grant(
                writer, "dynamodb:DescribeTable"  # type: ignore[arg-type]
            )

        validation_summary_task = LambdaTask(
            self,
            "validation-summary-task",
            directory="validation_summary",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{VALIDATION_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        validation_results_table.grant_read_data(validation_summary_task.lambda_function)
        validation_results_table.grant(
            validation_summary_task.lambda_function, "dynamodb:DescribeTable"
        )

        import_dataset_role = aws_iam.Role(
            self,
            "import-dataset",
            assumed_by=aws_iam.ServicePrincipal(  # type: ignore[arg-type]
                "batchoperations.s3.amazonaws.com"
            ),
        )

        import_asset_file_function = ImportFileFunction(
            self,
            directory="import_asset_file",
            invoker=import_dataset_role,
            env_name=env_name,
            botocore_lambda_layer=botocore_lambda_layer,
        )
        import_metadata_file_function = ImportFileFunction(
            self,
            directory="import_metadata_file",
            invoker=import_dataset_role,
            env_name=env_name,
            botocore_lambda_layer=botocore_lambda_layer,
        )

        import_dataset_task = LambdaTask(
            self,
            "import-dataset-task",
            directory="import_dataset",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{IMPORT_DATASET_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )

        import_dataset_task.lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(
                resources=[import_dataset_role.role_arn],
                actions=["iam:PassRole"],
            ),
        )
        import_dataset_task.lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(resources=["*"], actions=["s3:CreateJob"])
        )

        for table in [processing_assets_table]:
            table.grant_read_data(import_dataset_task.lambda_function)
            table.grant(import_dataset_task.lambda_function, "dynamodb:DescribeTable")

        # Import status check
        wait_before_upload_status_check = Wait(
            self,
            "wait-before-upload-status-check",
            time=WaitTime.duration(Duration.seconds(10)),
        )
        upload_status_task = LambdaTask(
            self,
            "upload-status",
            directory="upload_status",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path="$.upload_status",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        validation_results_table.grant_read_data(upload_status_task.lambda_function)
        validation_results_table.grant(upload_status_task.lambda_function, "dynamodb:DescribeTable")

        upload_status_task.lambda_function.add_to_role_policy(ALLOW_DESCRIBE_ANY_S3_JOB)

        # Parameters
        import_asset_file_function_arn_parameter = aws_ssm.StringParameter(
            self,
            "import asset file function arn",
            string_value=import_asset_file_function.function_arn,
            description=f"Import asset file function ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_ASSET_FILE_FUNCTION_TASK_ARN.value,
        )
        import_metadata_file_function_arn_parameter = aws_ssm.StringParameter(
            self,
            "import metadata file function arn",
            string_value=import_metadata_file_function.function_arn,
            description=f"Import metadata file function ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_METADATA_FILE_FUNCTION_TASK_ARN.value,
        )

        import_dataset_role_arn_parameter = aws_ssm.StringParameter(
            self,
            "import dataset role arn",
            string_value=import_dataset_role.role_arn,
            description=f"Import dataset role ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_DATASET_ROLE_ARN.value,
        )

        update_dataset_catalog = LambdaTask(
            self,
            "update-dataset-catalog",
            directory="update_dataset_catalog",
            botocore_lambda_layer=botocore_lambda_layer,
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        self.message_queue.grant_send_messages(update_dataset_catalog.lambda_function)

        for storage_writer in [
            import_dataset_role,
            import_dataset_task.lambda_function,
            import_asset_file_function,
            import_metadata_file_function,
            populate_catalog_lambda,
            update_dataset_catalog.lambda_function,
        ]:
            storage_bucket.grant_read_write(storage_writer)  # type: ignore[arg-type]

        grant_parameter_read_access(
            {
                import_asset_file_function_arn_parameter: [import_dataset_task.lambda_function],
                import_dataset_role_arn_parameter: [import_dataset_task.lambda_function],
                import_metadata_file_function_arn_parameter: [import_dataset_task.lambda_function],
                processing_assets_table.name_parameter: [
                    check_stac_metadata_task.lambda_function,
                    content_iterator_task.lambda_function,
                    import_dataset_task.lambda_function,
                ],
                validation_results_table.name_parameter: [
                    check_stac_metadata_task.lambda_function,
                    content_iterator_task.lambda_function,
                    validation_summary_task.lambda_function,
                    upload_status_task.lambda_function,
                ],
                self.message_queue_name_parameter: [update_dataset_catalog.lambda_function],
            }
        )

        success_task = aws_stepfunctions.Succeed(self, "success")
        upload_failure = aws_stepfunctions.Fail(self, "upload failure")
        validation_failure = aws_stepfunctions.Succeed(self, "validation failure")

        ############################################################################################
        # STATE MACHINE
        dataset_version_creation_definition = (
            check_stac_metadata_task.next(content_iterator_task)
            .next(
                aws_stepfunctions.Choice(  # type: ignore[arg-type]
                    self, "check_files_checksums_maybe_array"
                )
                .when(
                    aws_stepfunctions.Condition.number_equals(
                        f"$.{CONTENT_KEY}.{ITERATION_SIZE_KEY}", 1
                    ),
                    check_files_checksums_single_task.batch_submit_job,
                )
                .otherwise(check_files_checksums_array_task.batch_submit_job)
                .afterwards()
            )
            .next(
                aws_stepfunctions.Choice(self, "content_iteration_finished")
                .when(
                    aws_stepfunctions.Condition.number_equals(
                        f"$.{CONTENT_KEY}.{NEXT_ITEM_KEY}", -1
                    ),
                    validation_summary_task.next(
                        aws_stepfunctions.Choice(  # type: ignore[arg-type]
                            self, "validation_successful"
                        )
                        .when(
                            aws_stepfunctions.Condition.boolean_equals(
                                f"$.{VALIDATION_KEY}.{SUCCESS_KEY}", True
                            ),
                            import_dataset_task.next(
                                wait_before_upload_status_check  # type: ignore[arg-type]
                            )
                            .next(upload_status_task)
                            .next(
                                aws_stepfunctions.Choice(
                                    self, "import_completed"  # type: ignore[arg-type]
                                )
                                .when(
                                    aws_stepfunctions.Condition.and_(
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status", "Complete"
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Complete",
                                        ),
                                    ),
                                    update_dataset_catalog.next(
                                        success_task  # type: ignore[arg-type]
                                    ),
                                )
                                .when(
                                    aws_stepfunctions.Condition.or_(
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status",
                                            "Cancelled",
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status", "Failed"
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Cancelled",
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Failed",
                                        ),
                                    ),
                                    upload_failure,  # type: ignore[arg-type]
                                )
                                .otherwise(
                                    wait_before_upload_status_check  # type: ignore[arg-type]
                                )
                            ),
                        )
                        .otherwise(validation_failure)  # type: ignore[arg-type]
                    ),
                )
                .otherwise(content_iterator_task)
            )
        )

        self.state_machine = aws_stepfunctions.StateMachine(
            self,
            f"{env_name}-dataset-version-creation",
            definition=dataset_version_creation_definition,  # type: ignore[arg-type]
        )

        self.state_machine_parameter = aws_ssm.StringParameter(
            self,
            "state machine arn",
            description=f"State machine ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_DATASET_VERSION_CREATION_STEP_FUNCTION_ARN.value,  # pylint:disable=line-too-long
            string_value=self.state_machine.state_machine_arn,
        )

        Tags.of(self).add("ApplicationLayer", "processing")  # type: ignore[arg-type]
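
LambdaTask, BundledLambdaFunction, BatchSubmitJobTask and ImportFileFunction are project-local constructs that this listing does not include. As a rough sketch of what LambdaTask is assumed to be, given how it is used above (a state with a .lambda_function attribute), with entirely assumed asset paths and handler names:

# Rough sketch of the assumed LambdaTask wrapper; the real construct is project code.
import aws_cdk.aws_lambda as aws_lambda
import aws_cdk.aws_stepfunctions_tasks as aws_stepfunctions_tasks

class LambdaTask(aws_stepfunctions_tasks.LambdaInvoke):
    def __init__(self, scope, construct_id: str, *, directory: str,
                 botocore_lambda_layer, result_path: str = "$",
                 extra_environment=None) -> None:
        lambda_function = aws_lambda.Function(
            scope, f"{construct_id}-function",
            code=aws_lambda.Code.from_asset(f"lambdas/{directory}"),  # path assumed
            handler="task.lambda_handler",  # handler name assumed
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            layers=[botocore_lambda_layer],
            environment=extra_environment)
        super().__init__(scope, construct_id,
                         lambda_function=lambda_function,
                         result_path=result_path,
                         payload_response_only=True)  # assumed
        self.lambda_function = lambda_function
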
Example 10
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        ####################################################################################
        # IoT Events

        # IoT Events: Execution role
        iot_events_execution_role = iam.Role(
            self,
            "IoTEventsExecutionRole",
            assumed_by=iam.ServicePrincipal("iotevents.amazonaws.com"))
        iot_events_execution_role.add_to_policy(
            iam.PolicyStatement(resources=["*"], actions=["iot:Publish"]))
        iot_events_execution_role.add_to_policy(
            iam.PolicyStatement(resources=["*"], actions=["SNS:Publish"]))

        # IoT Events: Input
        inputDefinitionProperty = iotevents.CfnInput.InputDefinitionProperty(
            attributes=[{
                "jsonPath": "gatewayid"
            }, {
                "jsonPath": "last_uplink_received_timestamp_ms"
            }, {
                "jsonPath": "last_connection_status"
            }, {
                "jsonPath": "timestamp_iso8601"
            }])

        iot_events_input = iotevents.CfnInput(
            self,
            "LoRaWANGatewayConnectivityStatusInput",
            input_definition=inputDefinitionProperty,
            input_name="LoRaWANGatewayConnectivityStatusInput",
            input_description=
            "Input for connectivity status updates for LoRaWAN gateways")
        # IoT Events: Detector Model
        detector_model_definition = iotevents.CfnDetectorModel.DetectorModelDefinitionProperty(
            initial_state_name=lorawan_gateway_monitoring_detectormodel.
            initial_state_name,
            states=lorawan_gateway_monitoring_detectormodel.get_states(self))

        iot_events_model = iotevents.CfnDetectorModel(
            self,
            "LoRaWANGatewayConnectivityModel",
            detector_model_definition=detector_model_definition,
            detector_model_name="LoRaWANGatewayConnectivityModel",
            detector_model_description=
            "Detector model for LoRaWAN gateway connectivity status",
            key="gatewayid",
            evaluation_method="BATCH",
            role_arn=iot_events_execution_role.role_arn)

        ####################################################################################
        # Lambda function GetWirelessGatewayStatisticsLambda

        # Lambda function GetWirelessGatewayStatisticsLambda: Execution Role
        get_wireless_gateway_statistics_lambda_role = iam.Role(
            self,
            "GetWirelessGatewayStatisticsLambdaExecutionRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
        get_wireless_gateway_statistics_lambda_role.add_to_policy(
            iam.PolicyStatement(resources=[
                "arn:aws:iotwireless:" + self.region + ":" + self.account +
                ":WirelessGateway/*"
            ],
                                actions=[
                                    "iotwireless:ListWirelessGateways",
                                    "iotwireless:GetWirelessGatewayStatistics"
                                ]))
        get_wireless_gateway_statistics_lambda_role.add_to_policy(
            iam.PolicyStatement(resources=[
                "arn:aws:iotevents:" + self.region + ":" + self.account +
                ":input/LoRaWANGatewayConnectivityStatusInput"
            ],
                                actions=["iotevents:BatchPutMessage"]))

        # Lambda function GetWirelessGatewayStatisticsLambda: Lambda function configuration
        get_wireless_gateway_statistics_lambda = lambda_.Function(
            self,
            "GetWirelessGatewayStatisticsLambda",
            code=lambda_.Code.from_asset(
                "src_get_wireless_gateway_statistics_lambda"),
            runtime=lambda_.Runtime.PYTHON_3_7,
            handler="lambda.handler",
            role=get_wireless_gateway_statistics_lambda_role,
            timeout=cdk.Duration.seconds(25))

        get_wireless_gateway_statistics_lambda.add_environment(
            "TEST_MODE", "true")

        get_wireless_gateway_statistics_lambda.add_environment(
            "IOT_EVENTS_INPUT_NAME", "LoRaWANGatewayConnectivityStatusInput")

        ####################################################################################
        # SNS topic
        sns_topic = sns.Topic(
            self,
            "LoRaWANGatewayNotificationTopic",
            display_name=
            "Topic to use for notifications about LoRaWAN gateway events like connect or disconnect",
            topic_name="LoRaWANGatewayNotificationTopic")

        email_address = cdk.CfnParameter(self, "emailforalarms")
        sns_topic.add_subscription(
            subscriptions.EmailSubscription(email_address.value_as_string))

        ####################################################################################
        # Step Function

        # State 'Fail'
        failure_state = sfn.Fail(self, "Fail")

        # State 'Wait'
        wait_state = sfn.Wait(self,
                              "Sleep",
                              time=sfn.WaitTime.duration(
                                  cdk.Duration.minutes(4)))

        # State 'Ingest gateway connectivity status into IoT Events input'
        lambda_invoke_state = tasks.LambdaInvoke(
            self,
            "Ingest gateway connectivity status into IoT Events input",
            result_path="$.wireless_gateway_stats",
            lambda_function=get_wireless_gateway_statistics_lambda
            # payload=task_input_payload
        )

        # State 'Did IoT events ingestion run successfully?'
        choice_lambda_state = sfn.Choice(
            self, "Did IoT events ingestion run successfully?")
        choice_lambda_state.when(
            sfn.Condition.number_equals(
                "$.wireless_gateway_stats.Payload.status", 200), wait_state)
        choice_lambda_state.otherwise(failure_state)

        # Define transitions
        wait_state.next(lambda_invoke_state)
        lambda_invoke_state.next(choice_lambda_state)

        # Create the state machine
        gateway_watchdog_state_machine = sfn.StateMachine(
            self,
            "LoRaWANGatewayWatchdogStatemachine",
            definition=lambda_invoke_state,
            state_machine_name="LoRaWANGatewayWatchdogStatemachine")
        ####################################################################################
        # CloudFormation Stack outputs

        cdk.CfnOutput(
            self,
            "StateMachineARN",
            value=gateway_watchdog_state_machine.state_machine_arn,
            description=
            "Please run 'aws stepfunctions start-execution --state-machine-arn  <LorawanConnectivityWatchdogStack.StateMachineARN>' to start the monitoring of LoRaWAN gateway connectivity",
        )

        cdk.CfnOutput(
            self,
            "StateMachineStartCommand",
            value='aws stepfunctions start-execution --state-machine-arn ' +
            gateway_watchdog_state_machine.state_machine_arn,
            description=
            "Please run this command to start the monitoring of LoRaWAN gateway connectivity",
        )

        cdk.CfnOutput(
            self,
            "StateMachineStopommand",
            value='aws stepfunctions stop-execution --state-machine-arn ' +
            gateway_watchdog_state_machine.state_machine_arn,
            description=
            "Please run this command to stop the monitoring of LoRaWAN gateway connectivity",
        )
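
The choice above reads '$.wireless_gateway_stats.Payload.status', so the Lambda is expected to return a numeric status. A stub for src_get_wireless_gateway_statistics_lambda/lambda.py under that assumption (the real ingestion logic is not shown):

# lambda.py (hypothetical stub)
def handler(event, context):
    # The real handler lists LoRaWAN gateways, fetches their statistics and
    # batch-puts them into the IoT Events input; 200 keeps the watchdog looping.
    return {"status": 200}

Example 11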
    def __init__(self, scope, id, *args, **kwargs):
        super().__init__(scope, id, *args, **kwargs)

        # Buckets
        source_bucket = s3.Bucket(self, "SourceBucket")
        dest_bucket = s3.Bucket(self, "DestinationBucket")
        processing_bucket = s3.Bucket(self, "ProcessingBucket")

        # Lambda Functions
        generate_workflow_input_lambda = aws_lambda.Function(
            self, "GenerateWorkflowInputFunction",
            code=aws_lambda.Code.from_asset(str(DIST_PATH)),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="generate_workflow_input.lambda_handler",
            environment={
                "InputBucketName": source_bucket.bucket_name,
                "ProcessingBucketName": processing_bucket.bucket_name,
                "OutputBucketName": dest_bucket.bucket_name
            }
        )
        check_workflow_ready_lambda = aws_lambda.Function(
            self, "CheckWorkflowReadyFunction",
            code=aws_lambda.Code.from_asset(str(DIST_PATH)),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="check_workflow_ready.lambda_handler"
        )
        string_replace_lambda = aws_lambda.Function(
            self, "StringReplaceFunction",
            code=aws_lambda.Code.from_asset(str(DIST_PATH)),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="string_replace.lambda_handler"
        )
        calculate_total_earnings_lambda = aws_lambda.Function(
            self, "CalculateTotalEarningsFunction",
            code=aws_lambda.Code.from_asset(str(DIST_PATH)),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="calculate_total_earnings.lambda_handler"
        )
        convert_csv_to_json_lambda = aws_lambda.Function(
            self, "ConvertCsvToJsonFunction",
            code=aws_lambda.Code.from_asset(str(DIST_PATH)),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="convert_csv_to_json.lambda_handler"
        )

        # Permissions
        source_bucket.grant_read(check_workflow_ready_lambda)
        source_bucket.grant_read(string_replace_lambda)
        processing_bucket.grant_write(string_replace_lambda)
        processing_bucket.grant_read_write(calculate_total_earnings_lambda)
        processing_bucket.grant_read(convert_csv_to_json_lambda)
        dest_bucket.grant_write(convert_csv_to_json_lambda)

        # Outputs
        core.CfnOutput(self, "SourceBucketName", value=source_bucket.bucket_name)
        core.CfnOutput(self, "DestinationBucketName", value=dest_bucket.bucket_name)
        core.CfnOutput(self, "ProcessingBucketName", value=processing_bucket.bucket_name)
        core.CfnOutput(self, "GenerateWorkflowInputLambda", value=generate_workflow_input_lambda.function_name)
        core.CfnOutput(self, "CheckWorkflowReadyLambda", value=check_workflow_ready_lambda.function_name)
        core.CfnOutput(self, "StringReplaceLambda", value=string_replace_lambda.function_name)
        core.CfnOutput(self, "CalculateTotalEarningsLambda", value=calculate_total_earnings_lambda.function_name)
        core.CfnOutput(self, "ConvertCsvToJsonLambda", value=convert_csv_to_json_lambda.function_name)

        # State Machine
        generate_workflow_input_task = sf_tasks.LambdaInvoke(
            self, "GenerateWorkflowInput",
            lambda_function=generate_workflow_input_lambda,
            payload_response_only=True
        )
        check_workflow_ready_task = sf_tasks.LambdaInvoke(
            self, "CheckWorkflowReady",
            lambda_function=check_workflow_ready_lambda,
            input_path="$.CheckWorkflowReady.Input",
            result_path="$.CheckWorkflowReady.Output",
            payload_response_only=True
        )
        string_replace_task = sf_tasks.LambdaInvoke(
            self, "ReplaceString",
            lambda_function=string_replace_lambda,
            result_path="$.StringReplace.Output",
            payload_response_only=True
        )
        calculate_total_earnings_task = sf_tasks.LambdaInvoke(
            self, "CalculateTotalEarnings",
            lambda_function=calculate_total_earnings_lambda,
            input_path="$.CalculateTotalEarnings.Input",
            result_path="$.CalculateTotalEarnings.Output",
            payload_response_only=True
        )
        convert_csv_to_json_task = sf_tasks.LambdaInvoke(
            self, "ConvertCsvToJson",
            lambda_function=convert_csv_to_json_lambda,
            input_path="$.ConvertCsvToJson.Input",
            result_path="$.ConvertCsvToJson.Output",
            payload_response_only=True
        )

        end_task = sf.Succeed(self, "WorkflowEnd")

        replace_string_parallel = sf.Map(
            self, "ReplaceStringParallel",
            items_path="$.StringReplace.Input",
            result_path="$.StringReplace.Output"
        ).iterator(string_replace_task)

        workflow_steps = sf.Chain \
            .start(replace_string_parallel) \
            .next(calculate_total_earnings_task)\
            .next(convert_csv_to_json_task)\
            .next(end_task)

        run_workflow = sf.Choice(self, "RunWorkflowDecision")\
            .when(sf.Condition.boolean_equals("$.CheckWorkflowReady.Output", True), workflow_steps)\
            .otherwise(end_task)

        hello_workflow_state_machine = sf.StateMachine(
            self, "HelloWorkflowStateMachine",
            definition=sf.Chain\
                .start(generate_workflow_input_task)\
                .next(check_workflow_ready_task)\
                .next(run_workflow)
        )
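
The DIST_PATH assets are not part of this listing. The RunWorkflowDecision choice tests $.CheckWorkflowReady.Output as a bare boolean, and the task uses payload_response_only=True, so check_workflow_ready.lambda_handler must return True or False directly; a stub under assumed event keys:

# check_workflow_ready.py (hypothetical stub)
import boto3

s3 = boto3.client("s3")

def lambda_handler(event, context):
    # "InputBucketName" is assumed to be supplied by GenerateWorkflowInput.
    listing = s3.list_objects_v2(Bucket=event["InputBucketName"], MaxKeys=1)
    return listing["KeyCount"] > 0  # ready only when there is input to process

Example 12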
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        submit_lambda = aws_lambda_python.PythonFunction(
            self,
            "submit-status",
            entry="./lambdas/example",
            handler="submit_status",
        )

        get_status_lambda = aws_lambda_python.PythonFunction(
            self,
            "get-status",
            entry="./lambdas/example",
            handler="get_status")

        final_status_lambda = aws_lambda_python.PythonFunction(
            self,
            "final-status",
            entry="./lambdas/example",
            handler="final_status")

        submit_job = tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            payload_response_only=True,
        )

        wait_x = sfn.Wait(self,
                          "Wait X Seconds",
                          time=sfn.WaitTime.seconds_path("$.seconds"))

        get_status = tasks.LambdaInvoke(
            self,
            "Get Job Status",
            lambda_function=get_status_lambda,
            payload_response_only=True,
        )

        job_failed = sfn.Fail(
            self,
            "Job Failed",
            cause="AWS Batch Job Failed",
            error="DescribeJob returned FAILED",
        )

        final_status = tasks.LambdaInvoke(
            self,
            "Get Final Job Status",
            lambda_function=final_status_lambda,
            payload_response_only=True,
        )

        definition = (submit_job.next(wait_x).next(get_status).next(
            sfn.Choice(self, "Job Complete?").when(
                sfn.Condition.string_equals("$.status", "FAILED"),
                job_failed).when(
                    sfn.Condition.string_equals("$.status", "SUCCEEDED"),
                    final_status).otherwise(wait_x)))

        sfn.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )
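
The ./lambdas/example module is not included. With payload_response_only=True, the returned dicts become the state input directly, so the "$.seconds" and "$.status" paths imply handlers shaped roughly like this (assumed):

# example.py (hypothetical handlers)
def submit_status(event, context):
    return {"job_id": "1234", "seconds": 5, "status": "RUNNING"}

def get_status(event, context):
    # Flip to SUCCEEDED once the underlying job is done.
    return {**event, "status": "SUCCEEDED"}

def final_status(event, context):
    return event
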
Example 13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Step Function Starts Here

        # The first thing we need to do is see if they are asking for pineapple on a pizza
        pineapple_check_lambda = _lambda.Function(
            self,
            "pineappleCheckLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="orderPizza.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
        )

        # Step functions are built up of steps, we need to define our first step
        order_pizza = step_fn_tasks.LambdaInvoke(
            self,
            'Order Pizza Job',
            lambda_function=pineapple_check_lambda,
            input_path='$.flavour',
            result_path='$.pineappleAnalysis',
            payload_response_only=True)

        # Pizza Order failure step defined
        pineapple_detected = step_fn.Fail(self,
                                          "Sorry, We Don't add Pineapple",
                                          cause='They asked for Pineapple',
                                          error='Failed To Make Pizza')

        # If they didn't ask for pineapple, let's cook the pizza
        cook_pizza = step_fn.Succeed(self,
                                     'Lets make your pizza',
                                     output_path='$.pineappleAnalysis')

        # If they ask for a pizza with pineapple, fail. Otherwise cook the pizza
        definition = step_fn.Chain \
            .start(order_pizza) \
            .next(step_fn.Choice(self, 'With Pineapple?')
                  .when(step_fn.Condition.boolean_equals('$.pineappleAnalysis.containsPineapple', True),
                        pineapple_detected)
                  .otherwise(cook_pizza))

        state_machine = step_fn.StateMachine(
            self,
            'StateMachine',
            definition=definition,
            timeout=core.Duration.minutes(5),
            tracing_enabled=True,
            state_machine_type=step_fn.StateMachineType.EXPRESS)

        # HTTP API Definition

        # Give our gateway permissions to interact with Step Functions
        http_api_role = iam.Role(
            self,
            'HttpApiRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'),
            inline_policies={
                "AllowSFNExec":
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=["states:StartSyncExecution"],
                        effect=iam.Effect.ALLOW,
                        resources=[state_machine.state_machine_arn])
                ])
            })

        api = api_gw.HttpApi(self,
                             'the_state_machine_api',
                             create_default_stage=True)

        # create an AWS_PROXY integration between the HTTP API and our Step Function
        integ = api_gw.CfnIntegration(
            self,
            'Integ',
            api_id=api.http_api_id,
            integration_type='AWS_PROXY',
            connection_type='INTERNET',
            integration_subtype='StepFunctions-StartSyncExecution',
            credentials_arn=http_api_role.role_arn,
            request_parameters={
                "Input": "$request.body",
                "StateMachineArn": state_machine.state_machine_arn
            },
            payload_format_version="1.0",
            timeout_in_millis=10000)

        api_gw.CfnRoute(self,
                        'DefaultRoute',
                        api_id=api.http_api_id,
                        route_key=api_gw.HttpRouteKey.DEFAULT.key,
                        target="integrations/" + integ.ref)

        core.CfnOutput(self, 'HTTP API URL', value=api.url)
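
# A quick smoke test for the endpoint above (assumed usage, not part of the
# stack): the AWS_PROXY integration forwards the request body as the Express
# state machine's input, and StartSyncExecution returns the execution result
# in the HTTP response.
import json
import urllib.request

api_url = "https://abc123.execute-api.us-east-1.amazonaws.com/"  # hypothetical value of the 'HTTP API URL' output

req = urllib.request.Request(
    api_url,
    data=json.dumps({"flavour": "pepperoni"}).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # includes the synchronous execution output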
Esempio n. 14
    def __init__(self, scope: core.Construct, id: str, QueueDefine="default", TaskDefine="default", LambdaDefine="default", SNSDefine="default", **kwargs):
        super().__init__(scope, id, **kwargs)

        self.Job_String_Split = _sfn.Task(
            self,"String_Split",
            input_path = "$.TaskInfo",
            result_path = "$.JobDetail.String_Split",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Split",
                job_definition = TaskDefine.getTaskDefine("String_Split"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "INPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Split.OUTPUT_KEY"),
                        "SPLIT_NUM":_sfn.Data.string_at("$.JobParameter.String_Split.SPLIT_NUM")
                    }
                )
            )
        )
        
        self.Job_Map = _sfn.Task(
            self,"Job_Map",
            input_path = "$.TaskInfo",
            result_path = "$.TaskInfo.JobDetail.Job_Map",
            output_path = "$",
            task = _sfn_tasks.RunLambdaTask(LambdaDefine.getLambdaFunction("Get_Job_List")),
        )
        
        self.Job_String_Reverse = _sfn.Task(
            self,"String_Reverse",
            input_path = "$",
            result_path = "$",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Reverse",
                job_definition = TaskDefine.getTaskDefine("String_Reverse"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "INDEX":_sfn.Data.string_at("$.INDEX"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.INPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.String_Reverse.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Repeat = _sfn.Task(
            self,"String_Repeat",
            input_path = "$",
            result_path = "$",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Repeat",
                job_definition = TaskDefine.getTaskDefine("String_Repeat"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "INDEX":_sfn.Data.string_at("$.INDEX"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.INPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.String_Repeat.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Process_Repeat = _sfn.Map(
            self, "String_Process_Repeat",
            max_concurrency=50,
            input_path = "$.TaskInfo.JobDetail.Job_Map",
            result_path = "DISCARD",
            items_path = "$.Payload",
            output_path = "$",
        ).iterator(self.Job_String_Repeat)
        
        self.Job_String_Repeat_Merge = _sfn.Task(
            self,"String_Repeat_Merge",
            input_path = "$.TaskInfo",
            result_path = "DISCARD",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Repeat_Merge",
                job_definition = TaskDefine.getTaskDefine("String_Merge"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "PERFIX":_sfn.Data.string_at("$.JobParameter.String_Repeat.Prefix"),
                        "FILE_NAME":_sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Repeat.OUTPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Repeat.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Process_Repeat.next(self.Job_String_Repeat_Merge)
        
        self.Job_String_Process_Reverse = _sfn.Map(
            self, "String_Process_Reverse",
            max_concurrency=50,
            input_path = "$.TaskInfo.JobDetail.Job_Map",
            result_path = "DISCARD",
            items_path = "$.Payload",
            output_path = "$",
        ).iterator(self.Job_String_Reverse)
        
        self.Job_String_Reverse_Merge = _sfn.Task(
            self,"String_Reverse_Merge",
            input_path = "$.TaskInfo",
            result_path = "DISCARD",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Reverse_Merge",
                job_definition = TaskDefine.getTaskDefine("String_Merge"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "PERFIX":_sfn.Data.string_at("$.JobParameter.String_Reverse.Prefix"),
                        "FILE_NAME":_sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Reverse.OUTPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Reverse.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Process_Reverse.next(self.Job_String_Reverse_Merge)

        self.Job_Parallel_Process = _sfn.Parallel(
            self,
            'Parallel_Process',
            input_path = "$",
            result_path = "DISCARD"
        )
        
        self.Job_Parallel_Process.branch(self.Job_String_Process_Repeat)
        self.Job_Parallel_Process.branch(self.Job_String_Process_Reverse)
        
        self.Job_Check_Output = _sfn.Task(
            self, "Check_Output",
            input_path = "$.TaskInfo",
            result_path = "$.JobDetail.Check_Output",
            output_path = "$.JobDetail.Check_Output.Payload",
            task = _sfn_tasks.RunLambdaTask(LambdaDefine.getLambdaFunction("Get_Output_size")),
        )
        
        self.Job_Is_Complete = _sfn.Choice(
            self, "Is_Complete",
            input_path = "$.TaskInfo",
            output_path = "$"
        )
        
        self.Job_Finish = _sfn.Wait(
            self, "Finish",
            time = _sfn.WaitTime.duration(core.Duration.seconds(5))
        )
        
        self.Job_Notification = _sfn.Task(self, "Notification",
            input_path = "$.TaskInfo",
            result_path = "DISCARD",
            output_path = "$",
            task = _sfn_tasks.PublishToTopic(SNSDefine.getSNSTopic("Topic_Batch_Job_Notification"),
                integration_pattern = _sfn.ServiceIntegrationPattern.FIRE_AND_FORGET,
                message = _sfn.TaskInput.from_data_at("$.JobStatus.Job_Comment"),
                subject = _sfn.Data.string_at("$.JobStatus.SNS_Subject")
            )
        )
        
        self.Job_Failed = _sfn.Wait(
            self, "Failed",
            time = _sfn.WaitTime.duration(core.Duration.seconds(5))
        )
        
        self.statemachine = _sfn.StateMachine(
            self, "StateMachine",
            definition = self.Job_String_Split.next(self.Job_Map) \
                .next(self.Job_Parallel_Process) \
                .next(self.Job_Check_Output) \
                .next(self.Job_Notification) \
                .next(self.Job_Is_Complete \
                    .when(_sfn.Condition.string_equals(
                            "$.JobStatus.OutputStatus", "FAILED"
                        ), self.Job_Failed
                            .next(self.Job_Map)
                        )
                    .when(_sfn.Condition.string_equals(
                            "$.JobStatus.OutputStatus", "SUCCEEDED"
                        ), self.Job_Finish)
                    .otherwise(self.Job_Failed)
                ),
            timeout = core.Duration.hours(1),
        )
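
# A minimal sketch (assumed) of the "Get_Job_List" Lambda behind Job_Map.
# RunLambdaTask stores the full invocation result, so the returned list ends
# up under "Payload" -- which is why both Map states use
# items_path="$.Payload". Each item carries the fields the per-shard Batch
# jobs read from the environment overrides; all values are hypothetical.
def handler(event, context):
    split_num = 4  # would normally be derived from the event
    return [
        {
            "INDEX": str(i),
            "INPUT_BUCKET": "my-input-bucket",
            "INPUT_KEY": f"splits/part-{i}",
            "OUTPUT_BUCKET": "my-output-bucket",
            "String_Reverse": {"OUTPUT_KEY": f"reverse/part-{i}"},
            "String_Repeat": {"OUTPUT_KEY": f"repeat/part-{i}"},
        }
        for i in range(split_num)
    ]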
Esempio n. 15
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 id_checker: str,
                 event_bus: str,
                 stage: Optional[str] = 'prod',
                 **kwargs) -> None:
        super().__init__(scope, id + '-' + stage, **kwargs)

        app_table_name = id + '-applications-table-' + stage
        app_table = ddb.Table(self,
                              id=app_table_name,
                              table_name=app_table_name,
                              partition_key=ddb.Attribute(
                                  name='id', type=ddb.AttributeType.STRING),
                              billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        events_table_name = id + '-events-table-' + stage
        events_table = ddb.Table(self,
                                 id=events_table_name,
                                 table_name=events_table_name,
                                 partition_key=ddb.Attribute(
                                     name='id', type=ddb.AttributeType.STRING),
                                 billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                                 stream=ddb.StreamViewType.NEW_IMAGE)

        self._table_stream_arn = events_table.table_stream_arn

        # create our Lambda function for the bank account service
        func_name = id + '-' + stage + '-' + 'account-application'
        lambda_assets = lambda_.Code.from_asset('account_application_service')
        handler = lambda_.Function(self,
                                   func_name,
                                   code=lambda_assets,
                                   runtime=lambda_.Runtime.NODEJS_10_X,
                                   handler='main.handler',
                                   environment={
                                       'ACCOUNTS_TABLE_NAME':
                                       app_table.table_name,
                                       'EVENTS_TABLE_NAME':
                                       events_table.table_name,
                                       'REGION': core.Aws.REGION
                                   })

        gw.LambdaRestApi(self, id=stage + '-' + id, handler=handler)

        # grant main Lambda function access to DynamoDB tables
        app_table.grant_read_write_data(handler.role)
        events_table.grant_read_write_data(handler.role)

        p_statement = iam.PolicyStatement(actions=[
            'ssm:Describe*', 'ssm:Get*', 'ssm:List*', 'events:*', 'states:*'
        ],
                                          effect=iam.Effect.ALLOW,
                                          resources=['*'])
        handler.add_to_role_policy(statement=p_statement)

        # create the Lambda function for the event publisher
        evt_publisher = id + '-' + stage + '-' + 'event-publisher'
        evt_handler = lambda_.Function(
            self,
            evt_publisher,
            code=lambda_assets,
            runtime=lambda_.Runtime.NODEJS_10_X,
            handler='event-publisher.handler',
            events=[
                lambda_es.DynamoEventSource(
                    table=events_table,
                    starting_position=lambda_.StartingPosition.LATEST)
            ],
            environment={
                'EVENT_BRIDGE_ARN': event_bus,
                'REGION': core.Aws.REGION
            })

        evt_handler.add_to_role_policy(statement=p_statement)

        # set up StepFunctions
        approve_application = sf.Task(
            self,
            'Approve Application',
            task=sft.InvokeFunction(handler,
                                    payload={
                                        'body': {
                                            'command':
                                            'APPROVE_ACCOUNT_APPLICATION',
                                            'data': {
                                                'id.$': '$.application.id'
                                            }
                                        }
                                    }),
            result_path='$.approveApplication')

        reject_application = sf.Task(self,
                                     'Reject Application',
                                     task=sft.InvokeFunction(
                                         handler,
                                         payload={
                                             'body': {
                                                 'command':
                                                 'REJECT_ACCOUNT_APPLICATION',
                                                 'data': {
                                                     'id.$': '$.application.id'
                                                 }
                                             }
                                         }),
                                     result_path='$.rejectApplication')

        id_checker_handler = lambda_.Function.from_function_arn(
            self, 'IdentityChecker', function_arn=id_checker)
        check_identity = sf.Task(self,
                                 'Check Identity',
                                 task=sft.InvokeFunction(
                                     id_checker_handler,
                                     payload={
                                         'body': {
                                             'command': 'CHECK_IDENTITY',
                                             'data': {
                                                 'application.$':
                                                 '$.application'
                                             }
                                         }
                                     }))

        wait_for_human_review = sf.Task(self, 'Wait for Human Review',
                                        task=sft.RunLambdaTask(handler,
                                                               integration_pattern=sf.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN,
                                                               payload={
                                                                   'body': {
                                                                       'command': 'FLAG_ACCOUNT_APPLICATION_FOR_HUMAN_REVIEW',
                                                                       'data': {
                                                                           'id.$': '$.application.id',
                                                                           'taskToken': sf.Context.task_token
                                                                       }
                                                                   }
                                                               }), result_path='$.humanReview') \
            .next(
            sf.Choice(self, 'Human Approval Choice')
            .when(sf.Condition.string_equals('$.humanReview.decision', 'APPROVE'), next=approve_application)
            .when(sf.Condition.string_equals('$.humanReview.decision', 'REJECT'), next=reject_application))

        sm_definition = sf.Parallel(self, 'Perform Automated Checks', result_path='$.checks') \
            .branch(check_identity) \
            .branch(sf.Pass(self, 'Check Fraud Model', result=sf.Result({'flagged': False}))) \
            .next(
            sf.Choice(self, 'Automated Checks Choice')
                .when(sf.Condition.boolean_equals('$.checks[0].flagged', True), next=wait_for_human_review)
                .when(sf.Condition.boolean_equals('$.checks[1].flagged', True), next=wait_for_human_review)
                .otherwise(approve_application))

        state_machine = sf.StateMachine(self,
                                        'OpenAccountStateMachine' + stage,
                                        definition=sm_definition)
        ssm.CfnParameter(self,
                         id='StateMachineArnSSM',
                         type='String',
                         value=state_machine.state_machine_arn,
                         name='StateMachineArnSSM')
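
# How a reviewer tool might complete the WAIT_FOR_TASK_TOKEN step above
# (assumed glue code, not part of the stack). The JSON given to
# send_task_success lands at "$.humanReview", which the Choice state reads.
import json

import boto3

sfn_client = boto3.client("stepfunctions")

def review(task_token: str, approved: bool) -> None:
    sfn_client.send_task_success(
        taskToken=task_token,
        output=json.dumps({"decision": "APPROVE" if approved else "REJECT"}),
    )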
Esempio n. 16
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        ProcessPurchase = lambda_.Function(
            self,
            'process-purchase',
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler='handler.process_purchase',
            code=lambda_.Code.from_asset('./code'),
            timeout=Duration.seconds(5))

        ProcessRefund = lambda_.Function(
            self,
            'process-refund',
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler='handler.process_refund',
            code=lambda_.Code.from_asset('./code'),
            timeout=Duration.seconds(5))

        GeneratePurchaseReceipt = lambda_.Function(
            self,
            'generate-purchase-receipt',
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler='handler.generate_purchase_receipt',
            code=lambda_.Code.from_asset('./code'),
            timeout=Duration.seconds(5))

        GenerateRefundReceipt = lambda_.Function(
            self,
            'generate-refund-receipt',
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler='handler.generate_refund_receipt',
            code=lambda_.Code.from_asset('./code'),
            timeout=Duration.seconds(5))

        ProcessPurchaseState = tasks.LambdaInvoke(
            self,
            'process purchase task',
            lambda_function=ProcessPurchase,
            input_path='$')

        ProcessRefundState = tasks.LambdaInvoke(self,
                                                'process refund task',
                                                lambda_function=ProcessRefund,
                                                input_path='$')

        GeneratePurchaseReceiptState = tasks.LambdaInvoke(
            self,
            'generate purchase receipt',
            lambda_function=GeneratePurchaseReceipt,
            input_path='$')

        GenerateRefundReceiptState = tasks.LambdaInvoke(
            self,
            'generate refund receipt',
            lambda_function=GenerateRefundReceipt,
            input_path='$')

        TransactionChoice = sfn.Choice(self,
                                       'process transaction',
                                       output_path='$')

        TransactionTypeEqualsPurchase = sfn.Condition.string_equals(
            '$.TransactionType', 'PURCHASE')
        TransactionTypeEqualsRefund = sfn.Condition.string_equals(
            '$.TransactionType', 'REFUND')

        # StateMachineDefinition = TransactionChoice.when(TransactionTypeEqualsPurchase, ProcessPurchaseState).when(TransactionTypeEqualsRefund, ProcessRefundState) # works
        StateMachineDefinition = TransactionChoice.when(
            TransactionTypeEqualsPurchase,
            ProcessPurchaseState.next(GeneratePurchaseReceiptState)).when(
                TransactionTypeEqualsRefund,
                ProcessRefundState.next(GenerateRefundReceiptState))

        Workflow = sfn.StateMachine(self,
                                    'transaction workflow',
                                    definition=StateMachineDefinition)
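
# A plausible shape (assumed) for ./code/handler.py backing the four
# LambdaInvoke states above; each function receives the previous state's
# output as `event` and returns JSON for the next state.
def process_purchase(event, context):
    return {**event, "purchaseProcessed": True}

def process_refund(event, context):
    return {**event, "refundProcessed": True}

def generate_purchase_receipt(event, context):
    return {**event, "receipt": "purchase-receipt-id"}  # stand-in value

def generate_refund_receipt(event, context):
    return {**event, "receipt": "refund-receipt-id"}  # stand-in value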
Esempio n. 17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        api = apigateway.RestApi(
            scope=self,
            id=f'{constants.PREFIX}-approval-api',
            rest_api_name='Human approval endpoint',
            description='HTTP Endpoint backed by API Gateway and Lambda',
            endpoint_types=[apigateway.EndpointType.REGIONAL],
        )

        v1 = api.root.add_resource("v1")
        approve_api = v1.add_resource("approve")

        #################################################

        email_topic = sns.Topic(
            scope=self,
            id=f'{constants.PREFIX}-email-topic',
        )
        email_topic.add_subscription(
            subscription=subscriptions.EmailSubscription(
                email_address=constants.EMAIL_APPROVER, ))

        #################################################

        submit_job_lambda = _lambda.Function(
            scope=self,
            id=f'{constants.PREFIX}-submit-lambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='submit.handler',
            environment={
                "TOPIC_ARN": email_topic.topic_arn,
                "END_POINT": approve_api.url,
                "TO_ADDRESS": constants.EMAIL_RECIPIENT,
                "FROM_ADDRESS": constants.EMAIL_SENDER,
            },
            code=_lambda.Code.from_asset(
                os.path.join('lambdas', 'submit-lambda')),
        )
        email_topic.grant_publish(submit_job_lambda)
        submit_job_lambda.add_to_role_policy(statement=iam.PolicyStatement(
            actions=['ses:Send*'],
            resources=['*'],
        ))

        submit_job = tasks.LambdaInvoke(
            scope=self,
            id=f'{constants.PREFIX}-submit-job',
            lambda_function=submit_job_lambda,
            integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,
            heartbeat=core.Duration.minutes(5),
            payload=sfn.TaskInput.from_object({
                "token": sfn.Context.task_token,
                "data": sfn.Data.string_at('$'),
            }),
        )

        success = sfn.Succeed(scope=self,
                              id=f'{constants.PREFIX}-success',
                              comment='We did it!')
        fail = sfn.Fail(scope=self,
                        id=f'{constants.PREFIX}-fail',
                        error='WorkflowFailure',
                        cause='Something went wrong')

        choice = sfn.Choice(scope=self,
                            id=f'{constants.PREFIX}-choice',
                            comment='Was it approved?')

        choice.when(condition=sfn.Condition.string_equals("$.status", "OK"),
                    next=success)
        choice.otherwise(fail)

        definition = submit_job.next(choice)

        self._state_machine = sfn.StateMachine(
            scope=self,
            id=f'{constants.PREFIX}-state-machine',
            definition=definition,
            # only 10 minutes to approve, so better be quick
            timeout=core.Duration.minutes(10))

        #################################################

        approval_lambda = _lambda.Function(
            scope=self,
            id=f'{constants.PREFIX}-approval-lambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='approve.handler',
            code=_lambda.Code.from_asset(
                os.path.join('lambdas', 'approve-lambda')),
        )
        approval_lambda.add_to_role_policy(statement=iam.PolicyStatement(
            actions=['states:Send*'], resources=['*']))

        approve_integration = apigateway.LambdaIntegration(approval_lambda)

        approve_api_get_method = approve_api.add_method(
            http_method="GET",
            api_key_required=False,
            integration=approve_integration,
        )
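
# A minimal sketch (assumed) of approve.handler: the email sent by the
# submit Lambda would link to the GET endpoint with the task token in the
# query string (parameter name is hypothetical), and completing the token
# resumes the state machine with {"status": "OK"}.
import json

import boto3

sfn_client = boto3.client("stepfunctions")

def handler(event, context):
    token = event["queryStringParameters"]["taskToken"]  # hypothetical name
    sfn_client.send_task_success(taskToken=token,
                                 output=json.dumps({"status": "OK"}))
    return {"statusCode": 200, "body": "Approved"}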
Esempio n. 18
    def create_unfurl_statemachine(self):
        map_job = sfn.Map(self,
                          "Unfurl Map",
                          items_path="$.links",
                          max_concurrency=10)
        get_note_job = tasks.LambdaInvoke(
            self,
            "Get Note Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "get_note_from_url",
                "url.$": "$.url"
            }),
        )
        get_tf_job = tasks.LambdaInvoke(
            self,
            "Get Text Frequency Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "update_tf",
                "id.$": "$.Payload.id",
                "url.$": "$.Payload.url",
                "contentUpdatedAt.$": "$.Payload.contentUpdatedAt",
                "isArchived.$": "$.Payload.isArchived",
            }),
        )
        get_idf_job = tasks.LambdaInvoke(
            self,
            "Get Inter Document Frequency Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "update_idf",
                "id.$": "$.Payload.id",
                "url.$": "$.Payload.url",
                "contentUpdatedAt.$": "$.Payload.contentUpdatedAt",
                "isArchived.$": "$.Payload.isArchived",
            }),
        )
        get_tfidf_job = tasks.LambdaInvoke(
            self,
            "Get TF*IDF WordCloud Image Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "update_tfidf_png",
                "id.$": "$.Payload.id",
                "url.$": "$.Payload.url",
                "contentUpdatedAt.$": "$.Payload.contentUpdatedAt",
                "isArchived.$": "$.Payload.isArchived",
            }),
        )
        unfurl_job = tasks.LambdaInvoke(
            self,
            "Get Attachment Job",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "unfurl",
                "id.$": "$.Payload.id",
                "url.$": "$.Payload.url",
            }),
        )

        get_tf_job.next(get_idf_job.next(get_tfidf_job.next(unfurl_job)))

        choice_job = sfn.Choice(self, "Check for Update")
        choice_job.when(
            sfn.Condition.and_(
                sfn.Condition.is_timestamp("$.Payload.tfidfPngUpdatedAt"),
                sfn.Condition.timestamp_less_than_json_path(
                    "$.Payload.contentUpdatedAt",
                    "$.Payload.tfidfPngUpdatedAt"),
            ),
            unfurl_job,
        ).when(
            sfn.Condition.and_(
                sfn.Condition.is_timestamp("$.Payload.tfTsvUpdatedAt"),
                sfn.Condition.timestamp_less_than_json_path(
                    "$.Payload.contentUpdatedAt", "$.Payload.tfTsvUpdatedAt"),
            ),
            get_tfidf_job,
        ).otherwise(get_tf_job)

        unfurl_definition = map_job.iterator(get_note_job.next(choice_job))
        self.unfurl_statemachine = sfn.StateMachine(
            self,
            "UnfurlStateMachine",
            definition=unfurl_definition,
            timeout=core.Duration.minutes(20),
            state_machine_type=sfn.StateMachineType.EXPRESS,
            logs=sfn.LogOptions(
                destination=logs.LogGroup(self, "UnfurlStateMachineLogGroup"),
                level=sfn.LogLevel.ERROR,
            ),
        )
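
# self.step_lambda is shared by every task above and dispatches on the
# "action" field; a sketch of that dispatcher (assumed, not the original
# handler) keeps the payload contract in one place.
def handler(event, context):
    action = event["action"]
    if action == "get_note_from_url":
        # would fetch the note and return the metadata the Choice inspects
        return {"id": "note-1", "url": event["url"],
                "contentUpdatedAt": "2021-01-01T00:00:00Z",
                "isArchived": False}
    if action in ("update_tf", "update_idf", "update_tfidf_png", "unfurl"):
        # each step would do its piece of work, then pass the metadata on
        return event
    raise ValueError(f"unknown action: {action}")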
Esempio n. 19
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        submit_lambda = PythonFunction(self,
                                       'Submit',
                                       handler='handler',
                                       index='submit.py',
                                       entry=os.path.join(
                                           os.getcwd(), 'lambdas'),
                                       runtime=lambda_.Runtime.PYTHON_3_8)

        get_status_lambda = PythonFunction(self,
                                           'Status',
                                           handler='handler',
                                           index='status.py',
                                           entry=os.path.join(
                                               os.getcwd(), 'lambdas'),
                                           runtime=lambda_.Runtime.PYTHON_3_8)

        submit_job = tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        wait_x = sfn.Wait(self,
                          "Wait X Seconds",
                          time=sfn.WaitTime.seconds_path("$.waitSeconds"))

        get_status = tasks.LambdaInvoke(
            self,
            "Get Job Status",
            lambda_function=get_status_lambda,
            # Keep only the Lambda's result (the `Payload` attribute)
            output_path="$.Payload")

        job_failed = sfn.Fail(self,
                              "Job Failed",
                              cause="AWS Batch Job Failed",
                              error="DescribeJob returned FAILED")

        final_status = tasks.LambdaInvoke(
            self,
            "Get Final Job Status",
            lambda_function=get_status_lambda,
            # Use "guid" field as input
            output_path="$.Payload")

        definition = submit_job.next(wait_x).next(get_status).next(
            sfn.Choice(self, "Job Complete?").when(
                sfn.Condition.string_equals("$.status", "FAILED"),
                job_failed).when(
                    sfn.Condition.string_equals("$.status", "SUCCEEDED"),
                    final_status).otherwise(wait_x))

        sfn.StateMachine(self,
                         "StateMachine",
                         definition=definition,
                         timeout=cdk.Duration.minutes(5))
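
# A sketch (assumed) of lambdas/submit.py for the machine above: wait_x
# reads "$.waitSeconds" and the Choice reads "$.status", so both keys must
# be present in every Payload that travels around the loop.
def handler(event, context):
    return {
        "guid": "job-123",  # hypothetical job identifier
        "waitSeconds": 5,
        "status": "RUNNING",
    }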
Esempio n. 20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        email_subscription_parameter = core.CfnParameter(
            self,
            "EmailSubscriptionParameter",
            description="Email Address for Notification Subscription",
            allowed_pattern=
            r'^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$',
            min_length=1,
            constraint_description="Must be a valid email.")
        email_subscription = email_subscription_parameter.value_as_string

        #runtime=aws_lambda.Runtime.PYTHON_3_8

        boto3_lambda_layer = self.create_dependencies_layer(
            id="boto3layer",
            requirements_path="./layers/boto3/requirements.txt",
            output_dir="./layers/boto3")

        is_inline = False

        context_enrichment = self.create_lambda_function(
            boto3_lambda_layer, "./functions/context-enrichment",
            "context_enrichment", is_inline)
        """
    context_enrichment=aws_lambda.Function(
      self,
      "context_enrichment",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/context-enrichment"),
      layers=[boto3_lambda_layer]
    )
    """
        handler_statement = iam.PolicyStatement(actions=[
            "iam:ListRoleTags", "s3:GetBucketTagging", "lambda:ListTags",
            "sqs:ListQueueTags", "kms:ListAliases", "kms:ListResourceTags"
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        context_enrichment.add_to_role_policy(handler_statement)

        cmk_key = kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )

        email_topic = sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key)
        email_topic.add_subscription(
            subscriptions.EmailSubscription(email_subscription))

        notification = self.create_lambda_function(
            boto3_lambda_layer, "./functions/notification", "notification",
            is_inline, {"SNS_TOPIC_ARN": email_topic.topic_arn})
        """
    notification=aws_lambda.Function(
      self,
      "notification",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/notification"),
      layers=[boto3_lambda_layer],
      environment={"SNS_TOPIC_ARN":email_topic.topic_arn}
    )
    """
        notification_statement = iam.PolicyStatement(actions=[
            "sns:Publish",
        ],
                                                     effect=iam.Effect.ALLOW,
                                                     resources=["*"])
        notification.add_to_role_policy(notification_statement)
        cmk_key.grant_encrypt_decrypt(notification)

        archive_access_analyzer_finding = self.create_lambda_function(
            boto3_lambda_layer, "./functions/archive-access-analyzer-finding",
            "archive-access-analyzer-finding", is_inline)
        """
    archive_access_analyzer_finding=aws_lambda.Function(
      self,
      "archive-access-analyzer-finding",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/archive-access-analyzer-finding"),
      layers=[boto3_lambda_layer]
    )
    """
        archive_statement = iam.PolicyStatement(actions=[
            "access-analyzer:UpdateFindings",
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        archive_access_analyzer_finding.add_to_role_policy(archive_statement)

        evaluate_access_analyzer_finding = self.create_lambda_function(
            boto3_lambda_layer, "./functions/evaluate-access-analyzer-finding",
            "evaluate-access-analyzer-finding", is_inline)
        """
    evaluate_access_analyzer_finding=aws_lambda.Function(
      self,
      "evaluate-access-analyzer-finding",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/evaluate-access-analyzer-finding"),
      layers=[boto3_lambda_layer]
    )
    """
        #https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        access_analyzer_handler_task = sfn.Task(
            self,
            "Context Enrichment",
            task=sfn_tasks.InvokeFunction(context_enrichment),
            result_path="$.guid",
        )

        notification_task = sfn.Task(
            self,
            "Send Notification",
            task=sfn_tasks.InvokeFunction(notification),
            result_path="$.guid",
        )

        archive_task = sfn.Task(
            self,
            "Archive Finding",
            task=sfn_tasks.InvokeFunction(archive_access_analyzer_finding),
            result_path="$.guid",
        )

        evaluate_task = sfn.Task(
            self,
            "Evaluate Risk Level",
            task=sfn_tasks.InvokeFunction(evaluate_access_analyzer_finding),
            result_path="$.guid",
        )

        definition=access_analyzer_handler_task. \
          next(evaluate_task). \
          next(sfn.Choice(self, "Archive?"). \
            when(sfn.Condition.string_equals("$.guid.status", "ARCHIVE"), archive_task). \
            when(sfn.Condition.string_equals("$.guid.status", "NOTIFY"), notification_task) \
          )

        state_machine = sfn.StateMachine(
            self,
            "Access-Analyzer-Automatic-Finding-Archive-State-Machine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )

        #https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-cloudwatch-events-s3.html
        access_analyzer_finding_rule = aws_events.Rule(
            self,
            "AccessAnalzyerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={"status": ["ACTIVE"]}),
            targets=[
                aws_events_targets.SfnStateMachine(state_machine),
                aws_events_targets.LambdaFunction(context_enrichment)
            ])
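
# A sketch (assumed) of the evaluate-access-analyzer-finding handler: its
# return value is stored at "$.guid" (the result_path above), so the Choice
# compares "$.guid.status" against "ARCHIVE" or "NOTIFY".
def handler(event, context):
    finding = event.get("detail", {})
    # stand-in risk rule: archive anything that is not public
    if finding.get("isPublic") is False:
        return {"status": "ARCHIVE"}
    return {"status": "NOTIFY"}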
Esempio n. 21
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 redshift_cluster_name: str,
                 user_secret: Secret) -> None:
        super().__init__(scope, id)

        stack = Stack.of(self)

        subprocess.call(
            ['pip', 'install', '-t', 'dwh/dwh_loader_layer/python/lib/python3.8/site-packages', '-r',
             'dwh/dwh_loader/requirements.txt', '--platform', 'manylinux1_x86_64', '--only-binary=:all:',
             '--upgrade'])

        requirements_layer = _lambda.LayerVersion(scope=self,
                                                  id='PythonRequirementsTemplate',
                                                  code=_lambda.Code.from_asset('dwh/dwh_loader_layer'),
                                                  compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        dwh_loader_role = _iam.Role(
            self, 'Role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        dwh_loader_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'
        ))

        dwh_loader_role.attach_inline_policy(
            _iam.Policy(
                self, 'InlinePolicy',
                statements=[
                    _iam.PolicyStatement(
                        actions=[
                            "redshift-data:ExecuteStatement",
                            "redshift-data:CancelStatement",
                            "redshift-data:ListStatements",
                            "redshift-data:GetStatementResult",
                            "redshift-data:DescribeStatement",
                            "redshift-data:ListDatabases",
                            "redshift-data:ListSchemas",
                            "redshift-data:ListTables",
                            "redshift-data:DescribeTable"
                        ],
                        resources=['*']
                    ),
                    _iam.PolicyStatement(
                        actions=["secretsmanager:GetSecretValue"],
                        resources=[user_secret.secret_arn]
                    ),
                    _iam.PolicyStatement(
                        actions=["redshift:GetClusterCredentials"],
                        resources=[
                            "arn:aws:redshift:*:*:dbname:*/*",
                            "arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER
                        ]
                    ),
                    _iam.PolicyStatement(
                        effect=_iam.Effect.DENY,
                        actions=["redshift:CreateClusterUser"],
                        resources=["arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER]
                    ),
                    _iam.PolicyStatement(
                        conditions={
                            'StringLike': {
                                "iam:AWSServiceName": "redshift-data.amazonaws.com"
                            }
                        },
                        actions=["iam:CreateServiceLinkedRole"],
                        resources=["arn:aws:iam::*:role/aws-service-role/redshift-data.amazonaws.com/AWSServiceRoleForRedshift"]
                    ),
                ]
            )
        )

        dwh_loader_function = _lambda.Function(
            self, 'Lambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('dwh/dwh_loader'),
            handler='dwh_loader.handler',
            function_name='dwh-loader',
            environment={
                'CLUSTER_NAME': redshift_cluster_name,
                'PROCEDURE': _config.Redshift.ETL_PROCEDURE,
                'SECRET_ARN': user_secret.secret_arn,
                'DATABASE': _config.Redshift.DATABASE,
                'REGION': core.Aws.REGION,
                'SCHEMA': _config.Redshift.SCHEMA
            },
            layers=[requirements_layer],
            timeout=core.Duration.seconds(30),
            role=dwh_loader_role
        )

        dwh_loader_submit = _sfn_tasks.LambdaInvoke(
            self, 'Submit',
            lambda_function=dwh_loader_function,
            payload_response_only=True
        )

        dwh_loader_wait = _sfn.Wait(
            self, 'Wait',
            time=_sfn.WaitTime.duration(core.Duration.seconds(30))
        )

        dwh_loader_complete = _sfn.Choice(
            self, 'Complete'
        )

        dwh_loader_failed = _sfn.Fail(
            self, 'Fail',
            cause="Redshift Data API statement failed",
            error="$.Result.Error"
        )

        dwh_loader_status = _sfn_tasks.LambdaInvoke(
            self, 'Status',
            lambda_function=dwh_loader_function,
            result_path='$.Result',
            payload_response_only=True
        )

        definition = dwh_loader_submit \
            .next(dwh_loader_wait) \
            .next(dwh_loader_status) \
            .next(dwh_loader_complete
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FAILED'), dwh_loader_failed)
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FINISHED'), _sfn.Succeed(self, 'DwhLoaderSuccess'))
                  .otherwise(dwh_loader_wait))

        dwh_loader_stepfunctions = _sfn.StateMachine(
            self, 'StepFunctions',
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        step_trigger = _events.Rule(
            self, 'StepTrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=dwh_loader_stepfunctions,
            )
        )
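
# A sketch (assumed) of dwh_loader.handler. The same function backs both the
# Submit and Status tasks: without a statement id it calls the stored
# procedure through the Redshift Data API, with one it reports progress, and
# the returned Status drives the "$.Result.Status" Choice above.
import os

import boto3

client = boto3.client("redshift-data")

def handler(event, context):
    statement_id = event.get("Id")
    if statement_id is None:
        response = client.execute_statement(
            ClusterIdentifier=os.environ["CLUSTER_NAME"],
            Database=os.environ["DATABASE"],
            SecretArn=os.environ["SECRET_ARN"],
            # PROCEDURE is assumed to resolve to a callable procedure name
            Sql="CALL {}.{}()".format(os.environ["SCHEMA"],
                                      os.environ["PROCEDURE"]),
        )
        return {"Id": response["Id"], "Status": "SUBMITTED"}
    description = client.describe_statement(Id=statement_id)
    return {"Id": statement_id,
            "Status": description["Status"],
            "Error": description.get("Error", "")}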
Esempio n. 22
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        submit_job_activity = sfn.Activity(self, "SubmitJob")
        check_job_activity = sfn.Activity(self, "CheckJob")
        do_mapping_activity1 = sfn.Activity(self, "MapJOb1")
        do_mapping_activity2 = sfn.Activity(self, "MapJOb2")

        submit_job = sfn.Task(
            self,
            "Submit Job",
            task=sfn_tasks.InvokeActivity(submit_job_activity),
            result_path="$.guid",
        )

        task1 = sfn.Task(
            self,
            "Task 1 in Mapping",
            task=sfn_tasks.InvokeActivity(do_mapping_activity1),
            result_path="$.guid",
        )

        task2 = sfn.Task(
            self,
            "Task 2 in Mapping",
            task=sfn_tasks.InvokeActivity(do_mapping_activity2),
            result_path="$.guid",
        )

        wait_x = sfn.Wait(
            self,
            "Wait X Seconds",
            time=sfn.WaitTime.seconds_path('$.wait_time'),
        )
        get_status = sfn.Task(
            self,
            "Get Job Status",
            task=sfn_tasks.InvokeActivity(check_job_activity),
            input_path="$.guid",
            result_path="$.status",
        )
        is_complete = sfn.Choice(self, "Job Complete?")
        job_failed = sfn.Fail(self,
                              "Job Failed",
                              cause="AWS Batch Job Failed",
                              error="DescribeJob returned FAILED")
        final_status = sfn.Task(
            self,
            "Get Final Job Status",
            task=sfn_tasks.InvokeActivity(check_job_activity),
            input_path="$.guid",
        )

        definition_map = task1.next(task2)

        process_map = sfn.Map(self, "Process_map",
                              max_concurrency=10).iterator(definition_map)

        definition = submit_job \
            .next(process_map) \
            .next(wait_x) \
            .next(get_status) \
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                    "$.status", "FAILED"), job_failed)
                  .when(sfn.Condition.string_equals(
                    "$.status", "SUCCEEDED"), final_status)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=core.Duration.seconds(30),
        )
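
# Activities are polled by external workers rather than invoked by Step
# Functions; a minimal worker loop for the SubmitJob activity above might
# look like this (assumed sketch, output fields are hypothetical).
import json

import boto3

client = boto3.client("stepfunctions")

def run_worker(activity_arn: str) -> None:
    while True:
        task = client.get_activity_task(activityArn=activity_arn,
                                        workerName="demo-worker")
        token = task.get("taskToken")
        if not token:
            continue  # long poll timed out with no work; poll again
        job_input = json.loads(task["input"])
        # ... do the actual submit work with job_input here ...
        client.send_task_success(taskToken=token,
                                 output=json.dumps({"jobId": "job-123"}))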
Esempio n. 23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ------ Necessary Roles ------
        roles = IamRole(
            self, 'IamRoles'
        )
        

        # ------ S3 Buckets ------
        # Create Athena bucket
        athena_bucket = _s3.Bucket(self, "AthenaBucket",
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # Create Forecast bucket
        forecast_bucket = _s3.Bucket(self, "FoecastBucket",
            removal_policy=core.RemovalPolicy.DESTROY
        )


        # ------ Athena ------ 
        # Config Athena query result output location
        workgroup_prop = _athena.CfnWorkGroup.WorkGroupConfigurationProperty(
            result_configuration=_athena.CfnWorkGroup.ResultConfigurationProperty(
                output_location="s3://"+athena_bucket.bucket_name
            )
        )
        # Create Athena workgroup
        athena_workgroup = _athena.CfnWorkGroup(
            self, 'ForecastGroup',
            name='ForecastGroup', 
            recursive_delete_option=True, 
            state='ENABLED', 
            work_group_configuration=workgroup_prop
        )
            
    
        # ------ SNS Topic ------
        topic = sns.Topic(
            self, 'NotificationTopic',
            display_name='StepsTopic'
        )
        # SNS email subscription. Get the email address from context value(cdk.json)
        topic.add_subscription(subs.EmailSubscription(self.node.try_get_context('my_email')))
         

        # ------ Layers ------
        shared_layer = _lambda.LayerVersion(
            self, 'LambdaLayer',
            layer_version_name='testfolderlayer',
            code=_lambda.AssetCode('shared/')
        )


        # ------ Lambdas for stepfuctions------
        create_dataset_lambda = _lambda.Function(
            self, 'CreateDataset',
            function_name='CreateDataset',
            code=_lambda.Code.asset('lambdas/createdataset/'),
            handler='dataset.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            timeout=core.Duration.seconds(30),
            layers=[shared_layer]
        )

        create_dataset_group_lambda = _lambda.Function(
            self, 'CreateDatasetGroup',
            function_name='CreateDatasetGroup',
            code = _lambda.Code.asset('lambdas/createdatasetgroup/'),
            handler = 'datasetgroup.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        import_data_lambda = _lambda.Function(
            self, 'CreateDatasetImportJob',
            function_name='CreateDatasetImportJob',
            code = _lambda.Code.asset('lambdas/createdatasetimportjob/'),
            handler = 'datasetimport.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'FORECAST_ROLE': roles.forecast_role.role_arn
            },
            layers=[shared_layer]
        )

        create_predictor_lambda = _lambda.Function(
            self, 'CreatePredictor',
            function_name='CreatePredictor',
            code = _lambda.Code.asset('lambdas/createpredictor/'),
            handler = 'predictor.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        create_forecast_lambda = _lambda.Function(
            self, 'CreateForecast',
            function_name='CreateForecast',
            code = _lambda.Code.asset('lambdas/createforecast/'),
            handler = 'forecast.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'EXPORT_ROLE': roles.forecast_role.role_arn
            },
            layers=[shared_layer],
            timeout=core.Duration.seconds(30)
        )

        # Deploy lambda with python dependencies from requirements.txt
        update_resources_lambda = _lambda_python.PythonFunction(
            self, 'UpdateResources',
            function_name='UpdateResources',
            entry='lambdas/updateresources/',
            index='update.py',
            handler='lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.update_role,
            environment= {
                'ATHENA_WORKGROUP': athena_workgroup.name,
                'ATHENA_BUCKET' : athena_bucket.bucket_name
            },
            layers=[shared_layer],
            timeout=core.Duration.seconds(900)
        )
        

        notify_lambda = _lambda.Function(
            self, 'NotifyTopic',
            function_name='NotifyTopic',
            code = _lambda.Code.asset('lambdas/notify/'),
            handler = 'notify.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'SNS_TOPIC_ARN': topic.topic_arn
            },
            layers=[shared_layer]
        )

        delete_forecast_lambda = _lambda.Function(
            self, 'DeleteForecast',
            function_name='DeleteForecast',
            code = _lambda.Code.asset('lambdas/deleteforecast/'),
            handler = 'deleteforecast.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        delete_predictor_lambda = _lambda.Function(
            self, 'DeletePredictor',
            function_name='DeletePredictor',
            code = _lambda.Code.asset('lambdas/deletepredictor/'),
            handler = 'deletepredictor.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        delete_importjob_lambda = _lambda.Function(
            self, 'DeleteImportJob',
            function_name='DeleteImportJob',
            code = _lambda.Code.asset('lambdas/deletedatasetimport/'),
            handler = 'deletedataset.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )


        # ------ StepFunctions ------
        strategy_choice = sfn.Choice(
            self, 'Strategy-Choice'
        )

        success_state = sfn.Succeed(
            self, 'SuccessState'
        )

        failed = sfn_tasks.LambdaInvoke(
            self, 'Failed',
            lambda_function = notify_lambda,
            result_path=None
        ).next(strategy_choice)

        create_dataset_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Dataset', 
            lambda_function = create_dataset_lambda,
            retry_on_service_exceptions=True,
            payload_response_only=True
        )

        self.add_retry_n_catch(create_dataset_job, failed)

        create_dataset_group_job = sfn_tasks.LambdaInvoke(
            self, 'Create-DatasetGroup', 
            lambda_function = create_dataset_group_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_dataset_group_job, failed)


        import_data_job = sfn_tasks.LambdaInvoke(
            self, 'Import-Data',
            lambda_function = import_data_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(import_data_job, failed)

        create_predictor_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Predictor',
            lambda_function = create_predictor_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_predictor_job, failed)

        create_forecast_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Forecast',
            lambda_function = create_forecast_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_forecast_job, failed)

        update_resources_job = sfn_tasks.LambdaInvoke(
            self, 'Update-Resources',
            lambda_function = update_resources_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(update_resources_job, failed)

        notify_success = sfn_tasks.LambdaInvoke(
            self, 'Notify-Success',
            lambda_function = notify_lambda,
            payload_response_only=True
        )

        delete_forecast_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-Forecast',
            lambda_function = delete_forecast_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_forecast_job)

        delete_predictor_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-Predictor',
            lambda_function = delete_predictor_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_predictor_job)

        delete_import_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-ImportJob',
            lambda_function = delete_importjob_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_import_job)
        
        
        definition = create_dataset_job\
            .next(create_dataset_group_job)\
            .next(import_data_job)\
            .next(create_predictor_job)\
            .next(create_forecast_job)\
            .next(update_resources_job)\
            .next(notify_success)\
            .next(strategy_choice.when(sfn.Condition.boolean_equals('$.params.PerformDelete', False), success_state)\
                                .otherwise(delete_forecast_job).afterwards())\
            .next(delete_predictor_job)\
            .next(delete_import_job)
                    
            
        deploy_state_machine = sfn.StateMachine(
            self, 'StateMachine',
            definition = definition
            # role=roles.states_execution_role
        )

        # S3 event trigger lambda
        s3_lambda = _lambda.Function(
            self, 'S3Lambda',
            function_name='S3Lambda',
            code=_lambda.Code.asset('lambdas/s3lambda/'),
            handler='parse.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            role=roles.trigger_role,
            environment= {
                'STEP_FUNCTIONS_ARN': deploy_state_machine.state_machine_arn,
                'PARAMS_FILE': self.node.try_get_context('parameter_file')
            }
        )
        s3_lambda.add_event_source(
            event_src.S3EventSource(
                bucket=forecast_bucket,
                events=[_s3.EventType.OBJECT_CREATED],
                filters=[_s3.NotificationKeyFilter(
                    prefix='train/',
                    suffix='.csv'
                )]
            )
        )

        # CloudFormation output
        core.CfnOutput(
            self, 'StepFunctionsName',
            description='Step Functions Name',
            value=deploy_state_machine.state_machine_name
        )

        core.CfnOutput(
            self, 'ForecastBucketName',
            description='Forecast bucket name to drop your files',
            value=forecast_bucket.bucket_name
        )

        core.CfnOutput(
            self, 'AthenaBucketName',
            description='Athena bucket name to drop your files',
            value=athena_bucket.bucket_name
        )
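
Note: the chain above leans on two helpers, add_retry_n_catch and delete_retry, that are defined elsewhere in the stack. A minimal sketch of what they might look like, using CDK's standard add_retry/add_catch API on task states; the error names, intervals and attempt counts here are assumptions, not the original values.

    def add_retry_n_catch(self, task, failure_handler):
        # Hypothetical helper: retry transient task failures, then hand
        # anything unrecoverable to the shared failure branch.
        task.add_retry(errors=['States.TaskFailed'],
                       interval=core.Duration.seconds(30),
                       max_attempts=3)
        task.add_catch(failure_handler,
                       errors=['States.ALL'],
                       result_path='$.error')

    def delete_retry(self, task):
        # Hypothetical helper: deletes can race with Forecast resource
        # states, so retry the "still in use" error a few times.
        task.add_retry(errors=['ResourceInUseException'],
                       interval=core.Duration.minutes(1),
                       max_attempts=5)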
Esempio n. 24
0
    def __init__(self, scope: core.App, id_: str, stack_env: str, **kwargs) -> None:
        super().__init__(scope, id_, **kwargs)

        # create dynamo table
        demo_table = aws_dynamodb.Table(
            scope=self,
            id="demo_table",
            partition_key=aws_dynamodb.Attribute(
                name="id",
                type=aws_dynamodb.AttributeType.STRING
            ),
            write_capacity=3,
            read_capacity=3,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        queue = aws_sqs.Queue(self, f"{id_}-SQSQueue")

        # create producer lambda function
        producer_lambda = self._create_lambda_function(
            function_name="producer",
            environment={
                "TABLE_NAME": demo_table.table_name,
                "QUEUE_URL": queue.queue_url
            }
        )
        queue.grant_send_messages(producer_lambda)

        # grant permission to lambda to write to demo table
        demo_table.grant_write_data(producer_lambda)

        # create consumer lambda function
        consumer_lambda = self._create_lambda_function(
            function_name="consumer",
            environment={"TABLE_NAME": demo_table.table_name}
        )

        # grant permission to lambda to read from demo table
        demo_table.grant_read_data(consumer_lambda)

        # api_gateway for root
        base_api = apigw_.RestApi(
            scope=self,
            id=f"{id_}-{stack_env}-apigw",
            rest_api_name=f"{id_}-{stack_env}-apigw",
            deploy_options=apigw_.StageOptions(stage_name=stack_env)
        )

        # /example entity
        api_entity = base_api.root.add_resource("example")

        # GET /example
        api_entity.add_method(
            http_method="GET",
            integration=apigw_.LambdaIntegration(
                handler=consumer_lambda,
                integration_responses=[
                    apigw_.IntegrationResponse(
                        status_code="200"
                    )
                ]
            )
        )

        # POST /example
        api_entity.add_method(
            http_method="POST",
            integration=apigw_.LambdaIntegration(
                handler=producer_lambda,
                integration_responses=[
                    apigw_.IntegrationResponse(
                        status_code="200"
                    )
                ]
            )
        )

        # ============= #
        # StepFunctions #
        # ============= #

        dynamodb_update_running_task = self._dynamodb_update_in_sfn(table=demo_table, status="running")

        wait_1_min = aws_sfn.Wait(
            scope=self,
            id="Wait one minutes as heavy task",
            time=aws_sfn.WaitTime.duration(core.Duration.minutes(1)),
        )

        dynamodb_update_complete_task = self._dynamodb_update_in_sfn(table=demo_table, status="complete")
        dynamodb_update_failure_task = self._dynamodb_update_in_sfn(table=demo_table, status="failure")

        check_task_status = aws_sfn.Choice(scope=self, id="Job Complete?")\
            .when(aws_sfn.Condition.string_equals("$.job_status", "success"), dynamodb_update_complete_task) \
            .otherwise(dynamodb_update_failure_task)

        # StepFunctions
        definition = dynamodb_update_running_task \
            .next(wait_1_min) \
            .next(check_task_status)

        sfn_process = aws_sfn.StateMachine(
            scope=self,
            id=f"{id_}-{stack_env}",
            definition=definition
        )

        # Lambda to invoke StepFunction
        sfn_invoke_lambda = self._create_lambda_function(
            function_name="invoke_step_function",
            environment={
                "STEP_FUNCTION_ARN": sfn_process.state_machine_arn,
                "QUEUE_URL": queue.queue_url
            }
        )
        # grant
        queue.grant_consume_messages(sfn_invoke_lambda)
        sfn_process.grant_start_execution(sfn_invoke_lambda)

        # ================ #
        # CloudWatch Event #
        # ================ #

        # Runs every 2 hours
        invoke_automatically = aws_events.Rule(
            scope=self,
            id=f"InvokeSFnViaLambda-{stack_env}",
            schedule=aws_events.Schedule.rate(core.Duration.hours(2))
        )
        invoke_automatically.add_target(aws_events_targets.LambdaFunction(sfn_invoke_lambda))
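
Note: this example calls two private helpers that are not shown. A plausible sketch follows, assuming the Lambda and tasks modules are imported as aws_lambda and aws_sfn_tasks, and that the execution input carries an id field; the asset path, runtime and update expression are assumptions.

    def _create_lambda_function(self, function_name: str, environment: dict):
        # Hypothetical helper: one Lambda per sub-directory under lambdas/.
        return aws_lambda.Function(
            scope=self,
            id=f"{function_name}_lambda",
            function_name=function_name,
            handler="lambda_function.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(f"lambdas/{function_name}"),
            environment=environment
        )

    def _dynamodb_update_in_sfn(self, table, status: str):
        # Hypothetical helper: a direct-service integration, so no Lambda
        # is needed to flip the job status attribute on the item.
        return aws_sfn_tasks.DynamoUpdateItem(
            scope=self,
            id=f"update_status_to_{status}",
            table=table,
            key={"id": aws_sfn_tasks.DynamoAttributeValue.from_string(
                aws_sfn.JsonPath.string_at("$.id"))},
            update_expression="SET job_status = :s",
            expression_attribute_values={
                ":s": aws_sfn_tasks.DynamoAttributeValue.from_string(status)
            },
            result_path=aws_sfn.JsonPath.DISCARD
        )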
Esempio n. 25
0
    def __init__(self, scope: Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        rg_property = network_fw.CfnRuleGroup.RuleGroupProperty(
            rule_variables=None,
            rules_source=network_fw.CfnRuleGroup.RulesSourceProperty(
                stateless_rules_and_custom_actions=network_fw.CfnRuleGroup.
                StatelessRulesAndCustomActionsProperty(stateless_rules=[
                    network_fw.CfnRuleGroup.StatelessRuleProperty(
                        priority=10,
                        rule_definition=network_fw.CfnRuleGroup.
                        RuleDefinitionProperty(
                            actions=["aws:drop"],
                            match_attributes=network_fw.CfnRuleGroup.
                            MatchAttributesProperty(destinations=[
                                network_fw.CfnRuleGroup.AddressProperty(
                                    address_definition="127.0.0.1/32")
                            ])))
                ])))

        nf_rule_group = network_fw.CfnRuleGroup(
            scope=self,
            id='GuardDutyNetworkFireWallRuleGroup',
            capacity=100,
            rule_group_name='guardduty-network-firewall',
            type='STATELESS',
            description='Guard Duty network firewall rule group',
            tags=[CfnTag(key='Name', value='cfn.rule-group.stack')],
            rule_group=rg_property)
        """ https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rule-dlq.html#dlq-considerations """
        dlq_statemachine = sqs.Queue(self,
                                     'DLQStateMachine',
                                     queue_name='dlq_state_machine')

        guardduty_firewall_ddb = ddb.Table(
            scope=self,
            id='GuarddutyFirewallDDB',
            table_name='GuardDutyFirewallDDBTable',
            removal_policy=RemovalPolicy.DESTROY,
            partition_key=ddb.Attribute(name='HostIp',
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
        """ IAM role for ddb permission """
        nf_iam_role = iam.Role(
            self,
            'DDBRole',
            role_name=f'ddb-nf-role-{env.region}',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["arn:aws:logs:*:*:*"],
                                actions=[
                                    "logs:CreateLogGroup",
                                    "logs:CreateLogStream", "logs:PutLogEvents"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    guardduty_firewall_ddb.table_arn,
                                    f"{guardduty_firewall_ddb.table_arn}/*"
                                ],
                                actions=[
                                    "dynamodb:PutItem", "dynamodb:GetItem",
                                    "dynamodb:Scan"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[nf_rule_group.ref, f"{nf_rule_group.ref}/*"],
                actions=[
                    "network-firewall:DescribeRuleGroup",
                    "network-firewall:UpdateRuleGroup"
                ]))

        record_ip_in_db = _lambda.Function(
            self,
            'RecordIpInDB',
            function_name='record-ip-in-ddb',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='addIPToDDB.handler',
            environment=dict(ACLMETATABLE=guardduty_firewall_ddb.table_name),
            role=nf_iam_role)
        """
        https://docs.amazonaws.cn/en_us/eventbridge/latest/userguide/eb-event-patterns-content-based-filtering.html
        """
        record_ip_task = step_fn_task.LambdaInvoke(
            self,
            'RecordIpDDBTask',
            lambda_function=record_ip_in_db,
            payload=step_fn.TaskInput.from_object({
                "comment":
                "Relevant fields from the GuardDuty / Security Hub finding",
                "HostIp.$":
                "$.detail.findings[0].ProductFields.aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4",
                "Timestamp.$":
                "$.detail.findings[0].ProductFields.aws/guardduty/service/eventLastSeen",
                "FindingId.$": "$.id",
                "AccountId.$": "$.account",
                "Region.$": "$.region"
            }),
            result_path='$',
            payload_response_only=True)

        firewall_update_rule = _lambda.Function(
            scope=self,
            id='GuardDutyUpdateNetworkFirewallRule',
            function_name='guardduty-update-networkfirewall-rule-group',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='updateNetworkFireWall.handler',
            environment=dict(
                FIREWALLRULEGROUP=nf_rule_group.ref,
                RULEGROUPPRI='30000',
                CUSTOMACTIONNAME='GuardDutytoFirewall',
                CUSTOMACTIONVALUE='guardduty-update-networkfirewall-rule-group'),
            role=nf_iam_role)

        firewall_update_rule_task = step_fn_task.LambdaInvoke(
            self,
            'FirewallUpdateRuleTask',
            lambda_function=firewall_update_rule,
            input_path='$',
            result_path='$',
            payload_response_only=True)

        firewall_no_update_job = step_fn.Pass(self, 'No Firewall change')
        notify_failure_job = step_fn.Fail(self,
                                          'NotifyFailureJob',
                                          cause='Any Failure',
                                          error='Unknown')

        send_to_slack = _lambda.Function(
            scope=self,
            id='SendAlertToSlack',
            function_name='guardduty-networkfirewall-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendSMSToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_slack_task = step_fn_task.LambdaInvoke(
            scope=self,
            id='LambdaToSlackDemo',
            lambda_function=send_to_slack,
            input_path='$',
            result_path='$')

        is_new_ip = step_fn.Choice(self, "New IP?")
        is_block_succeed = step_fn.Choice(self, "Block sucessfully?")

        definition = step_fn.Chain \
            .start(record_ip_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=notify_failure_job)) \
            .next(is_new_ip
                  .when(step_fn.Condition.boolean_equals('$.NewIP', True),
                        firewall_update_rule_task
                            .add_retry(errors=["States.TaskFailed"],
                                       interval=Duration.seconds(2),
                                       max_attempts=2
                                       )
                            .add_catch(errors=["States.ALL"], handler=notify_failure_job)
                            .next(
                                is_block_succeed
                                    .when(step_fn.Condition.boolean_equals('$.Result', False), notify_failure_job)
                                    .otherwise(send_slack_task)
                            )
                        )
                  .otherwise(firewall_no_update_job)
                  )

        guardduty_state_machine = step_fn.StateMachine(
            self,
            'GuarddutyStateMachine',
            definition=definition,
            timeout=Duration.minutes(5),
            state_machine_name='guardduty-state-machine')

        event.Rule(
            scope=self,
            id='EventBridgeCatchIPv4',
            description="Security Hub - GuardDuty findings with remote IP",
            rule_name='guardduty-catch-ipv4',
            event_pattern=event.EventPattern(
                account=['123456789012'],
                detail_type=["GuardDuty Finding"],
                source=['aws.securityhub'],
                detail={
                    "findings": {
                        "ProductFields": {
                            "aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4":
                            [{
                                "exists": True
                            }]
                        }
                    }
                }),
            targets=[
                event_target.SfnStateMachine(
                    machine=guardduty_state_machine,
                    dead_letter_queue=dlq_statemachine)
            ])
        """ Send other findings to slack """
        send_finding_to_slack = _lambda.Function(
            self,
            'SendFindingToSlack',
            function_name='send-finding-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendFindingToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_findings_task = step_fn_task.LambdaInvoke(
            self,
            'SendFindingToSlackTask',
            lambda_function=send_finding_to_slack,
            payload=step_fn.TaskInput.from_object({
                "comment":
                "Others fields from the GuardDuty / Security Hub finding",
                "severity.$":
                "$.detail.findings[0].Severity.Label",
                "Account_ID.$":
                "$.account",
                "Finding_ID.$":
                "$.id",
                "Finding_Type.$":
                "$.detail.findings[0].Types",
                "Region.$":
                "$.region",
                "Finding_description.$":
                "$.detail.findings[0].Description"
            }),
            result_path='$')

        slack_failure_job = step_fn.Fail(self,
                                         'SlackNotifyFailureJob',
                                         cause='Any Failure',
                                         error='Unknown')

        finding_definition = step_fn.Chain \
            .start(send_findings_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=slack_failure_job))

        sechub_findings_state_machine = step_fn.StateMachine(
            self,
            'SecHubFindingsStateMachine',
            definition=finding_definition,
            timeout=Duration.minutes(5),
            state_machine_name='sechub-finding-state-machine')

        event.Rule(scope=self,
                   id='EventBridgeFindings',
                   description="Security Hub - GuardDuty findings others",
                   rule_name='others-findings',
                   event_pattern=event.EventPattern(
                       account=['123456789012'],
                       source=['aws.securityhub'],
                       detail_type=['Security Hub Findings - Imported'],
                       detail={"severity": [5, 8]}),
                   targets=[
                       event_target.SfnStateMachine(
                           machine=sechub_findings_state_machine,
                           dead_letter_queue=dlq_statemachine)
                   ])
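
Note: the two Choice states branch on $.NewIP and $.Result, so lambda_fns/addIPToDDB.py has to return a NewIP flag. A rough sketch under that assumption (the table name comes from the ACLMETATABLE environment variable set above):

import os
import boto3

ddb = boto3.resource('dynamodb')
table = ddb.Table(os.environ['ACLMETATABLE'])

def handler(event, context):
    # Store the offending IP and report whether it was seen before;
    # the state machine branches on the NewIP flag.
    host_ip = event['HostIp']
    seen = table.get_item(Key={'HostIp': host_ip}).get('Item')
    if not seen:
        table.put_item(Item={'HostIp': host_ip,
                             'Timestamp': event['Timestamp'],
                             'FindingId': event['FindingId']})
    return {**event, 'NewIP': not seen}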
Esempio n. 26
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Let's create a couple of instances to test:
        vpc = _ec2.Vpc(self,
                       "abacVPC",
                       cidr="10.13.0.0/21",
                       max_azs=2,
                       nat_gateways=0,
                       subnet_configuration=[
                           _ec2.SubnetConfiguration(
                               name="pubSubnet",
                               cidr_mask=24,
                               subnet_type=_ec2.SubnetType.PUBLIC)
                       ])
        core.Tag.add(vpc,
                     key="ServiceProvider",
                     value="KonStone",
                     include_resource_types=[])

        weak_sg = _ec2.SecurityGroup(
            self,
            "web_sec_grp",
            vpc=vpc,
            description="Allow internet access from the world",
            allow_all_outbound=True)
        # vpc_cidr_block
        # weak_sg.add_ingress_rule(_ec2.Peer.any_ipv4(),
        weak_sg.add_ingress_rule(_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                 _ec2.Port.tcp(22),
                                 "Allow SSH access from the VPC Only.")

        # We are using the latest AMAZON LINUX AMI
        # Benefit of having SSM Agent pre-installed
        ami_id = _ec2.AmazonLinuxImage(generation=_ec2.AmazonLinuxGeneration.
                                       AMAZON_LINUX_2).get_image(self).image_id

        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_iam/Role.html
        instance_profile_role = _iam.Role(
            self,
            'ec2ssmroleid',
            assumed_by=_iam.ServicePrincipal('ec2.amazonaws.com'),
            role_name="instance_profile_role")

        instance_profile_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        instance_profile_role_additional_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "arn:aws:logs:*:*:*",
            ],
            actions=["logs:Create*", "logs:PutLogEvents"])
        instance_profile_role_additional_perms.sid = "PutBucketPolicy"
        instace_profile_role.add_to_policy(
            instance_profile_role_additional_perms)

        inst_profile_01 = _iam.CfnInstanceProfile(
            self,
            "instProfile01Id",
            roles=[instance_profile_role.role_name],
        )

        # Let us bootstrap the server with the required agents
        bootstrap_data = b""
        try:
            with open("./bootstrap_scripts/install_agents.sh",
                      mode='rb') as file:
                bootstrap_data = file.read()
        except OSError:
            print('Failed to get UserData script')

        install_agents = _ec2.UserData.for_linux()
        install_agents.add_commands(str(bootstrap_data, 'utf-8'))

        # The EC2 Instance to monitor for failed SSH Logins
        ssh_monitored_inst_01 = _ec2.CfnInstance(
            self,
            "sshMonitoredInstance01",
            image_id=ami_id,
            instance_type="t2.micro",
            monitoring=False,
            tags=[{
                "key": "ServiceProvider",
                "value": "KonStone"
            }],
            iam_instance_profile=inst_profile_01.ref,
            network_interfaces=[{
                "deviceIndex": "0",
                "associatePublicIpAddress": True,
                "subnetId": vpc.public_subnets[0].subnet_id,
                "groupSet": [weak_sg.security_group_id]
            }],  # https://github.com/aws/aws-cdk/issues/3419
            user_data=core.Fn.base64(install_agents.render()),
        )
        """
        linux_ami = _ec2.GenericLinuxImage({ "cn-northwest-1": "ami-0f62e91915e16cfc2","eu-west-1": "ami-12345678"})
        ssh_monitored_inst_01_02 = _ec2.Instance(self,
            "monitoredInstance02",
            instance_type=_ec2.InstanceType(instance_type_identifier="t2.micro"),
            instance_name="monitoredInstance02",
            machine_image=linux_ami,
            vpc=vpc,
            security_group=[weak_sg.security_group_id],
            # vpc_subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC)
            vpc_subnets=vpc.public_subnets[0].subnet_id,
            # user_data=_ec2.UserData.custom(t_user_data)
            )
        """

        # The log group name to store logs
        info_sec_ops_log_group = _logs.LogGroup(
            self,
            "infoSecOpsLogGroupId",
            log_group_name=(f"/Mystique/InfoSec/Automation/"
                            f"{ssh_monitored_inst_01.ref}"),
            retention=_logs.RetentionDays.ONE_WEEK)

        # Defines an AWS Lambda resource

        with open("lambda_src/quarantine_ec2_instance.py",
                  encoding="utf8") as fp:
            quarantine_ec2_instance_fn_handler_code = fp.read()

        quarantine_ec2_instance_fn = _lambda.Function(
            self,
            id='quarantineEc2InstanceFnId',
            function_name="quarantine_ec2_instance",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(quarantine_ec2_instance_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(5))
        quarantine_ec2_instance_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "*",
            ],
            actions=[
                "ec2:RevokeSecurityGroupIngress",
                "ec2:DescribeSecurityGroupReferences",
                "ec2:RevokeSecurityGroupEgress",
                "ec2:ApplySecurityGroupsToClientVpnTargetNetwork",
                "ec2:DescribeSecurityGroups", "ec2:CreateSecurityGroup",
                "ec2:DescribeInstances", "ec2:CreateTags", "ec2:StopInstances",
                "ec2:CreateVolume", "ec2:CreateSnapshots",
                "ec2:CreateSnapshot", "ec2:DescribeSnapshots",
                "ec2:ModifyInstanceAttribute"
            ])
        quarantine_ec2_instance_fn_perms.sid = "AllowLambdaToQuarantineEC2"
        quarantine_ec2_instance_fn.add_to_role_policy(
            quarantine_ec2_instance_fn_perms)

        info_sec_ops_topic = _sns.Topic(self,
                                        "infoSecOpsTopicId",
                                        display_name="InfoSecTopic",
                                        topic_name="InfoSecOpsTopic")

        # Ref: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        ###############################################################################
        ################# STEP FUNCTIONS EXPERIMENTAL CODE - UNSTABLE #################
        ###############################################################################

        quarantine_ec2_instance_task = _sfn.Task(
            self,
            "Quarantine EC2 Instance",
            task=_tasks.InvokeFunction(quarantine_ec2_instance_fn),
            result_path="$")

        notify_secops_task = _sfn.Task(
            self,
            "Notify InfoSecOps",
            task=_tasks.PublishToTopic(
                info_sec_ops_topic,
                integration_pattern=_sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
                message=_sfn.TaskInput.from_data_at("$.message"),
                subject="SSH Error Response Notification"))

        ssh_error_response_failure = _sfn.Fail(
            self,
            "SSH Error Response Actions Failed",
            cause="All Response Actions were NOT completed",
            error="Check Logs")

        ssh_error_response_success = _sfn.Succeed(
            self,
            "SSH Error Response Actions Succeeded",
            comment="All Response Action Completed Successfully",
        )

        ssh_error_response_sfn_definition = quarantine_ec2_instance_task\
            .next(notify_secops_task\
                .next(_sfn.Choice(self, "SSH Errors Response Complete?")\
                    .when(_sfn.Condition.number_equals("$.SdkHttpMetadata.HttpStatusCode", 200),ssh_error_response_success)\
                    .when(_sfn.Condition.not_(
                        _sfn.Condition.number_equals("$.SdkHttpMetadata.HttpStatusCode", 200)), ssh_error_response_failure)\
                    .otherwise(ssh_error_response_failure)
                    )
            )

        ssh_error_response_statemachine = _sfn.StateMachine(
            self,
            "stateMachineId",
            definition=ssh_error_response_sfn_definition,
            timeout=core.Duration.minutes(5))

        ###############################################################################
        ################# STEP FUNCTIONS EXPERIMENTAL CODE - UNSTABLE #################
        ###############################################################################

        # LAMBDA TO TRIGGER STATE MACHINE - since state cannot be invoked by SNS
        with open("lambda_src/trigger_state_machine.py",
                  encoding="utf8") as fp:
            trigger_state_machine_fn_handler_code = fp.read()

        trigger_state_machine_fn = _lambda.Function(
            self,
            id='sshErrorResponseFnId',
            function_name="trigger_ssh_error_response_state_machine_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(trigger_state_machine_fn_handler_code),
            # code=_lambda.Code.asset("lambda_src/is_policy_permissive.py"),
            # code=_lambda.Code.asset('lambda_src'),
            # code=_lambda.InlineCode(code_body),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(5),
            environment={
                "STATE_MACHINE_ARN":
                f"{ssh_error_response_statemachine.state_machine_arn}",
            })

        trigger_state_machine_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                f"{ssh_error_response_statemachine.state_machine_arn}",
            ],
            actions=["states:StartExecution"])
        trigger_state_machine_fn_perms.sid = "PutBucketPolicy"
        trigger_state_machine_fn.add_to_role_policy(
            trigger_state_machine_fn_perms)
        """
        version = trigger_state_machine_fn.add_version(name=datetime.now().isoformat())
        trigger_state_machine_fn_alias = _lambda.Alias(self, 
            'lmdaAliasId',
            alias_name='MystiqueTestAlias',
            version=version
            )
        """

        # Lets add permission to SNS to trigger our lambda function
        trigger_lambda_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                trigger_state_machine_fn.function_arn,
            ],
            actions=[
                "lambda:InvokeFunction",
            ])
        trigger_lambda_perms.sid = "TriggerLambaFunction"
        # info_sec_ops_topic.add_to_resource_policy( trigger_lambda_perms )

        # Subscribe InfoSecOps Email to topic
        info_sec_ops_topic.add_subscription(
            _subs.EmailSubscription(global_args.INFO_SEC_OPS_EMAIL))
        # info_sec_ops_topic.add_subscription(_subs.LambdaSubscription(trigger_state_machine_fn))

        trigger_state_machine_fn_alarm = trigger_state_machine_fn.metric_all_errors(
        ).create_alarm(
            self,
            "fn-error-alarm",
            threshold=5,
            alarm_name="trigger_state_machine_fn_error_alarm",
            evaluation_periods=5,
            period=core.Duration.minutes(1),
        )

        subscribe_trigger_state_machine_fn_to_logs = _logs.SubscriptionFilter(
            self,
            "sshErrorLogSubscriptionId",
            log_group=info_sec_ops_log_group,
            destination=_logs_destination.LambdaDestination(
                trigger_state_machine_fn),
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "status",
                "...").where_string("status", "=", "Invalid"),
        )

        # https://pypi.org/project/aws-cdk.aws-logs/
        # We are creating three filters:
        # tooManySshDisconnects, invalidSshUser and invalidSshKey.
        # When a user tries to SSH with an invalid username, the next line is logged in the SSH log file:
        # Apr 20 02:39:35 ip-172-31-63-56 sshd[17136]: Received disconnect from xxx.xxx.xxx.xxx: 11:  [preauth]
        too_many_ssh_disconnects_metric = _cloudwatch.Metric(
            namespace=f"{global_args.OWNER}",
            metric_name="tooManySshDisconnects")
        too_many_ssh_disconnects_filter = _logs.MetricFilter(
            self,
            "tooManySshDisconnectsFilterId",
            log_group=info_sec_ops_log_group,
            metric_namespace=too_many_ssh_disconnects_metric.namespace,
            metric_name=too_many_ssh_disconnects_metric.metric_name,
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "msg1", "msg2",
                "...").where_string("msg2", "=", "disconnect"),
            metric_value="1")

        invalid_ssh_user_metric = _cloudwatch.Metric(
            namespace=f"{global_args.OWNER}",
            metric_name="invalidSshUser",
        )
        invalid_ssh_user_filter = _logs.MetricFilter(
            self,
            "invalidSshUserFilterId",
            log_group=info_sec_ops_log_group,
            metric_namespace=invalid_ssh_user_metric.namespace,
            metric_name=invalid_ssh_user_metric.metric_name,
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "status",
                "...").where_string("status", "=", "Invalid"),
            metric_value="1")

        invalid_ssh_key_metric = _cloudwatch.Metric(
            namespace=f"{global_args.OWNER}", metric_name="invalidSshKey")

        invalid_ssh_key_filter = _logs.MetricFilter(
            self,
            "invalidSshKeyFilterId",
            log_group=info_sec_ops_log_group,
            metric_namespace=invalid_ssh_key_metric.namespace,
            metric_name=invalid_ssh_key_metric.metric_name,
            filter_pattern=_logs.FilterPattern.space_delimited(
                "Mon", "day", "timestamp", "ip", "id", "msg1", "msg2",
                "...").where_string("msg1", "=", "Connection").where_string(
                    "msg2", "=", "closed"),
            metric_value="1")

        # Now let us create alarms
        # An alarm is raised when there are more than 5 (threshold) occurrences of the metric
        # in 1 (datapoints_to_alarm) of the last 3 (evaluation_periods) one-minute periods:
        # Period=60 seconds, Evaluation=3, Threshold=5
        too_many_ssh_disconnects_alarm = _cloudwatch.Alarm(
            self,
            "tooManySshDisconnectsAlarmId",
            alarm_name="too_many_ssh_disconnects_alarm",
            alarm_description=
            "The number disconnect requests is greater then 5, even 1 time in 3 minutes",
            metric=too_many_ssh_disconnects_metric,
            actions_enabled=True,
            period=core.Duration.minutes(1),
            threshold=5,
            evaluation_periods=3,
            datapoints_to_alarm=1,
            statistic="sum",
            comparison_operator=_cloudwatch.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD)

        invalid_ssh_user_alarm = _cloudwatch.Alarm(
            self,
            "invalidSshUserAlarmId",
            alarm_name="too_many_invalid_ssh_users_alarm",
            alarm_description=
            "The number of invalid ssh users connecting is greater then 5, even 1 time in 3 minutes",
            metric=invalid_ssh_user_metric,
            actions_enabled=True,
            period=core.Duration.minutes(1),
            threshold=5,
            evaluation_periods=3,
            datapoints_to_alarm=1,
            statistic="sum",
            comparison_operator=_cloudwatch.ComparisonOperator.
            GREATER_THAN_THRESHOLD)
        invalid_ssh_user_alarm.add_alarm_action(
            _cloudwatch_actions.SnsAction(info_sec_ops_topic))

        invalid_ssh_key_alarm = _cloudwatch.Alarm(
            self,
            "invalidSshKeyAlarmId",
            alarm_name="too_many_invalid_ssh_key_alarm",
            alarm_description=
            "The number of invalid ssh keys connecting is greater then 5, even 1 time in 3 minutes",
            metric=invalid_ssh_key_metric,
            actions_enabled=True,
            period=core.Duration.minutes(1),
            threshold=5,
            evaluation_periods=3,
            datapoints_to_alarm=1,
            statistic="sum",
            comparison_operator=_cloudwatch.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD)
        invalid_ssh_key_alarm.add_alarm_action(
            _cloudwatch_actions.SnsAction(info_sec_ops_topic))

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output0 = core.CfnOutput(
            self,
            "SecuirtyAutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output1_1 = core.Fn.get_att(
            logical_name_of_resource="sshMonitoredInstance01",
            attribute_name="PublicIp")
        output1 = core.CfnOutput(self,
                                 "MonitoredInstance",
                                 value=output1_1.to_string(),
                                 description="Web Server Public IP to attack")

        output2 = core.CfnOutput(
            self,
            "SSHAlarms",
            value=
            (f"https://console.aws.amazon.com/cloudwatch/home?region="
             f"{core.Aws.REGION}"
             f"#/configuration/"
             f"#alarmsV2:?search=ssh&alarmStateFilter=ALL&alarmTypeFilter=ALL"
             ),
            description="Check out the cloudwatch Alarms")

        output3 = core.CfnOutput(
            self,
            "SubscribeToNotificationTopic",
            value=(f"https://console.aws.amazon.com/sns/v3/home?"
                   f"{core.Aws.REGION}"
                   f"#/topic/"
                   f"{info_sec_ops_topic.topic_arn}"),
            description=
            "Add your email to subscription and confirm subscription")

        output_test_1 = core.CfnOutput(
            self,
            "ToGenInvalidKeyErrors",
            value=
            (f"for i in {{1..30}}; do ssh -i $RANDOM ec2-user@{output1_1.to_string()}; sleep 2; done &"
             ),
            description=
            "Generates random key names and connects to server 30 times over 60 seconds"
        )

        output_test_2 = core.CfnOutput(
            self,
            "ToGenInvalidUserErrors",
            value=
            (f"for i in {{1..30}}; do ssh ec2-user$RANDOM@{output1_1.to_string()}; sleep 2; done &"
             ),
            description=
            "Generates random user names and connects to server 30 times over 60 seconds"
        )
        """
Esempio n. 27
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        test_queue = sqs.Queue(self, 'test-queue', queue_name='test1')

        test_topic = sns.Topic(self, 'test-topic')

        sns.Subscription(self,
                         'test-subscription',
                         topic=test_topic,
                         endpoint=test_queue.queue_arn,
                         protocol=sns.SubscriptionProtocol.SQS)

        kinesis.Stream(self,
                       'test-stream',
                       stream_name='donut-sales',
                       shard_count=2)

        create_order = step.Pass(self,
                                 'create-order',
                                 result=step.Result.from_object({
                                     "Order": {
                                         "Customer": "Alice",
                                         "Product": "Coffee",
                                         "Billing": {
                                             "Price": 10.0,
                                             "Quantity": 4.0
                                         }
                                     }
                                 }))
        calculate_amount = step.Pass(self,
                                     'calculate-amount',
                                     result=step.Result.from_number(40.0),
                                     result_path='$.Order.Billing.Amount',
                                     output_path='$.Order.Billing')
        order_definition = create_order.next(calculate_amount)
        step.StateMachine(self,
                          'test-state-machine',
                          state_machine_name='order-machine',
                          definition=order_definition)

        make_tea = step.Choice(
            self, 'make-tea', comment='Input should look like {"tea":"green"}')
        green = step.Pass(self,
                          'green',
                          result=step.Result.from_string('Green tea'))
        make_tea.when(step.Condition.string_equals('$.tea', 'green'), green)
        black = step.Pass(self,
                          'black',
                          result=step.Result.from_string('Black tea'))
        make_tea.when(step.Condition.string_equals('$.tea', 'black'), black)
        orange = step.Pass(self,
                           'orange',
                           result=step.Result.from_string('Orange tea'))
        make_tea.when(step.Condition.string_equals('$.tea', 'orange'), orange)
        error = step.Pass(self,
                          'error',
                          result=step.Result.from_string('Bad input'))
        make_tea.otherwise(error)
        step.StateMachine(self,
                          'test-state-machine-2',
                          state_machine_name='tea-machine',
                          definition=make_tea)
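
Note: for reference, the make-tea Choice above synthesizes to an Amazon States Language Choice state roughly equivalent to the following Python dict (state names match the construct ids):

tea_choice_asl = {
    "Type": "Choice",
    "Comment": 'Input should look like {"tea":"green"}',
    "Choices": [
        {"Variable": "$.tea", "StringEquals": "green", "Next": "green"},
        {"Variable": "$.tea", "StringEquals": "black", "Next": "black"},
        {"Variable": "$.tea", "StringEquals": "orange", "Next": "orange"},
    ],
    "Default": "error",
}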
Esempio n. 28
0
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # Create both lambdas

        with open("lambda-submit.py", encoding="utf8") as fp:
            lambda_submit_code = fp.read()

        lambdaFn1 = lambda_.Function(
            self,
            "submitsmbatch",
            code=lambda_.InlineCode(lambda_submit_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={
            "transform_job_name":transform_job_name,
            "model_name":model_name,
            "max_concurrent":max_concurrent,
            "max_payload_size":max_payload_size,
            "s3_uri_in":s3_uri_in,
            "s3_uri_out":s3_uri_out,
            "instance_type":instance_type,
            "instance_count":instance_count,
            }
        )

        # Add perms
        lambdaFn1.add_to_role_policy(aws_iam.PolicyStatement(
            actions = ['sagemaker:CreateTransformJob',],
            resources = ['arn:aws:sagemaker:{}:{}:transform-job/{}*'.format(my_region,my_acc_id,transform_job_name),]
            ))

       
        with open("lambda-check.py", encoding="utf8") as fp:
            lambda_check_code = fp.read()

        lambdaFn2 = lambda_.Function(
            self,
            "checksmbatch",
            code=lambda_.InlineCode(lambda_check_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={"model_name":model_name, # CHANGE TO YOUR ENDPOINT NAME!!
                        "content_type":"text/csv"}
        )
        # Add perms
        lambdaFn2.add_to_role_policy(aws_iam.PolicyStatement(
            actions = ['sagemaker:DescribeTransformJob',],
            resources = ['arn:aws:sagemaker:{}:{}:transform-job/{}*'.format(my_region,my_acc_id,transform_job_name),]
            ))
        # Define state machine

        # submit_job_activity = sfn.Activity(
        #     self, "SubmitJob"
        # )
        # check_job_activity = sfn.Activity(
        #     self, "CheckJob"
        # )

        submit_job = sfn.Task(
            self, "Submit Job",
            task=sfn_tasks.InvokeFunction(lambdaFn1),
        )

        wait_x = sfn.Wait(
            self, "Wait 1 minute",
            time=sfn.WaitTime.duration(core.Duration.minutes(1)),
        )
        get_status = sfn.Task(
            self, "Get Job Status",
            task=sfn_tasks.InvokeFunction(lambdaFn2),
        )
        is_complete = sfn.Choice(
            self, "Job Complete?"
        )
        job_failed = sfn.Fail(
            self, "Job Failed",
            cause="AWS Batch Job Failed",
            error="DescribeJob returned FAILED"
        )
        final_status = sfn.Task(
            self, "Get Final Job Status",
            task=sfn_tasks.InvokeFunction(lambdaFn2),
        )

        definition = submit_job\
            .next(wait_x)\
            .next(get_status)\
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                      "$.status", "Failed"), job_failed)
                  .when(sfn.Condition.string_equals(
                      "$.status", "Completed"), final_status)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self, "SMbatchInference",
            definition=definition,
        )
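
Note: the legacy InvokeFunction integration replaces the state output with the Lambda's return value, so lambda-check.py must return a status field matching the strings the Choice tests. A plausible sketch, assuming the transform job name is available in the state input:

import boto3

sm = boto3.client('sagemaker')

def lambda_handler(event, context):
    # Map SageMaker's TransformJobStatus onto the two strings the
    # Choice state compares against ("Completed" / "Failed").
    job_name = event['transform_job_name']      # assumed input field
    status = sm.describe_transform_job(
        TransformJobName=job_name)['TransformJobStatus']
    if status in ('Failed', 'Stopped'):
        status = 'Failed'
    return {'status': status, 'transform_job_name': job_name}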
Esempio n. 29
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The start of the image pipeline
        imageBucket = aws_s3.Bucket(self, "imageBucket")

        # Capture API activity with a trail
        imageBucketTrail = aws_cloudtrail.Trail(self,
                                                "imageBucketTrail",
                                                is_multi_region_trail=False)

        # Restrict to S3 data-plane events
        imageBucketTrail.add_s3_event_selector(
            include_management_events=False,
            prefixes=[f"{imageBucket.bucket_arn}/"],
            read_write_type=aws_cloudtrail.ReadWriteType.WRITE_ONLY)

        # Filter to just PutObject and CopyObject events
        imageBucketRule = aws_events.Rule(
            self,
            "imageBucketRule",
            event_pattern={
                "source": ["aws.s3"],
                "detail": {
                    "eventSource": ["s3.amazonaws.com"],
                    "eventName": ["PutObject", "CopyObject"],
                    "requestParameters": {
                        "bucketName": [imageBucket.bucket_name]
                    }
                }
            })

        #--
        #  Lambda Layers
        #--------------------#

        opencvLayer = aws_lambda.LayerVersion(
            self,
            'opencvLayer',
            code=aws_lambda.AssetCode('layers/opencvLayer'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_6])

        boto3Layer = aws_lambda.LayerVersion(
            self,
            'boto3Layer',
            code=aws_lambda.AssetCode('layers/boto3Layer'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_6])

        #--
        #  Lambda Functions
        #--------------------#

        # Gather info about an image, name, extension, etc
        getImageInfoFunc = aws_lambda.Function(
            self,
            "getImageInfoFunc",
            code=aws_lambda.AssetCode('functions/getImageInfoFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6)

        # The home for the website
        webBucket = aws_s3.Bucket(self,
                                  "webBucket",
                                  website_index_document='index.html')

        # Copy the image to the web bucket
        copyImageFunc = aws_lambda.Function(
            self,
            "copyImageFunc",
            code=aws_lambda.AssetCode('functions/copyImageFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            layers=[boto3Layer],
            environment={
                'OUTPUTBUCKET': webBucket.bucket_name,
                'OUTPUTPREFIX': 'images/'
            })

        # Grant permissions to read from the source and write to the destination
        imageBucket.grant_read(copyImageFunc)
        webBucket.grant_write(copyImageFunc)

        # Create a thumbnail of the image and place in the web bucket
        createThumbnailFunc = aws_lambda.Function(
            self,
            "createThumbnailFunc",
            code=aws_lambda.AssetCode('functions/createThumbnailFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            layers=[boto3Layer, opencvLayer],
            timeout=core.Duration.seconds(10),
            memory_size=256,
            environment={
                'OUTPUTBUCKET': webBucket.bucket_name,
                'OUTPUTPREFIX': 'images/'
            })

        # Grant permissions to read from the source and write to the destination
        imageBucket.grant_read(createThumbnailFunc)
        webBucket.grant_write(createThumbnailFunc)

        # Store page information
        pageTable = aws_dynamodb.Table(
            self,
            'pageTable',
            partition_key={
                'name': 'pageName',
                'type': aws_dynamodb.AttributeType.STRING
            },
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            stream=aws_dynamodb.StreamViewType.NEW_IMAGE)

        # Save page and image information
        updatePageInfoFunc = aws_lambda.Function(
            self,
            "updatePageInfoFunc",
            code=aws_lambda.AssetCode('functions/updatePageInfoFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            layers=[boto3Layer],
            environment={
                'PAGETABLE': pageTable.table_name,
                'PAGEPREFIX': 'posts/'
            })

        # Grant permissions to write to the page table
        pageTable.grant_write_data(updatePageInfoFunc)

        imagePipelineDone = aws_stepfunctions.Succeed(self,
                                                      "Done processing image")

        updatePageInfoJob = aws_stepfunctions.Task(
            self,
            'Update page info',
            task=aws_stepfunctions_tasks.InvokeFunction(updatePageInfoFunc))
        updatePageInfoJob.next(imagePipelineDone)

        copyImageJob = aws_stepfunctions.Task(
            self,
            'Copy image',
            task=aws_stepfunctions_tasks.InvokeFunction(copyImageFunc))

        createThumbnailJob = aws_stepfunctions.Task(
            self,
            'Create thumbnail',
            task=aws_stepfunctions_tasks.InvokeFunction(createThumbnailFunc))

        # These tasks can be done in parallel
        processImage = aws_stepfunctions.Parallel(self,
                                                  'Process image',
                                                  result_path="$.images")

        processImage.branch(copyImageJob)
        processImage.branch(createThumbnailJob)
        processImage.next(updatePageInfoJob)

        # Results of file extension check
        notPng = aws_stepfunctions.Succeed(self, "Not a PNG")

        # Verify the file extension
        checkForPng = aws_stepfunctions.Choice(self, 'Is a PNG?')
        checkForPng.when(
            aws_stepfunctions.Condition.string_equals('$.extension', 'png'),
            processImage)
        checkForPng.otherwise(notPng)

        # A single image pipeline job for testing
        getImageInfoJob = aws_stepfunctions.Task(
            self,
            'Get image info',
            task=aws_stepfunctions_tasks.InvokeFunction(getImageInfoFunc))
        getImageInfoJob.next(checkForPng)

        # Configure the image pipeline and starting state
        imagePipeline = aws_stepfunctions.StateMachine(
            self, "imagePipeline", definition=getImageInfoJob)

        # Matching events start the image pipeline
        imageBucketRule.add_target(
            aws_events_targets.SfnStateMachine(
                imagePipeline,
                input=aws_events.RuleTargetInput.from_event_path(
                    "$.detail.requestParameters")))
Esempio n. 30
0
    def __init__(self, app: App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # Lambda Handlers Definitions

        submit_lambda = _lambda.Function(
            self,
            'submitLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/submit'))

        status_lambda = _lambda.Function(
            self,
            'statusLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/status'))

        # Step functions Definition

        submit_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            output_path="$.Payload",
        )

        wait_job = _aws_stepfunctions.Wait(
            self,
            "Wait 30 Seconds",
            time=_aws_stepfunctions.WaitTime.duration(Duration.seconds(30)))

        status_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Get Status",
            lambda_function=status_lambda,
            output_path="$.Payload",
        )

        fail_job = _aws_stepfunctions.Fail(self,
                                           "Fail",
                                           cause='AWS Batch Job Failed',
                                           error='DescribeJob returned FAILED')

        succeed_job = _aws_stepfunctions.Succeed(
            self, "Succeeded", comment='AWS Batch Job succeeded')

        # Create Chain

        definition = submit_job.next(wait_job)\
            .next(status_job)\
            .next(_aws_stepfunctions.Choice(self, 'Job Complete?')
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'FAILED'), fail_job)
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'SUCCEEDED'), succeed_job)
                  .otherwise(wait_job))

        # Create state machine
        sm = _aws_stepfunctions.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=Duration.minutes(5),
        )
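
Note: a design point worth calling out in this last chain: LambdaInvoke wraps the function result in an invocation record, and output_path="$.Payload" unwraps it so the Choice can test $.status directly. The status Lambda therefore only needs to return a dict of this shape (a minimal sketch, not the real handler):

# lambdas/status/lambda_function.py - minimal shape for illustration only
def lambda_handler(event, context):
    # A real handler would look up the job here; hard-coded for the sketch.
    return {'status': 'SUCCEEDED', 'job_id': event.get('job_id')}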