Example #1
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        flip_coin_function = lambda_.Function(
            self,
            "FlipCoinFunction",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="index.handler",
            code=lambda_.Code.from_asset("./sfn/lambda/flip_coin"))

        flip_coin_invoke = tasks.LambdaInvoke(
            self, "FlipCoin", lambda_function=flip_coin_function)

        wait = stepfunctions.Wait(self,
                                  "Wait",
                                  time=stepfunctions.WaitTime.duration(
                                      core.Duration.seconds(5)))

        tails_result = stepfunctions.Pass(self, "TailsResult")
        tails_result.next(flip_coin_invoke)

        choice = stepfunctions.Choice(self,
                                      "HeadsTailsChoice") \
            .when(condition=stepfunctions.Condition.string_equals("$.Payload.result", "heads"),
                  next=stepfunctions.Succeed(self, "HeadsResult")) \
            .when(condition=stepfunctions.Condition.string_equals("$.Payload.result", "tails"),
                  next=tails_result)

        stepfunctions.StateMachine(self,
                                   "StateMachine",
                                   definition=flip_coin_invoke.next(
                                       wait.next(choice)))
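The snippet above is just the stack constructor body; as a rough sketch, it presumably relies on CDK v1 imports along these lines (the module aliases are assumptions inferred from the code):

from aws_cdk import core
from aws_cdk import aws_lambda as lambda_
from aws_cdk import aws_stepfunctions as stepfunctions
from aws_cdk import aws_stepfunctions_tasks as tasks

Example #2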
    def __init__(self, scope: core.App, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        pass_through_lambda = _lambda.Function(
            self,
            'PassThroughLambda',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='pass_through_lambda.handler')

        loop_count_lambda = _lambda.Function(
            self,
            'LoopCountLambda',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='loop_count_lambda.handler')

        start_state_machine = sfn.Task(
            self,
            "Start CodeBuild Lambda",
            task=sfn_tasks.InvokeFunction(pass_through_lambda))

        wait_x = sfn.Wait(
            self,
            "Wait X Seconds",
            time=sfn.WaitTime.seconds_path('$.wait_time'),
        )

        get_state_machine_status = sfn.Task(
            self,
            "Get Build Status",
            task=sfn_tasks.InvokeFunction(loop_count_lambda))

        is_complete = sfn.Choice(self, "Job Complete?")

        state_machine_failed = sfn.Fail(self,
                                        "Build Failed",
                                        cause="AWS Batch Job Failed",
                                        error="DescribeJob returned FAILED")

        state_machine_success = sfn.Pass(self, "Build Successs")

        definition = start_state_machine\
            .next(wait_x)\
            .next(get_state_machine_status)\
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                      "$.status", "FAILED"), state_machine_failed)
                  .when(sfn.Condition.string_equals(
                      "$.status", "SUCCEEDED"), state_machine_success)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=core.Duration.seconds(60),
        )
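sfn.Task with sfn_tasks.InvokeFunction, as used above, is the legacy CDK v1 task API; in later releases the same step is usually written with LambdaInvoke. A minimal sketch of the equivalent call (not a drop-in replacement unless payload_response_only is set, since the result shape differs):

        start_state_machine = sfn_tasks.LambdaInvoke(
            self,
            "Start CodeBuild Lambda",
            lambda_function=pass_through_lambda,
            # return the function's payload directly, like the old InvokeFunction did
            payload_response_only=True)

Example #3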
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 polling_delay: int = 5,
                 statemachine_timeout: int = 300,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        state_fn = StateHandlerLambda(self, "config-state-handler").function
        config_fn = AccountConfigLambda(self,
                                        "account-config-handler").function

        config_state = tasks.LambdaInvoke(self,
                                          "Set Configuring State",
                                          lambda_function=state_fn,
                                          output_path="$.Payload")

        completed_state = tasks.LambdaInvoke(self,
                                             "Set Completed State",
                                             lambda_function=state_fn,
                                             output_path="$.Payload")

        config_task = tasks.LambdaInvoke(self,
                                         "Request Account Configuration",
                                         lambda_function=config_fn,
                                         output_path="$.Payload")

        polling_task = tasks.LambdaInvoke(self,
                                          "Poll Account Configuration",
                                          lambda_function=config_fn,
                                          output_path="$.Payload")

        delay = sfn.Wait(self,
                         "Delay Polling",
                         time=sfn.WaitTime.duration(
                             core.Duration.seconds(polling_delay)))

        is_ready = sfn.Choice(self, "Account Ready?")
        acct_ready = sfn.Condition.string_equals('$.state', "READY")
        acct_pending = sfn.Condition.string_equals('$.state', "PENDING")
        success = sfn.Succeed(self, "Config Succeeded")

        failed = sfn.Fail(self,
                          "Config Failed",
                          cause="Bad value in Polling loop")
        # This is the polling loop: either loop back to the delay, or set the completed state and finish.
        is_ready.when(acct_pending, delay).when(
            acct_ready, completed_state.next(success)).otherwise(failed)
        # This is the main chain: the configuration request, the state update, a delay, and then the polling loop.
        config_chain = config_task.next(config_state).next(delay).next(
            polling_task).next(is_ready)

        self.state_machine = sfn.StateMachine(
            self,
            "Account-Config-StateMachine",
            definition=config_chain,
            timeout=core.Duration.seconds(statemachine_timeout))
Example #4
    def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        submit_job_activity = sfn.Activity(
            self, "SubmitJob"
        )
        check_job_activity = sfn.Activity(
            self, "CheckJob"
        )

        submit_job = sfn.Task(
            self, "Submit Job",
            task=sfn_tasks.InvokeActivity(submit_job_activity),
            result_path="$.guid",
        )
        wait_x = sfn.Wait(
            self, "Wait X Seconds",
            duration=sfn.WaitDuration.seconds_path('$.wait_time'),
        )
        get_status = sfn.Task(
            self, "Get Job Status",
            task=sfn_tasks.InvokeActivity(check_job_activity),
            input_path="$.guid",
            result_path="$.status",
        )
        is_complete = sfn.Choice(
            self, "Job Complete?"
        )
        job_failed = sfn.Fail(
            self, "Job Failed",
            cause="AWS Batch Job Failed",
            error="DescribeJob returned FAILED"
        )
        final_status = sfn.Task(
            self, "Get Final Job Status",
            task=sfn_tasks.InvokeActivity(check_job_activity),
            input_path="$.guid",
        )

        definition = submit_job\
            .next(wait_x)\
            .next(get_status)\
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                      "$.status", "FAILED"), job_failed)
                  .when(sfn.Condition.string_equals(
                      "$.status", "SUCCEEDED"), final_status)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self, "StateMachine",
            definition=definition,
            timeout_sec=30,
        )
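This example appears to target a very early CDK release (sfn.WaitDuration, timeout_sec). Under CDK v1/v2 the same wait and timeout would look roughly like the sketch below, assuming the cdk alias for the core module:

        wait_x = sfn.Wait(
            self, "Wait X Seconds",
            time=sfn.WaitTime.seconds_path('$.wait_time'),
        )

        sfn.StateMachine(
            self, "StateMachine",
            definition=definition,
            timeout=cdk.Duration.seconds(30),
        )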
Example #5
    def __init__(self, scope: Construct, id: str, functions: LambdaLib, **kwargs) -> None:
        super().__init__(scope, id)

        # Step Function
        submit_job = tasks.LambdaInvoke(self, "Submit Job",
            lambda_function=functions.send_email_approval,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path=sfn.JsonPath.DISCARD
        )

        wait_x = sfn.Wait(self, "Wait",
            time= sfn.WaitTime.duration(Duration.minutes(2))
        )

        get_status = tasks.LambdaInvoke(self, "Get Job Status",
            lambda_function=functions.check_status_dynamo,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path="$.status"
        )

        restrict_es = tasks.LambdaInvoke(self, "Restric ES Policy",
            lambda_function=functions.restric_es_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_rds = tasks.LambdaInvoke(self, "Restric RDS",
            lambda_function=functions.restric_rds_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_es_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_ES_PUBLIC)
        restrict_rds_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_RDS_PUBLIC)

        definition = (submit_job.next(wait_x)
                                .next(get_status)
                                .next(sfn.Choice(self, "Job Complete?")
                                .when(sfn.Condition.string_equals("$.status.Payload.status", "Rejected!"), wait_x)
                                # .when(sfn.Condition.string_equals("$.status.Payload.status", "NON_COMPLIANT"), final_task)
                                # .when(sfn.Condition.string_equals("$.status.Payload.status", "Accepted!"), final_task))
                                .otherwise(sfn.Choice(self, "Remediation Choice")
                                .when(restrict_es_condition, restrict_es)
                                .when(restrict_rds_condition, restrict_rds)))
                                )


        self.state_machine = sfn.StateMachine(self, "StateMachine",
            definition=definition,
            timeout=Duration.hours(2)
        )
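Example #6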
    def __init__(self, app: App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # Lambda Handlers Definitions

        submit_lambda = _lambda.Function(
            self,
            'submitLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/submit'))

        status_lambda = _lambda.Function(
            self,
            'statusLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/status'))

        # Step functions Definition

        submit_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            output_path="$.Payload",
        )

        wait_job = _aws_stepfunctions.Wait(
            self,
            "Wait 30 Seconds",
            time=_aws_stepfunctions.WaitTime.duration(Duration.seconds(30)))

        status_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Get Status",
            lambda_function=status_lambda,
            output_path="$.Payload",
        )

        fail_job = _aws_stepfunctions.Fail(self,
                                           "Fail",
                                           cause='AWS Batch Job Failed',
                                           error='DescribeJob returned FAILED')

        succeed_job = _aws_stepfunctions.Succeed(
            self, "Succeeded", comment='AWS Batch Job succeeded')

        # Create Chain

        definition = submit_job.next(wait_job)\
            .next(status_job)\
            .next(_aws_stepfunctions.Choice(self, 'Job Complete?')
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'FAILED'), fail_job)
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'SUCCEEDED'), succeed_job)
                  .otherwise(wait_job))

        # Create state machine
        sm = _aws_stepfunctions.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=Duration.minutes(5),
        )
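All of these snippets are stack constructors; for completeness, a minimal sketch of wiring one into a CDK v2 app might look like this (the stack class name JobPollerStack is hypothetical):

from aws_cdk import App

app = App()
JobPollerStack(app, "JobPollerStack")  # hypothetical stack class defined as above
app.synth()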
Example #7
 def _wait(self, path:str="$.wait_time"):
     return sfn.Wait(
         self, 'waiter',
         time=sfn.WaitTime.seconds_path(path=path)
     )
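A minimal usage sketch for this helper, with hypothetical task names, reading the delay from "$.wait_time" in the state input:

        definition = submit_task.next(self._wait()).next(check_status_task)

Note that the construct id 'waiter' is hard-coded, so as written the helper can only be called once per scope.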
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = aws_ec2.Vpc(self,
                               "demo-stepfunctions",
                               cidr="10.100.0.0/16",
                               max_azs=2,
                               nat_gateways=0,
                               subnet_configuration=[
                                   aws_ec2.SubnetConfiguration(
                                       name='demo-stepfunctions',
                                       subnet_type=aws_ec2.SubnetType.ISOLATED,
                                       cidr_mask=24)
                               ])
        lambda_role = iam.Role(
            self,
            'demo-lambda-role',
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))

        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaENIManagementAccess'))
        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))

        fn_submit = lambda_.Function(
            self,
            'demo-sfn-submit',
            function_name='demo-sfn-submit',
            handler='handler.do',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('./craftaws/func_submit'),
            role=lambda_role,
            timeout=core.Duration.seconds(900),
            allow_public_subnet=False,
            vpc=self.vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED),
            environment={})

        fn_job_1 = lambda_.Function(
            self,
            'demo-sfn-job1',
            function_name='demo-sfn-job1',
            handler='handler.do',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('./craftaws/func_job_1'),
            role=lambda_role,
            timeout=core.Duration.seconds(900),
            allow_public_subnet=False,
            vpc=self.vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED),
            environment={})

        fn_job_2 = lambda_.Function(
            self,
            'demo-sfn-job2',
            function_name='demo-sfn-job2',
            handler='handler.do',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset('./craftaws/func_job_2'),
            role=lambda_role,
            timeout=core.Duration.seconds(900),
            allow_public_subnet=False,
            vpc=self.vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED),
            environment={})

        submit_job = tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=fn_submit,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        step_1_job = tasks.LambdaInvoke(
            self,
            "Job_1",
            lambda_function=fn_job_1,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        wait_x = sfn.Wait(self,
                          "Wait X Seconds",
                          time=sfn.WaitTime.duration(
                              core.Duration.seconds(60)))

        step_2_job = tasks.LambdaInvoke(
            self,
            "Job_2",
            lambda_function=fn_job_1,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        job_succeed = sfn.Succeed(self,
                                  "Job Succeeded",
                                  comment="AWS Batch Job Succeeded")

        definition = submit_job.next(step_1_job).next(wait_x).next(
            step_2_job).next(job_succeed)

        sfn.StateMachine(self,
                         "StateMachine",
                         definition=definition,
                         timeout=core.Duration.minutes(5))
Example #9
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # Create both lambdas

        with open("lambda-submit.py", encoding="utf8") as fp:
            lambda_submit_code = fp.read()

        lambdaFn1 = lambda_.Function(
            self,
            "submitsmbatch",
            code=lambda_.InlineCode(lambda_submit_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={
            "transform_job_name":transform_job_name,
            "model_name":model_name,
            "max_concurrent":max_concurrent,
            "max_payload_size":max_payload_size,
            "s3_uri_in":s3_uri_in,
            "s3_uri_out":s3_uri_out,
            "instance_type":instance_type,
            "instance_count":instance_count,
            }
        )

        # Add perms
        lambdaFn1.add_to_role_policy(aws_iam.PolicyStatement(
            actions = ['sagemaker:CreateTransformJob',],
            resources = ['arn:aws:sagemaker:{}:{}:transform-job/{}*'.format(my_region,my_acc_id,transform_job_name),]
            ))

       
        with open("lambda-check.py", encoding="utf8") as fp:
            lambda_check_code = fp.read()

        lambdaFn2 = lambda_.Function(
            self,
            "checksmbatch",
            code=lambda_.InlineCode(lambda_check_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={"model_name":model_name, # CHANGE TO YOUR ENDPOINT NAME!!
                        "content_type":"text/csv"}
        )
        # Add perms
        lambdaFn2.add_to_role_policy(aws_iam.PolicyStatement(
            actions = ['sagemaker:DescribeTransformJob',],
            resources = ['arn:aws:sagemaker:{}:{}:transform-job/{}*'.format(my_region,my_acc_id,transform_job_name),]
            ))
        # Define state machine

        # submit_job_activity = sfn.Activity(
        #     self, "SubmitJob"
        # )
        # check_job_activity = sfn.Activity(
        #     self, "CheckJob"
        # )

        submit_job = sfn.Task(
            self, "Submit Job",
            task=sfn_tasks.InvokeFunction(lambdaFn1),
        )

        wait_x = sfn.Wait(
            self, "Wait 1 minute",
            time=sfn.WaitTime.duration(core.Duration.minutes(1)),
        )
        get_status = sfn.Task(
            self, "Get Job Status",
            task=sfn_tasks.InvokeFunction(lambdaFn2),
        )
        is_complete = sfn.Choice(
            self, "Job Complete?"
        )
        job_failed = sfn.Fail(
            self, "Job Failed",
            cause="AWS Batch Job Failed",
            error="DescribeJob returned FAILED"
        )
        final_status = sfn.Task(
            self, "Get Final Job Status",
            task=sfn_tasks.InvokeFunction(lambdaFn2),
        )

        definition = submit_job\
            .next(wait_x)\
            .next(get_status)\
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                      "$.status", "Failed"), job_failed)
                  .when(sfn.Condition.string_equals(
                      "$.status", "Completed"), final_status)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self, "SMbatchInference",
            definition=definition,
        )
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        CLUSTER_NAME = self.node.try_get_context("cluster_name")
        NOTIFY_EMAIL = self.node.try_get_context("notify_email")
        SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")

        if not CLUSTER_NAME or not NOTIFY_EMAIL or not SLACK_WEBHOOK_URL:
            logger.error(
                f"Required context variables for {id} were not provided!")
        else:
            # Get the log group of our postgres instance
            log_group = logs.LogGroup.from_log_group_name(
                self,
                "InAur01DetectionLogGroup",
                f"/aws/rds/cluster/{CLUSTER_NAME}/postgresql",
            )

            # Create new metric
            metric = cloudwatch.Metric(
                namespace="LogMetrics",
                metric_name="InAur01DetectionFailedDbLoginAttempts",
            )

            # Apply metric filter
            # Filter all metrics of failed login attempts in log
            logs.MetricFilter(
                self,
                "InAur01DetectionMetricFilter",
                log_group=log_group,
                metric_namespace=metric.namespace,
                metric_name=metric.metric_name,
                filter_pattern=logs.FilterPattern.all_terms(
                    "FATAL:  password authentication failed for user"),
                metric_value="1",
            )

            # Create new SNS topic
            topic = sns.Topic(self, "InAur01DetectionTopic")

            # Add email subscription
            topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))

            # Create new alarm for metric
            # Alarm will trigger if there is >= 10 failed login attempts
            # over a period of 30 seconds.
            alarm = cloudwatch.Alarm(
                self,
                "InAur01DetectionAlarm",
                metric=metric,
                threshold=10,
                evaluation_periods=1,
                period=core.Duration.seconds(30),
                datapoints_to_alarm=1,
                statistic="sum",
            )

            # Add SNS action to alarm
            alarm.add_alarm_action(cw_actions.SnsAction(topic))

            # Create unban lambda
            lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                           "in_aur_01")
            unban_lambda = _lambda.Function(
                self,
                "InAur01ResponseUnbanFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="unban_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
            )
            # Assign EC2 permissions to lambda
            unban_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ec2:DeleteNetworkAclEntry"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))

            # Create stepfunction
            # Define a second state machine to unban the blacklisted IP after 1 hour
            wait_step = sfn.Wait(
                self,
                "InAur01ResponseStepWait",
                time=sfn.WaitTime.duration(core.Duration.hours(1)),
            )
            unban_step = sfn.Task(
                self,
                "InAur01ResponseStepUnban",
                task=tasks.RunLambdaTask(
                    unban_lambda,
                    integration_pattern=sfn.ServiceIntegrationPattern.
                    FIRE_AND_FORGET,
                ),
                parameters={"Payload.$": "$"},
            )
            statemachine = sfn.StateMachine(
                self,
                "InAur01ResponseUnbanStateMachine",
                definition=wait_step.next(unban_step),
                timeout=core.Duration.hours(1.5),
            )

            # Create lambda function
            lambda_func = _lambda.Function(
                self,
                "InAur01ResponseFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="response_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "webhook_url": SLACK_WEBHOOK_URL,
                    "unban_sm_arn": statemachine.state_machine_arn,
                    "cluster_name": CLUSTER_NAME,
                },
            )
            # AWS CDK has a bug where it would not add the correct permission
            # to the lambda for Cloudwatch log subscription to invoke it.
            # Hence, we need to manually add permission to lambda.
            lambda_func.add_permission(
                "InAur01ResponseFunctionInvokePermission",
                principal=iam.ServicePrincipal("logs.amazonaws.com"),
                action="lambda:InvokeFunction",
                source_arn=log_group.log_group_arn + ":*",
            )
            # Assign permissions to response lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "states:StartExecution",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[statemachine.state_machine_arn],
                ))
            # Assign RDS Read-only permissions to lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["rds:Describe*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))
            # Assign EC2 permissions to lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "ec2:Describe*",
                        "ec2:CreateNetworkAclEntry",
                        "ec2:DeleteNetworkAclEntry",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))
            # Assign CloudWatch logs permissions to lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "cloudwatch:Get*",
                        "cloudwatch:Describe*",
                        "logs:FilterLogEvents",
                        "logs:DescribeMetricFilters",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))

            sns_event_source = lambda_event_sources.SnsEventSource(topic)
            lambda_func.add_event_source(sns_event_source)
Example #11
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        ####################################################################################
        # IoT Events

        # IoT Events: Execution role
        iot_events_execution_role = iam.Role(
            self,
            "IoTEventsExecutionRole",
            assumed_by=iam.ServicePrincipal("iotevents.amazonaws.com"))
        iot_events_execution_role.add_to_policy(
            iam.PolicyStatement(resources=["*"], actions=["iot:Publish"]))
        iot_events_execution_role.add_to_policy(
            iam.PolicyStatement(resources=["*"], actions=["SNS:Publish"]))

        # IoT Events: Input
        inputDefinitionProperty = iotevents.CfnInput.InputDefinitionProperty(
            attributes=[{
                "jsonPath": "gatewayid"
            }, {
                "jsonPath": "last_uplink_received_timestamp_ms"
            }, {
                "jsonPath": "last_connection_status"
            }, {
                "jsonPath": "timestamp_iso8601"
            }])

        iot_events_input = iotevents.CfnInput(
            self,
            "LoRaWANGatewayConnectivityStatusInput",
            input_definition=inputDefinitionProperty,
            input_name="LoRaWANGatewayConnectivityStatusInput",
            input_description=
            "Input for connectivity status updates for LoRaWAN gateways")
        # IoT Events: Detector Model
        detector_model_definition = iotevents.CfnDetectorModel.DetectorModelDefinitionProperty(
            initial_state_name=lorawan_gateway_monitoring_detectormodel.
            initial_state_name,
            states=lorawan_gateway_monitoring_detectormodel.get_states(self))

        iot_events_model = iotevents.CfnDetectorModel(
            self,
            "LoRaWANGatewayConnectivityModel",
            detector_model_definition=detector_model_definition,
            detector_model_name="LoRaWANGatewayConnectivityModel",
            detector_model_description=
            "Detector model for LoRaWAN gateway connectivity status",
            key="gatewayid",
            evaluation_method="BATCH",
            role_arn=iot_events_execution_role.role_arn)

        ####################################################################################
        # Lambda function GetWirelessGatewayStatisticsLambda

        # Lambda function GetWirelessGatewayStatisticsLambda: Execution Role
        get_wireless_gateway_statistics_lambda_role = iam.Role(
            self,
            "GetWirelessGatewayStatisticsLambdaExecutionRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
        get_wireless_gateway_statistics_lambda_role.add_to_policy(
            iam.PolicyStatement(resources=[
                "arn:aws:iotwireless:" + self.region + ":" + self.account +
                ":WirelessGateway/*"
            ],
                                actions=[
                                    "iotwireless:ListWirelessGateways",
                                    "iotwireless:GetWirelessGatewayStatistics"
                                ]))
        get_wireless_gateway_statistics_lambda_role.add_to_policy(
            iam.PolicyStatement(resources=[
                "arn:aws:iotevents:" + self.region + ":" + self.account +
                ":input/LoRaWANGatewayConnectivityStatusInput"
            ],
                                actions=["iotevents:BatchPutMessage"]))

        # Lambda function GetWirelessGatewayStatisticsLambda: Lambda function configuration
        get_wireless_gateway_statistics_lambda = lambda_.Function(
            self,
            "GetWirelessGatewayStatisticsLambda",
            code=lambda_.Code.asset(
                "src_get_wireless_gateway_statistics_lambda"),
            runtime=lambda_.Runtime.PYTHON_3_7,
            handler="lambda.handler",
            role=get_wireless_gateway_statistics_lambda_role,
            timeout=cdk.Duration.seconds(25))

        get_wireless_gateway_statistics_lambda.add_environment(
            "TEST_MODE", "true")

        get_wireless_gateway_statistics_lambda.add_environment(
            "IOT_EVENTS_INPUT_NAME", "LoRaWANGatewayConnectivityStatusInput")

        ####################################################################################
        # SNS topic
        sns_topic = sns.Topic(
            self,
            "LoRaWANGatewayNotificationTopic",
            display_name=
            "Topic to use for notifications about LoRaWAN gateway events like connect or disconnect",
            topic_name="LoRaWANGatewayNotificationTopic")

        email_address = cdk.CfnParameter(self, "emailforalarms")
        sns_topic.add_subscription(
            subscriptions.EmailSubscription(email_address.value_as_string))

        ####################################################################################
        # Step Function

        # State 'Fail'
        failure_state = sfn.Fail(self, "Fail")

        # State 'Wait'
        wait_state = sfn.Wait(self,
                              "Sleep",
                              time=sfn.WaitTime.duration(
                                  cdk.Duration.minutes(4)))

        # State 'Ingest gateway connectivity status into IoT Events input'
        lambda_invoke_state = tasks.LambdaInvoke(
            self,
            "Ingest gateway connectivity status into IoT Events input",
            result_path="$.wireless_gateway_stats",
            lambda_function=get_wireless_gateway_statistics_lambda
            # payload=task_input_payload
        )

        # State 'Did IoT Events ingestion run successfully?'
        choice_lambda_state = sfn.Choice(
            self, "Did IoT Events ingestion run successfully?")
        choice_lambda_state.when(
            sfn.Condition.number_equals(
                "$.wireless_gateway_stats.Payload.status", 200), wait_state)
        choice_lambda_state.otherwise(failure_state)

        # Define transitions
        wait_state.next(lambda_invoke_state)
        lambda_invoke_state.next(choice_lambda_state)

        # Create the state machine
        gateway_watchdog_state_machine = sfn.StateMachine(
            self,
            "LoRaWANGatewayWatchdogStatemachine",
            definition=lambda_invoke_state,
            state_machine_name="LoRaWANGatewayWatchdogStatemachine")
        ####################################################################################
        # CloudFormation Stack outputs

        cdk.CfnOutput(
            self,
            "StateMachineARN",
            value=gateway_watchdog_state_machine.state_machine_arn,
            description=
            "Please run 'aws stepfunctions start-execution --state-machine-arn  <LorawanConnectivityWatchdogStack.StateMachineARN>' to start the monitoring of LoRaWAN gateway connectivity",
        )

        cdk.CfnOutput(
            self,
            "StateMachineStartCommand",
            value='aws stepfunctions start-execution --state-machine-arn ' +
            gateway_watchdog_state_machine.state_machine_arn,
            description=
            "Please run this command to start the monitoring of LoRaWAN gateway connectivity",
        )

        cdk.CfnOutput(
            self,
            "StateMachineStopommand",
            value='aws stepfunctions stop-execution --state-machine-arn ' +
            gateway_watchdog_state_machine.state_machine_arn,
            description=
            "Please run this command to stop the monitoring of LoRaWAN gateway connectivity",
        )
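Example #12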
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        submit_lambda = PythonFunction(self,
                                       'Submit',
                                       handler='handler',
                                       index='submit.py',
                                       entry=os.path.join(
                                           os.getcwd(), 'lambdas'),
                                       runtime=lambda_.Runtime.PYTHON_3_8)

        get_status_lambda = PythonFunction(self,
                                           'Status',
                                           handler='handler',
                                           index='status.py',
                                           entry=os.path.join(
                                               os.getcwd(), 'lambdas'),
                                           runtime=lambda_.Runtime.PYTHON_3_8)

        submit_job = tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        wait_x = sfn.Wait(self,
                          "Wait X Seconds",
                          time=sfn.WaitTime.seconds_path("$.waitSeconds"))

        get_status = tasks.LambdaInvoke(
            self,
            "Get Job Status",
            lambda_function=get_status_lambda,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        job_failed = sfn.Fail(self,
                              "Job Failed",
                              cause="AWS Batch Job Failed",
                              error="DescribeJob returned FAILED")

        final_status = tasks.LambdaInvoke(
            self,
            "Get Final Job Status",
            lambda_function=get_status_lambda,
            # Use "guid" field as input
            output_path="$.Payload")

        definition = submit_job.next(wait_x).next(get_status).next(
            sfn.Choice(self, "Job Complete?").when(
                sfn.Condition.string_equals("$.status", "FAILED"),
                job_failed).when(
                    sfn.Condition.string_equals("$.status", "SUCCEEDED"),
                    final_status).otherwise(wait_x))

        sfn.StateMachine(self,
                         "StateMachine",
                         definition=definition,
                         timeout=cdk.Duration.minutes(5))
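Example #13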
    def __init__(self, scope: core.Construct, id: str, QueueDefine="default",TaskDefine="default",LambdaDefine="default", SNSDefine="default",**kwargs):
        super().__init__(scope, id, **kwargs)

        self.Job_String_Split = _sfn.Task(
            self,"String_Split",
            input_path = "$.TaskInfo",
            result_path = "$.JobDetail.String_Split",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Split",
                job_definition = TaskDefine.getTaskDefine("String_Split"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "INPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Split.OUTPUT_KEY"),
                        "SPLIT_NUM":_sfn.Data.string_at("$.JobParameter.String_Split.SPLIT_NUM")
                    }
                )
            )
        )
        
        self.Job_Map = _sfn.Task(
            self,"Job_Map",
            input_path = "$.TaskInfo",
            result_path = "$.TaskInfo.JobDetail.Job_Map",
            output_path = "$",
            task = _sfn_tasks.RunLambdaTask(LambdaDefine.getLambdaFunction("Get_Job_List")),
        )
        
        self.Job_String_Reverse = _sfn.Task(
            self,"String_Reverse",
            input_path = "$",
            result_path = "$",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Reverse",
                job_definition = TaskDefine.getTaskDefine("String_Reverse"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "INDEX":_sfn.Data.string_at("$.INDEX"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.INPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.String_Reverse.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Repeat = _sfn.Task(
            self,"String_Repeat",
            input_path = "$",
            result_path = "$",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Repeat",
                job_definition = TaskDefine.getTaskDefine("String_Repeat"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "INDEX":_sfn.Data.string_at("$.INDEX"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.INPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.String_Repeat.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Process_Repeat = _sfn.Map(
            self, "String_Process_Repeat",
            max_concurrency=50,
            input_path = "$.TaskInfo.JobDetail.Job_Map",
            result_path = "DISCARD",
            items_path = "$.Payload",
            output_path = "$",
        ).iterator(self.Job_String_Repeat)
        
        self.Job_String_Repeat_Merge = _sfn.Task(
            self,"String_Repeat_Merge",
            input_path = "$.TaskInfo",
            result_path = "DISCARD",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Repeat_Merge",
                job_definition = TaskDefine.getTaskDefine("String_Merge"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "PERFIX":_sfn.Data.string_at("$.JobParameter.String_Repeat.Prefix"),
                        "FILE_NAME":_sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Repeat.OUTPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Repeat.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Process_Repeat.next(self.Job_String_Repeat_Merge)
        
        self.Job_String_Process_Reverse = _sfn.Map(
            self, "String_Process_Reverse",
            max_concurrency=50,
            input_path = "$.TaskInfo.JobDetail.Job_Map",
            result_path = "DISCARD",
            items_path = "$.Payload",
            output_path = "$",
        ).iterator(self.Job_String_Reverse)
        
        self.Job_String_Reverse_Merge = _sfn.Task(
            self,"String_Reverse_Merge",
            input_path = "$.TaskInfo",
            result_path = "DISCARD",
            output_path = "$",
            task = _sfn_tasks.RunBatchJob(
                job_name = "String_Reverse_Merge",
                job_definition = TaskDefine.getTaskDefine("String_Merge"),
                job_queue = QueueDefine.getComputeQueue("ComputeQueue"),
                container_overrides = _sfn_tasks.ContainerOverrides(
                    environment = {
                        "PERFIX":_sfn.Data.string_at("$.JobParameter.String_Reverse.Prefix"),
                        "FILE_NAME":_sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                        "INPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                        "INPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Reverse.OUTPUT_KEY"),
                        "OUTPUT_BUCKET":_sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                        "OUTPUT_KEY":_sfn.Data.string_at("$.JobParameter.String_Reverse.OUTPUT_KEY")
                    }
                )
            )
        )
        
        self.Job_String_Process_Reverse.next(self.Job_String_Reverse_Merge)

        self.Job_Parallel_Process = _sfn.Parallel(
            self,
            'Parallel_Process',
            input_path = "$",
            result_path = "DISCARD"
        )
        
        self.Job_Parallel_Process.branch(self.Job_String_Process_Repeat)
        self.Job_Parallel_Process.branch(self.Job_String_Process_Reverse)
        
        self.Job_Check_Output = _sfn.Task(
            self,"Check_Output",
            input_path = "$.TaskInfo",
            
            result_path = "$.JobDetail.Check_Output",
            output_path = "$.JobDetail.Check_Output.Payload",
            task = _sfn_tasks.RunLambdaTask(LambdaDefine.getLambdaFunction("Get_Output_size")),
        )
        
        self.Job_Is_Complete = _sfn.Choice(
            self, "Is_Complete",
            input_path = "$.TaskInfo",
            output_path = "$"
        )
        
        self.Job_Finish = _sfn.Wait(
            self, "Finish",
            time = _sfn.WaitTime.duration(core.Duration.seconds(5))
        )
        
        self.Job_Notification = _sfn.Task(self, "Notification",
            input_path = "$.TaskInfo",
            result_path = "DISCARD",
            output_path = "$",
            task = _sfn_tasks.PublishToTopic(SNSDefine.getSNSTopic("Topic_Batch_Job_Notification"),
                integration_pattern = _sfn.ServiceIntegrationPattern.FIRE_AND_FORGET,
                message = _sfn.TaskInput.from_data_at("$.JobStatus.Job_Comment"),
                subject = _sfn.Data.string_at("$.JobStatus.SNS_Subject")
            )
        )
        
        self.Job_Failed = _sfn.Wait(
            self, "Failed",
            time = _sfn.WaitTime.duration(core.Duration.seconds(5))
        )
        
        self.statemachine = _sfn.StateMachine(
            self, "StateMachine",
            definition = self.Job_String_Split.next(self.Job_Map) \
                .next(self.Job_Parallel_Process) \
                .next(self.Job_Check_Output) \
                .next(self.Job_Notification) \
                .next(self.Job_Is_Complete \
                    .when(_sfn.Condition.string_equals(
                            "$.JobStatus.OutputStatus", "FAILED"
                        ), self.Job_Failed
                            .next(self.Job_Map)
                        )
                    .when(_sfn.Condition.string_equals(
                            "$.JobStatus.OutputStatus", "SUCCEEDED"
                        ), self.Job_Finish)
                    .otherwise(self.Job_Failed)
                ),
            timeout = core.Duration.hours(1),
        )
Example #14
    def create_stepfunction(self) -> Resource:
        """コンポーネントをビルドしてデプロイするステートマシンの作成

        Returns:
            Resource: step function
        """

        name = f"{self.stack_name}_{self.component_id}_edgedeploy_pipeline"
        role_name = self.get_role_name("edgedeploy_pipeline")

        sf_role = aws_iam.Role(
            self,
            id=role_name,
            assumed_by=aws_iam.ServicePrincipal("states.amazonaws.com"),
            role_name=role_name,
            path="/service-role/",
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ])
        sf_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "AllowCloudWatchLogsForSF",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(actions=[
                        "logs:CreateLogDelivery", "logs:GetLogDelivery",
                        "logs:UpdateLogDelivery", "logs:DeleteLogDelivery",
                        "logs:ListLogDeliveries", "logs:PutResourcePolicy",
                        "logs:DescribeResourcePolicies",
                        "logs:DescribeLogGroups"
                    ],
                                            resources=["*"])
                ])))
        sf_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "AllowXRayForSF",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(actions=[
                        "xray:PutTraceSegments", "xray:PutTelemetryRecords",
                        "xray:GetSamplingRules", "xray:GetSamplingTargets"
                    ],
                                            resources=["*"])
                ])))
        sf_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "AllowInvokeLambda",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        actions=["lambda:InvokeFunction"],
                        resources=[
                            self._lambda_build_image.function_arn,
                            self._lambda_build_image.function_arn +
                            ":*", self._lambda_check_image_status.function_arn,
                            self._lambda_check_image_status.function_arn +
                            ":*", self._lambda_create_component.function_arn,
                            self._lambda_create_component.function_arn +
                            ":*", self._lambda_deploy_component.function_arn,
                            self._lambda_deploy_component.function_arn + ":*",
                            self._lambda_check_deploy_status.function_arn,
                            self._lambda_check_deploy_status.function_arn +
                            ":*"
                        ])
                ])))

        # Build the Docker container image
        task_build_image = aws_sf_tasks.LambdaInvoke(
            self,
            "BuildInferenceImage",
            lambda_function=self._lambda_build_image,
            output_path="$.Payload")
        # Check the Docker image build result
        task_check_build_image_status = aws_sf_tasks.LambdaInvoke(
            self,
            "CheckDockerImageBuildStatus",
            lambda_function=self._lambda_check_image_status,
            output_path="$.Payload")

        # Wait for the Docker image build to finish
        wait_image_build = aws_sf.Wait(self,
                                       "WaitImageBuildFinish",
                                       time=aws_sf.WaitTime.duration(
                                           Duration.seconds(30)))

        # Create the Greengrass component
        task_create_greengrass_component = aws_sf_tasks.LambdaInvoke(
            self,
            "CreateComponent",
            lambda_function=self._lambda_create_component,
            output_path="$.Payload")

        # Deploy the component to Greengrass
        task_deploy_component = aws_sf_tasks.LambdaInvoke(
            self,
            "DeployComponent",
            lambda_function=self._lambda_deploy_component,
            output_path="$.Payload")

        # Wait for the Greengrass deployment to finish
        wait_component_deploy = aws_sf.Wait(self,
                                            "WaitDeploymentFinish",
                                            time=aws_sf.WaitTime.duration(
                                                Duration.seconds(30)))

        # Check the Greengrass deployment result
        task_check_deployment_status = aws_sf_tasks.LambdaInvoke(
            self,
            "CheckDeploymentStatus",
            lambda_function=self._lambda_check_deploy_status,
            output_path="$.Payload")

        # Deployment failed
        pipeline_failed = aws_sf.Fail(self,
                                      "PipelineFailed",
                                      error="DeployPipelineFailed",
                                      cause="Something went wrong")
        # Successful completion
        pipeline_success = aws_sf.Succeed(self, "PipelineSucceeded")

        # Determine whether the Docker container already exists
        choice_component_exists_result = aws_sf.Choice(self,
                                                       "JudgeComponentExists")

        # Judge the Docker image build result
        choice_image_build_result = aws_sf.Choice(self,
                                                  "JudgeImageBuildStatus")

        # Judge the deployment result
        choice_deployment_result = aws_sf.Choice(self, "JudgeDeploymentStatus")

        # Notify successful completion
        publish_success_message = aws_sf_tasks.SnsPublish(
            self,
            "Publish Success message",
            topic=aws_sns.Topic(self, "SendDeploySuccess"),
            message=aws_sf.TaskInput.from_json_path_at("$.message")).next(
                pipeline_success)

        # Notify deployment failure
        publish_failed_message = aws_sf_tasks.SnsPublish(
            self,
            "Publish Failed message",
            topic=aws_sns.Topic(self, "SendPipelineFailed"),
            message=aws_sf.TaskInput.from_json_path_at("$.message")).next(
                pipeline_failed)

        definition = \
            task_build_image.next(
                choice_component_exists_result
                    .when(
                        aws_sf.Condition.string_equals("$.status", "component_exists"), task_deploy_component)
                    .otherwise(
                        wait_image_build.next(
                        task_check_build_image_status).next(
                            choice_image_build_result.when(
                                aws_sf.Condition.string_equals("$.status", "image_exists"), task_create_greengrass_component
                                .next(task_deploy_component)
                                .next(wait_component_deploy)
                                .next(task_check_deployment_status)
                                .next(
                                    choice_deployment_result
                                        .when(aws_sf.Condition.string_equals("$.status", "RUNNING"), wait_component_deploy)
                                        .when(aws_sf.Condition.string_equals("$.status", "COMPLETED"), publish_success_message)
                                        .otherwise(publish_failed_message).afterwards()))
                            .when(
                                aws_sf.Condition.string_equals("$.status", "image_faild"), publish_failed_message)
                            .otherwise(
                                wait_image_build).afterwards())
                )
            )
        #.next(aws_sf.Succeed(self, "GreengrassComponentDeployFinished"))

        state_machine = aws_sf.StateMachine(
            self,
            id=name,
            state_machine_name=name,
            definition=definition,
            state_machine_type=aws_sf.StateMachineType.STANDARD,
            role=sf_role)
Example #15
    def __init__(self, scope: core.Construct, id: str, group_name: str,
                 minute_duration: int, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # TODO: Setup alerting of failure to an SNS
        # TODO: Failure is not the same as a student not in a group
        # TODO: Streamline input data so that lambdas only get the info they really need
        # TODO: Comment
        # TODO: Need to separate unexpected errors from regular errors
        # Setting up monitoring

        schedule_stop = lambda_.Function(
            self,
            id="ScheduleStopLambda",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_inline(
                open("./resources/schedule-termination.py", 'r').read()),
            handler="index.handler",
            log_retention=logs.RetentionDays.ONE_DAY,
            environment=dict(GROUP_NAME=group_name),
            timeout=core.Duration.seconds(30))
        schedule_stop.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "ec2:Describe*", "iam:ListGroupsForUser", "iam:ListUsers"
            ],
                                          effect=iam.Effect.ALLOW,
                                          resources=["*"]))

        terminate_ec2 = lambda_.Function(
            self,
            id="TerminateEC2",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_inline(
                open("./resources/terminate-ec2.py", 'r').read()),
            handler="index.handler",
            log_retention=logs.RetentionDays.ONE_DAY,
            timeout=core.Duration.seconds(30))
        terminate_ec2.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "ec2:DescribeInstance*",
                "ec2:TerminateInstances",
            ],
                                          effect=iam.Effect.ALLOW,
                                          resources=["*"]))

        # The lambda object that will see if we should schedule.
        schedule_stop_task = tasks.LambdaInvoke(
            self,
            id='schedule stop',
            lambda_function=schedule_stop,
            input_path="$.detail.userIdentity",
            result_path="$.Payload",
        )
        # TODO: Need to change this based on the configuration info above
        # Wait state to try and delete
        # wait_x = sfn.Wait(self, 'Wait x minutes', time=sfn.WaitTime.seconds_path("10"))
        wait_x = sfn.Wait(self,
                          id='Wait x minutes',
                          time=sfn.WaitTime.duration(
                              core.Duration.minutes(minute_duration)))

        job_failed = sfn.Fail(self,
                              id="Failed Job",
                              cause="Error in the input",
                              error="Error")
        job_finished = sfn.Succeed(self, id="Job Finished")
        choice = sfn.Choice(self, 'Can I delete')
        choice.when(sfn.Condition.boolean_equals('$.Payload.Payload', False),
                    job_finished)
        choice.otherwise(wait_x)
        terminate_ec2_task = tasks.LambdaInvoke(
            self,
            'terminate',
            lambda_function=terminate_ec2,
            input_path="$.detail.responseElements.instancesSet")
        wait_x.next(terminate_ec2_task).next(job_finished)

        state_definition = schedule_stop_task \
            .next(choice)
        terminate_machine = sfn.StateMachine(self,
                                             id="State Machine",
                                             definition=state_definition)
        cloudwatch.Alarm(self,
                         "EC2ScheduleAlarm",
                         metric=terminate_machine.metric_failed(),
                         threshold=1,
                         evaluation_periods=1)
        # TODO: Build a rule that monitors for EC2 creation.
        # Any newly created instance will have to be destroyed. Should this
        # cover other resources as well?
        create_event = events.Rule(
            self,
            id='detect-ec2-start',
            description="Detects if an EC2 is created",
            enabled=True,
            event_pattern=events.EventPattern(
                detail_type=["AWS API Call via CloudTrail"],
                source=["aws.ec2"],
                detail={
                    "eventName": ["RunInstances"],
                    "eventSource": ["ec2.amazonaws.com"]
                }),
            targets=[targets.SfnStateMachine(terminate_machine)])
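
A hypothetical sketch of ./resources/schedule-termination.py, which is read above but not shown. Because LambdaInvoke stores the full invoke result at $.Payload, the boolean returned by the handler is what the Choice state reads at $.Payload.Payload: True keeps the execution alive so the instance is terminated after the wait, False ends the job immediately. The group-membership check is an assumption based on the IAM permissions granted above.

import os

import boto3

iam = boto3.client("iam")


def handler(event, context):
    # event is $.detail.userIdentity from the CloudTrail RunInstances event.
    user_name = event.get("userName")
    if not user_name:
        return False
    groups = iam.list_groups_for_user(UserName=user_name)["Groups"]
    # Schedule termination only for users in the monitored group.
    return any(g["GroupName"] == os.environ["GROUP_NAME"] for g in groups)
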
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        submit_lambda = aws_lambda_python.PythonFunction(
            self,
            "submit-status",
            entry="./lambdas/example",
            handler="submit_status",
        )

        get_status_lambda = aws_lambda_python.PythonFunction(
            self,
            "get-status",
            entry="./lambdas/example",
            handler="get_status")

        final_status_lambda = aws_lambda_python.PythonFunction(
            self,
            "final-status",
            entry="./lambdas/example",
            handler="final_status")

        submit_job = tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            payload_response_only=True,
        )

        wait_x = sfn.Wait(self,
                          "Wait X Seconds",
                          time=sfn.WaitTime.seconds_path("$.seconds"))

        get_status = tasks.LambdaInvoke(
            self,
            "Get Job Status",
            lambda_function=get_status_lambda,
            payload_response_only=True,
        )

        job_failed = sfn.Fail(
            self,
            "Job Failed",
            cause="AWS Batch Job Failed",
            error="DescribeJob returned FAILED",
        )

        final_status = tasks.LambdaInvoke(
            self,
            "Get Final Job Status",
            lambda_function=final_status_lambda,
            payload_response_only=True,
        )

        definition = (submit_job.next(wait_x).next(get_status).next(
            sfn.Choice(self, "Job Complete?").when(
                sfn.Condition.string_equals("$.status", "FAILED"),
                job_failed).when(
                    sfn.Condition.string_equals("$.status", "SUCCEEDED"),
                    final_status).otherwise(wait_x)))

        sfn.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )
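
A hypothetical sketch of the handlers in ./lambdas/example (the entry directory is not shown). With payload_response_only=True the function's return value becomes the state output directly, so submit_status must emit the "seconds" field the Wait state reads, and get_status must both report "status" for the Choice and carry "seconds" forward so the polling loop can wait again.

def submit_status(event, context):
    # Start the job somewhere, then return the initial polling state.
    return {"job_id": "example-job", "seconds": 10, "status": "RUNNING"}


def get_status(event, context):
    # Replace with a real status lookup; the Choice expects "FAILED" or
    # "SUCCEEDED" to leave the loop, anything else waits again.
    return {**event, "status": "SUCCEEDED"}


def final_status(event, context):
    return event
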
Example #17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        API_ARN = self.node.try_get_context("api_arn")
        RATE = self.node.try_get_context("rate")

        if not API_ARN or not RATE:
            logger.error(
                f"Required context variables for {id} were not provided!")
        else:
            # Create the WAF IPSets
            doslist = wafv2.CfnIPSet(
                self,
                "Ext06DosIpSet",
                addresses=[],
                ip_address_version="IPV4",
                scope="REGIONAL",
                name="Ext06DosIpSet",
            )

            suslist = wafv2.CfnIPSet(
                self,
                "Ext06SusIpSet",
                addresses=[],
                ip_address_version="IPV4",
                scope="REGIONAL",
                name="Ext06SusIpSet",
            )

            # Create a WAF
            waf = wafv2.CfnWebACL(
                self,
                id="Ext06_WAF",
                name="Ext06-WAF",
                default_action=wafv2.CfnWebACL.DefaultActionProperty(allow={}),
                scope="REGIONAL",
                visibility_config=wafv2.CfnWebACL.VisibilityConfigProperty(
                    cloud_watch_metrics_enabled=True,
                    metric_name="EXT06_WAF",
                    sampled_requests_enabled=True),
                rules=[],
            )

            # Create Susunban lambda
            lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                           "ext_06")
            susunban_lambda = _lambda.Function(
                self,
                "Ext06ResponseSusUnbanFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="susunban_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "ipset_id": suslist.attr_id,
                    "ipset_name": suslist.name,
                    "ipset_scope": suslist.scope,
                })
            # Assign WAF permissions to lambda
            susunban_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["wafv2:GetIPSet", "wafv2:UpdateIPSet"],
                    effect=iam.Effect.ALLOW,
                    resources=[suslist.attr_arn],
                ))

            # Create Dosunban lambda
            lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                           "ext_06")
            dosunban_lambda = _lambda.Function(
                self,
                "Ext06ResponseDosUnbanFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="dosunban_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "ipset_id": doslist.attr_id,
                    "ipset_name": doslist.name,
                    "ipset_scope": doslist.scope,
                })
            # Assign WAF permissions to lambda
            dosunban_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["wafv2:GetIPSet", "wafv2:UpdateIPSet"],
                    effect=iam.Effect.ALLOW,
                    resources=[doslist.attr_arn],
                ))

            # Create the DoS and suspicious-IP step functions
            # State machines that unban the blacklisted IPs after 1 hour
            doswait_step = sfn.Wait(
                self,
                "Ext06ResponseStepDosWait",
                time=sfn.WaitTime.duration(core.Duration.hours(1)),
            )
            suswait_step = sfn.Wait(
                self,
                "Ext06ResponseStepSusWait",
                time=sfn.WaitTime.duration(core.Duration.hours(1)),
            )
            dosunban_step = sfn.Task(
                self,
                "Ext06ResponseStepDosUnban",
                task=tasks.RunLambdaTask(
                    dosunban_lambda,
                    integration_pattern=sfn.ServiceIntegrationPattern.
                    FIRE_AND_FORGET,
                    payload={"Input.$": "$"},
                ),
            )
            susunban_step = sfn.Task(
                self,
                "Ext06ResponseStepSosUnban",
                task=tasks.RunLambdaTask(
                    susunban_lambda,
                    integration_pattern=sfn.ServiceIntegrationPattern.
                    FIRE_AND_FORGET,
                    payload={"Input.$": "$"},
                ),
            )
            dos_statemachine = sfn.StateMachine(
                self,
                "Ext06ResponseDosUnbanStateMachine",
                definition=doswait_step.next(dosunban_step),
                timeout=core.Duration.hours(1.5),
            )

            sus_statemachine = sfn.StateMachine(
                self,
                "Ext06ResponseSusUnbanStateMachine",
                definition=suswait_step.next(susunban_step),
                timeout=core.Duration.hours(1.5),
            )
            # Create lambda function
            lambda_func = _lambda.Function(
                self,
                "Ext06ResponseFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="response_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "suslist_id": suslist.attr_id,
                    "suslist_name": suslist.name,
                    "suslist_scope": suslist.scope,
                    "doslist_id": doslist.attr_id,
                    "doslist_name": doslist.name,
                    "doslist_scope": doslist.scope,
                    "dos_arn": dos_statemachine.state_machine_arn,
                    "sus_arn": sus_statemachine.state_machine_arn,
                },
            )

            kinesis_log = s3.Bucket(
                self,
                id='dos_logs',
                access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
            )

            # Assign permissions to response lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "wafv2:GetIPSet",
                        "wafv2:UpdateIPSet",
                        "states:StartExecution",
                        "s3:GetObject",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        doslist.attr_arn, suslist.attr_arn,
                        sus_statemachine.state_machine_arn,
                        dos_statemachine.state_machine_arn,
                        kinesis_log.bucket_arn,
                        kinesis_log.bucket_arn + "/*"
                    ],
                ))

            # Create an IAM role for the stream
            stream_role = iam.Role(
                self,
                id="waf-kinesis-log-role",
                assumed_by=iam.ServicePrincipal(
                    service="firehose.amazonaws.com", ),
            )

            stream_permissions = iam.Policy(
                self,
                id="Ext-06-kinesis-permissions",
                statements=[
                    iam.PolicyStatement(
                        actions=[
                            "s3:AbortMultipartUpload",
                            "s3:GetBucketLocation",
                            "s3:GetObject",
                            "s3:ListBucket",
                            "s3:ListBucketMultipartUploads",
                            "s3:PutObject",
                        ],
                        effect=iam.Effect.ALLOW,
                        resources=[
                            kinesis_log.bucket_arn,
                            kinesis_log.bucket_arn + "/*"
                        ],
                    )
                ])

            stream_role.attach_inline_policy(stream_permissions)

            log_stream = firehose.CfnDeliveryStream(
                self,
                id="aws-waf-logs-ext06",
                delivery_stream_type="DirectPut",
                delivery_stream_name="aws-waf-logs-ext06",
                s3_destination_configuration=firehose.CfnDeliveryStream.
                S3DestinationConfigurationProperty(
                    bucket_arn=kinesis_log.bucket_arn,
                    buffering_hints=firehose.CfnDeliveryStream.
                    BufferingHintsProperty(interval_in_seconds=300,
                                           size_in_m_bs=5),
                    compression_format="UNCOMPRESSED",
                    role_arn=stream_role.role_arn),
            )
            kinesis_log.add_event_notification(
                s3.EventType.OBJECT_CREATED,
                dest=s3_notifications.LambdaDestination(lambda_func))
            utc_time = datetime.now(tz=timezone.utc)
            utc_time = utc_time + timedelta(minutes=5)
            cron_string = "cron(" + str(utc_time.minute) + " " + str(
                utc_time.hour) + " " + str(utc_time.day) + " " + str(
                    utc_time.month) + " ? " + str(utc_time.year) + ")"
            trigger = events.Rule(
                self,
                id="ext-06 setup",
                rule_name="Ext06-trigger",
                schedule=events.Schedule.expression(cron_string))

            setup_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                          "ext_06")
            setup_func = _lambda.Function(
                self,
                id="Ext06Setup",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="setup.lambda_handler",
                code=_lambda.Code.from_asset(setup_dir_path),
                environment={
                    "waf_arn": waf.attr_arn,
                    "waf_id": waf.attr_id,
                    "waf_scope": waf.scope,
                    "waf_name": waf.name,
                    "firehose_arn": log_stream.attr_arn,
                    "rule_name": "Ext06-trigger",
                    "doslist_arn": doslist.attr_arn,
                    "rate": str(RATE),
                },
            )

            # Assign permissions to setup lambda
            setup_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "wafv2:PutLoggingConfiguration", "wafv2:GetWebACL",
                        "wafv2:UpdateWebACL"
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[waf.attr_arn, doslist.attr_arn],
                ))

            setup = targets.LambdaFunction(handler=setup_func, )

            setup.bind(rule=trigger)
            trigger.add_target(target=setup)

            wafv2.CfnWebACLAssociation(
                self,
                id="API gateway association",
                resource_arn=API_ARN,
                web_acl_arn=waf.attr_arn,
            )
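
A hypothetical sketch of dosunban_lambda.lambda_handler / susunban_lambda.lambda_handler (the ext_06 sources are not shown). After the one-hour Wait the state machine invokes the Lambda with the state input wrapped under "Input" (because of the RunLambdaTask payload above), and the handler removes that address from its IP set using the environment variables configured on the function. The event shape is an assumption.

import os

import boto3

waf = boto3.client("wafv2")


def lambda_handler(event, context):
    ip = event["Input"]["ip"]  # assumed payload shape
    ipset = waf.get_ip_set(Name=os.environ["ipset_name"],
                           Scope=os.environ["ipset_scope"],
                           Id=os.environ["ipset_id"])
    # Drop the expired ban and write the list back with the lock token.
    addresses = [a for a in ipset["IPSet"]["Addresses"] if a != ip]
    waf.update_ip_set(Name=os.environ["ipset_name"],
                      Scope=os.environ["ipset_scope"],
                      Id=os.environ["ipset_id"],
                      Addresses=addresses,
                      LockToken=ipset["LockToken"])
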
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 redshift_cluster_name: str,
                 user_secret: Secret) -> None:
        super().__init__(scope, id)

        stack = Stack.of(self)

        subprocess.call(
            ['pip', 'install', '-t', 'dwh/dwh_loader_layer/python/lib/python3.8/site-packages', '-r',
             'dwh/dwh_loader/requirements.txt', '--platform', 'manylinux1_x86_64', '--only-binary=:all:',
             '--upgrade'])

        requirements_layer = _lambda.LayerVersion(scope=self,
                                                  id='PythonRequirementsTemplate',
                                                  code=_lambda.Code.from_asset('dwh/dwh_loader_layer'),
                                                  compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        dwh_loader_role = _iam.Role(
            self, 'Role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        dwh_loader_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'
        ))

        dwh_loader_role.attach_inline_policy(
            _iam.Policy(
                self, 'InlinePolicy',
                statements=[
                    _iam.PolicyStatement(
                        actions=[
                            "redshift-data:ExecuteStatement",
                            "redshift-data:CancelStatement",
                            "redshift-data:ListStatements",
                            "redshift-data:GetStatementResult",
                            "redshift-data:DescribeStatement",
                            "redshift-data:ListDatabases",
                            "redshift-data:ListSchemas",
                            "redshift-data:ListTables",
                            "redshift-data:DescribeTable"
                        ],
                        resources=['*']
                    ),
                    _iam.PolicyStatement(
                        actions=["secretsmanager:GetSecretValue"],
                        resources=[user_secret.secret_arn]
                    ),
                    _iam.PolicyStatement(
                        actions=["redshift:GetClusterCredentials"],
                        resources=[
                            "arn:aws:redshift:*:*:dbname:*/*",
                            "arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER
                        ]
                    ),
                    _iam.PolicyStatement(
                        effect=_iam.Effect.DENY,
                        actions=["redshift:CreateClusterUser"],
                        resources=["arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER]
                    ),
                    _iam.PolicyStatement(
                        conditions={
                            'StringLike': {
                                "iam:AWSServiceName": "redshift-data.amazonaws.com"
                            }
                        },
                        actions=["iam:CreateServiceLinkedRole"],
                        resources=["arn:aws:iam::*:role/aws-service-role/redshift-data.amazonaws.com/AWSServiceRoleForRedshift"]
                    ),
                ]
            )
        )

        dwh_loader_function = _lambda.Function(
            self, 'Lambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('dwh/dwh_loader'),
            handler='dwh_loader.handler',
            function_name='dwh-loader',
            environment={
                'CLUSTER_NAME': redshift_cluster_name,
                'PROCEDURE': _config.Redshift.ETL_PROCEDURE,
                'SECRET_ARN': user_secret.secret_arn,
                'DATABASE': _config.Redshift.DATABASE,
                'REGION': core.Aws.REGION,
                'SCHEMA': _config.Redshift.SCHEMA
            },
            layers=[requirements_layer],
            timeout=core.Duration.seconds(30),
            role=dwh_loader_role
        )

        dwh_loader_submit = _sfn_tasks.LambdaInvoke(
            self, 'Submit',
            lambda_function=dwh_loader_function,
            payload_response_only=True
        )

        dwh_loader_wait = _sfn.Wait(
            self, 'Wait',
            time=_sfn.WaitTime.duration(core.Duration.seconds(30))
        )

        dwh_loader_complete = _sfn.Choice(
            self, 'Complete'
        )

        dwh_loader_failed = _sfn.Fail(
            self, 'Fail',
            cause="Redshift Data API statement failed",
            error="$.Result.Error"
        )

        dwh_loader_status = _sfn_tasks.LambdaInvoke(
            self, 'Status',
            lambda_function=dwh_loader_function,
            result_path='$.Result',
            payload_response_only=True
        )

        definition = dwh_loader_submit \
            .next(dwh_loader_wait) \
            .next(dwh_loader_status) \
            .next(dwh_loader_complete
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FAILED'), dwh_loader_failed)
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FINISHED'), _sfn.Succeed(self, 'DwhLoaderSuccess'))
                  .otherwise(dwh_loader_wait))

        dwh_loader_stepfunctions = _sfn.StateMachine(
            self, 'StepFunctions',
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        step_trigger = _events.Rule(
            self, 'StepTrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=dwh_loader_stepfunctions,
            )
        )
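
A hypothetical sketch of dwh/dwh_loader/dwh_loader.handler (the asset directory is not shown). The same function backs both the Submit and Status tasks: on the first call it starts the stored procedure through the Redshift Data API, and on later calls it reports the statement status that the Choice state compares against FAILED / FINISHED. The CALL statement format is an assumption.

import os

import boto3

client = boto3.client("redshift-data")


def handler(event, context):
    if "Id" not in event:
        # Submit task: kick off the ETL procedure.
        statement = client.execute_statement(
            ClusterIdentifier=os.environ["CLUSTER_NAME"],
            Database=os.environ["DATABASE"],
            SecretArn=os.environ["SECRET_ARN"],
            Sql=f"CALL {os.environ['SCHEMA']}.{os.environ['PROCEDURE']}()",
        )
        return {"Id": statement["Id"]}
    # Status task: report where the statement is, for the Choice state.
    described = client.describe_statement(Id=event["Id"])
    return {"Id": event["Id"],
            "Status": described["Status"],
            "Error": described.get("Error", "")}
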
Example #19
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        submit_job_activity = sfn.Activity(self, "SubmitJob")
        check_job_activity = sfn.Activity(self, "CheckJob")
        do_mapping_activity1 = sfn.Activity(self, "MapJob1")
        do_mapping_activity2 = sfn.Activity(self, "MapJob2")

        submit_job = sfn.Task(
            self,
            "Submit Job",
            task=sfn_tasks.InvokeActivity(submit_job_activity),
            result_path="$.guid",
        )

        task1 = sfn.Task(
            self,
            "Task 1 in Mapping",
            task=sfn_tasks.InvokeActivity(do_mapping_activity1),
            result_path="$.guid",
        )

        task2 = sfn.Task(
            self,
            "Task 2 in Mapping",
            task=sfn_tasks.InvokeActivity(do_mapping_activity2),
            result_path="$.guid",
        )

        wait_x = sfn.Wait(
            self,
            "Wait X Seconds",
            time=sfn.WaitTime.seconds_path('$.wait_time'),
        )
        get_status = sfn.Task(
            self,
            "Get Job Status",
            task=sfn_tasks.InvokeActivity(check_job_activity),
            input_path="$.guid",
            result_path="$.status",
        )
        is_complete = sfn.Choice(self, "Job Complete?")
        job_failed = sfn.Fail(self,
                              "Job Failed",
                              cause="AWS Batch Job Failed",
                              error="DescribeJob returned FAILED")
        final_status = sfn.Task(
            self,
            "Get Final Job Status",
            task=sfn_tasks.InvokeActivity(check_job_activity),
            input_path="$.guid",
        )

        definition_map = task1.next(task2)

        process_map = sfn.Map(self, "Process_map",
                              max_concurrency=10).iterator(definition_map)

        definition = submit_job \
            .next(process_map) \
            .next(wait_x) \
            .next(get_status) \
            .next(is_complete
                  .when(sfn.Condition.string_equals(
                    "$.status", "FAILED"), job_failed)
                  .when(sfn.Condition.string_equals(
                    "$.status", "SUCCEEDED"), final_status)
                  .otherwise(wait_x))

        sfn.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=core.Duration.seconds(30),
        )
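
InvokeActivity tasks only queue work, so this example needs an activity worker running somewhere else; a minimal, hypothetical poller is sketched below. The worker name and output shape are assumptions; the ARN would come from submit_job_activity.activity_arn (or the corresponding check/map activities).

import json

import boto3

sfn_client = boto3.client("stepfunctions")


def poll_activity(activity_arn: str) -> None:
    # Long-polls for one task; returns quietly if none is pending.
    task = sfn_client.get_activity_task(activityArn=activity_arn,
                                        workerName="example-worker")
    if not task.get("taskToken"):
        return
    payload = json.loads(task["input"])
    # ... do the actual work with `payload` here ...
    sfn_client.send_task_success(taskToken=task["taskToken"],
                                 output=json.dumps({"status": "SUCCEEDED"}))

For the "Submit Job" task, whatever the worker returns is stored at $.guid because of the result_path set above.
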
Example #20
    def __init__(self, scope: core.App, id_: str, stack_env: str, **kwargs) -> None:
        super().__init__(scope, id_, **kwargs)

        # create dynamo table
        demo_table = aws_dynamodb.Table(
            scope=self,
            id="demo_table",
            partition_key=aws_dynamodb.Attribute(
                name="id",
                type=aws_dynamodb.AttributeType.STRING
            ),
            write_capacity=3,
            read_capacity=3,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        queue = aws_sqs.Queue(self, f"{id_}-SQSQueue")

        # create producer lambda function
        producer_lambda = self._create_lambda_function(
            function_name="producer",
            environment={
                "TABLE_NAME": demo_table.table_name,
                "QUEUE_URL": queue.queue_url
            }
        )
        queue.grant_send_messages(producer_lambda)

        # grant permission to lambda to write to demo table
        demo_table.grant_write_data(producer_lambda)

        # create consumer lambda function
        consumer_lambda = self._create_lambda_function(
            function_name="consumer",
            environment={"TABLE_NAME": demo_table.table_name}
        )

        # grant permission to lambda to read from demo table
        demo_table.grant_read_data(consumer_lambda)

        # api_gateway for root
        base_api = apigw_.RestApi(
            scope=self,
            id=f"{id_}-{stack_env}-apigw",
            rest_api_name=f"{id_}-{stack_env}-apigw",
            deploy_options=apigw_.StageOptions(stage_name=stack_env)
        )

        # /example entity
        api_entity = base_api.root.add_resource("example")

        # GET /example
        api_entity.add_method(
            http_method="GET",
            integration=apigw_.LambdaIntegration(
                handler=consumer_lambda,
                integration_responses=[
                    apigw_.IntegrationResponse(
                        status_code="200"
                    )
                ]
            )
        )

        # POST /example
        api_entity.add_method(
            http_method="POST",
            integration=apigw_.LambdaIntegration(
                handler=producer_lambda,
                integration_responses=[
                    apigw_.IntegrationResponse(
                        status_code="200"
                    )
                ]
            )
        )

        # ============= #
        # StepFunctions #
        # ============= #

        dynamodb_update_running_task = self._dynamodb_update_in_sfn(table=demo_table, status="running")

        wait_1_min = aws_sfn.Wait(
            scope=self,
            id="Wait one minutes as heavy task",
            time=aws_sfn.WaitTime.duration(core.Duration.minutes(1)),
        )

        dynamodb_update_complete_task = self._dynamodb_update_in_sfn(table=demo_table, status="complete")
        dynamodb_update_failure_task = self._dynamodb_update_in_sfn(table=demo_table, status="failure")

        check_task_status = aws_sfn.Choice(scope=self, id="Job Complete?")\
            .when(aws_sfn.Condition.string_equals("$.job_status", "success"), dynamodb_update_complete_task) \
            .otherwise(dynamodb_update_failure_task)

        # StepFunctions
        definition = dynamodb_update_running_task \
            .next(wait_1_min) \
            .next(check_task_status)

        sfn_process = aws_sfn.StateMachine(
            scope=self,
            id=f"{id_}-{stack_env}",
            definition=definition
        )

        # Lambda to invoke StepFunction
        sfn_invoke_lambda = self._create_lambda_function(
            function_name="invoke_step_function",
            environment={
                "STEP_FUNCTION_ARN": sfn_process.state_machine_arn,
                "QUEUE_URL": queue.queue_url
            }
        )
        # grant
        queue.grant_consume_messages(sfn_invoke_lambda)
        sfn_process.grant_start_execution(sfn_invoke_lambda)

        # ================ #
        # CloudWatch Event #
        # ================ #

        # Runs every 2 hour
        invoke_automatically = aws_events.Rule(
            scope=self,
            id=f"InvokeSFnViaLambda-{stack_env}",
            schedule=aws_events.Schedule.rate(core.Duration.hours(2))
        )
        invoke_automatically.add_target(aws_events_targets.LambdaFunction(sfn_invoke_lambda))
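
A hypothetical sketch of the "invoke_step_function" handler produced by self._create_lambda_function (neither the helper nor the handler source is shown). On each scheduled event it drains the queue and starts one execution per message, which is exactly what the grant_consume_messages and grant_start_execution calls above permit.

import os

import boto3

sqs = boto3.client("sqs")
sfn_client = boto3.client("stepfunctions")


def handler(event, context):
    queue_url = os.environ["QUEUE_URL"]
    messages = sqs.receive_message(QueueUrl=queue_url,
                                   MaxNumberOfMessages=10).get("Messages", [])
    for message in messages:
        # One state machine execution per queued item.
        sfn_client.start_execution(
            stateMachineArn=os.environ["STEP_FUNCTION_ARN"],
            input=message["Body"],
        )
        sqs.delete_message(QueueUrl=queue_url,
                           ReceiptHandle=message["ReceiptHandle"])
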
Example #21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        API_ARN = self.node.try_get_context("api_arn")
        if not API_ARN:
            logger.error(
                f"Required context variables for {id} were not provided!")
        else:
            # Create XSS rule
            xss_body = wafv2.CfnRuleGroup.StatementOneProperty(
                xss_match_statement=wafv2.CfnRuleGroup.
                XssMatchStatementProperty(
                    field_to_match=BODY,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))
            xss_query_string = wafv2.CfnRuleGroup.StatementOneProperty(
                xss_match_statement=wafv2.CfnRuleGroup.
                XssMatchStatementProperty(
                    field_to_match=QUERY_STRING,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))
            xss_uri = wafv2.CfnRuleGroup.StatementOneProperty(
                xss_match_statement=wafv2.CfnRuleGroup.
                XssMatchStatementProperty(
                    field_to_match=URI_PATH,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))
            xss_header = wafv2.CfnRuleGroup.StatementOneProperty(
                xss_match_statement=wafv2.CfnRuleGroup.
                XssMatchStatementProperty(
                    field_to_match=SINGLE_HEADER,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))

            xss_rule_group = wafv2.CfnRuleGroup(
                self,
                id="XSS",
                capacity=160,
                scope="REGIONAL",
                visibility_config=wafv2.CfnRuleGroup.VisibilityConfigProperty(
                    cloud_watch_metrics_enabled=True,
                    metric_name="xss_attacks",
                    sampled_requests_enabled=False),
                rules=[
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="xss_query_string",
                        priority=1,
                        statement=xss_query_string,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="xss_attacks",
                            sampled_requests_enabled=False),
                    ),
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="xss_body",
                        priority=2,
                        statement=xss_body,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="xss_attacks",
                            sampled_requests_enabled=False)),
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="xss_uri",
                        priority=3,
                        statement=xss_uri,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="xss_attacks",
                            sampled_requests_enabled=False)),
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="xss_header",
                        priority=4,
                        statement=xss_header,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="xss_attacks",
                            sampled_requests_enabled=False),
                    ),
                ],
            )

            # Create the SQLI rule group
            sqli_body = wafv2.CfnRuleGroup.StatementOneProperty(
                sqli_match_statement=wafv2.CfnRuleGroup.
                SqliMatchStatementProperty(
                    field_to_match=BODY,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))
            sqli_query_string = wafv2.CfnRuleGroup.StatementOneProperty(
                sqli_match_statement=wafv2.CfnRuleGroup.
                SqliMatchStatementProperty(
                    field_to_match=QUERY_STRING,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))
            sqli_uri = wafv2.CfnRuleGroup.StatementOneProperty(
                sqli_match_statement=wafv2.CfnRuleGroup.
                SqliMatchStatementProperty(
                    field_to_match=URI_PATH,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))
            sqli_header = wafv2.CfnRuleGroup.StatementOneProperty(
                sqli_match_statement=wafv2.CfnRuleGroup.
                SqliMatchStatementProperty(
                    field_to_match=SINGLE_HEADER,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))

            sqli_rule_group = wafv2.CfnRuleGroup(
                self,
                id="SQLI",
                capacity=80,
                scope="REGIONAL",
                visibility_config=wafv2.CfnRuleGroup.VisibilityConfigProperty(
                    cloud_watch_metrics_enabled=True,
                    metric_name="sqli_attacks",
                    sampled_requests_enabled=False),
                rules=[
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="sqli_query_string",
                        priority=1,
                        statement=sqli_query_string,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="sqli_attacks",
                            sampled_requests_enabled=False),
                    ),
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="sqli_body",
                        priority=2,
                        statement=sqli_body,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="sqli_attacks",
                            sampled_requests_enabled=False)),
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="sqli_uri",
                        priority=3,
                        statement=sqli_uri,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="sqli_attacks",
                            sampled_requests_enabled=False)),
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="sqli_header",
                        priority=4,
                        statement=sqli_header,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="sqli_attacks",
                            sampled_requests_enabled=False),
                    ),
                ],
            )

            # Create the LFI and path traversal sets
            regex_pattern_set = wafv2.CfnRegexPatternSet(
                self,
                id="Ext01LptSet",
                regular_expression_list=[r".*\.\./.*", r".*://.*"],
                scope="REGIONAL")
            lpt_query_string = wafv2.CfnRuleGroup.StatementOneProperty(
                regex_pattern_set_reference_statement=wafv2.CfnRuleGroup.
                RegexPatternSetReferenceStatementProperty(
                    arn=regex_pattern_set.attr_arn,
                    field_to_match=QUERY_STRING,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))
            lpt_uri = wafv2.CfnRuleGroup.StatementOneProperty(
                regex_pattern_set_reference_statement=wafv2.CfnRuleGroup.
                RegexPatternSetReferenceStatementProperty(
                    arn=regex_pattern_set.attr_arn,
                    field_to_match=URI_PATH,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))

            lpt_rule_group = wafv2.CfnRuleGroup(
                self,
                id="LPT",
                capacity=50,
                scope="REGIONAL",
                visibility_config=wafv2.CfnRuleGroup.VisibilityConfigProperty(
                    cloud_watch_metrics_enabled=True,
                    metric_name="lpt_attacks",
                    sampled_requests_enabled=False),
                rules=[
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="lpt_query_string",
                        priority=1,
                        statement=lpt_query_string,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="lpt_attacks",
                            sampled_requests_enabled=False),
                    ),
                    wafv2.CfnRuleGroup.RuleProperty(
                        name="lpt_uri",
                        priority=2,
                        statement=lpt_uri,
                        action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                        visibility_config=wafv2.CfnRuleGroup.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="lpt_attacks",
                            sampled_requests_enabled=False)),
                ],
            )

            # Create new WAF IPSet
            blacklist = wafv2.CfnIPSet(
                self,
                "Ext01ResponseIpSet",
                addresses=[],
                ip_address_version="IPV4",
                scope="REGIONAL",
                name="Ext01ResponseIpSet",
            )

            # Create reference statements
            xss_ref = wafv2.CfnWebACL.RuleGroupReferenceStatementProperty(
                arn=xss_rule_group.attr_arn)
            sqli_ref = wafv2.CfnWebACL.RuleGroupReferenceStatementProperty(
                arn=sqli_rule_group.attr_arn)
            lpt_ref = wafv2.CfnWebACL.RuleGroupReferenceStatementProperty(
                arn=lpt_rule_group.attr_arn)

            # Create a WAF
            waf = wafv2.CfnWebACL(
                self,
                id="Ext01_WAF",
                name="Ext01-WAF",
                default_action=wafv2.CfnWebACL.DefaultActionProperty(allow={}),
                scope="REGIONAL",
                visibility_config=wafv2.CfnWebACL.VisibilityConfigProperty(
                    cloud_watch_metrics_enabled=True,
                    metric_name="EXT01_WAF",
                    sampled_requests_enabled=True),
                rules=[
                    wafv2.CfnWebACL.RuleProperty(
                        name="SQLI",
                        priority=2,
                        statement=wafv2.CfnWebACL.StatementOneProperty(
                            rule_group_reference_statement=sqli_ref),
                        visibility_config=wafv2.CfnWebACL.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="sqli_requests",
                            sampled_requests_enabled=False),
                        override_action=wafv2.CfnWebACL.OverrideActionProperty(
                            none={}),
                    ),
                    wafv2.CfnWebACL.RuleProperty(
                        name="XSS",
                        priority=3,
                        statement=wafv2.CfnWebACL.StatementOneProperty(
                            rule_group_reference_statement=xss_ref),
                        visibility_config=wafv2.CfnWebACL.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="xss_requests",
                            sampled_requests_enabled=False),
                        override_action=wafv2.CfnWebACL.OverrideActionProperty(
                            none={}),
                    ),
                    wafv2.CfnWebACL.RuleProperty(
                        name="LPT",
                        priority=4,
                        statement=wafv2.CfnWebACL.StatementOneProperty(
                            rule_group_reference_statement=lpt_ref),
                        visibility_config=wafv2.CfnWebACL.
                        VisibilityConfigProperty(
                            cloud_watch_metrics_enabled=False,
                            metric_name="lpt_requests",
                            sampled_requests_enabled=False),
                        override_action=wafv2.CfnWebACL.OverrideActionProperty(
                            none={}),
                    ),
                ],
            )

            # Create unban lambda
            lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                           "ext_01")
            unban_lambda = _lambda.Function(
                self,
                "Ext01ResponseUnbanFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="unban_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "ipset_id": blacklist.attr_id,
                    "ipset_name": blacklist.name,
                    "ipset_scope": blacklist.scope,
                })
            # Assign WAF permissions to lambda
            unban_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["wafv2:GetIPSet", "wafv2:UpdateIPSet"],
                    effect=iam.Effect.ALLOW,
                    resources=[blacklist.attr_arn],
                ))

            # Create the step function
            # A state machine that unbans the blacklisted IP after 1 hour
            wait_step = sfn.Wait(
                self,
                "Ext01ResponseStepWait",
                time=sfn.WaitTime.duration(core.Duration.hours(1)),
            )
            unban_step = sfn.Task(
                self,
                "Ext01ResponseStepUnban",
                task=tasks.RunLambdaTask(
                    unban_lambda,
                    integration_pattern=sfn.ServiceIntegrationPattern.
                    FIRE_AND_FORGET,
                    payload={"Input.$": "$"}),
            )
            statemachine = sfn.StateMachine(
                self,
                "Ext01ResponseUnbanStateMachine",
                definition=wait_step.next(unban_step),
                timeout=core.Duration.hours(1.5),
            )

            # Create lambda function
            lambda_func = _lambda.Function(
                self,
                "Ext01ResponseFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="response_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "ipset_id": blacklist.attr_id,
                    "ipset_name": blacklist.name,
                    "ipset_scope": blacklist.scope,
                    "sfn_arn": statemachine.state_machine_arn,
                },
            )

            kinesis_log = s3.Bucket(
                self,
                id='waf_logs',
                access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
            )

            # Assign permissions to response lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "wafv2:GetIPSet",
                        "wafv2:UpdateIPSet",
                        "states:StartExecution",
                        "s3:GetObject",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        blacklist.attr_arn, statemachine.state_machine_arn,
                        kinesis_log.bucket_arn,
                        kinesis_log.bucket_arn + "/*"
                    ],
                ))

            # Create an IAM role for the stream
            stream_role = iam.Role(
                self,
                id="waf-kinesis-log-role",
                assumed_by=iam.ServicePrincipal(
                    service="firehose.amazonaws.com", ),
            )

            stream_permissions = iam.Policy(
                self,
                id="Ext-01-kinesis-permissions",
                statements=[
                    iam.PolicyStatement(
                        actions=[
                            "s3:AbortMultipartUpload",
                            "s3:GetBucketLocation",
                            "s3:GetObject",
                            "s3:ListBucket",
                            "s3:ListBucketMultipartUploads",
                            "s3:PutObject",
                        ],
                        effect=iam.Effect.ALLOW,
                        resources=[
                            kinesis_log.bucket_arn,
                            kinesis_log.bucket_arn + "/*"
                        ],
                    )
                ])

            stream_role.attach_inline_policy(stream_permissions)

            log_stream = firehose.CfnDeliveryStream(
                self,
                id="aws-waf-logs-ext01",
                delivery_stream_type="DirectPut",
                delivery_stream_name="aws-waf-logs-ext01",
                s3_destination_configuration=firehose.CfnDeliveryStream.
                S3DestinationConfigurationProperty(
                    bucket_arn=kinesis_log.bucket_arn,
                    buffering_hints=firehose.CfnDeliveryStream.
                    BufferingHintsProperty(interval_in_seconds=120,
                                           size_in_m_bs=5),
                    compression_format="UNCOMPRESSED",
                    role_arn=stream_role.role_arn),
            )
            kinesis_log.add_event_notification(
                s3.EventType.OBJECT_CREATED,
                dest=s3_notifications.LambdaDestination(lambda_func))
            utc_time = datetime.now(tz=timezone.utc)
            utc_time = utc_time + timedelta(minutes=5)
            cron_string = "cron(" + str(utc_time.minute) + " " + str(
                utc_time.hour) + " " + str(utc_time.day) + " " + str(
                    utc_time.month) + " ? " + str(utc_time.year) + ")"
            trigger = events.Rule(
                self,
                id="ext-01 setup",
                rule_name="Ext01-trigger",
                schedule=events.Schedule.expression(cron_string))

            setup_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                          "ext_01")
            setup_func = _lambda.Function(
                self,
                id="Ext01Setup",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="setup.lambda_handler",
                code=_lambda.Code.from_asset(setup_dir_path),
                environment={
                    "waf_arn": waf.attr_arn,
                    "waf_id": waf.attr_id,
                    "waf_scope": waf.scope,
                    "waf_name": waf.name,
                    "firehose_arn": log_stream.attr_arn,
                    "rule_name": "Ext01-trigger",
                    "blacklist_arn": blacklist.attr_arn,
                },
            )

            # Assign permissions to setup lambda
            setup_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "wafv2:PutLoggingConfiguration", "wafv2:GetWebACL",
                        "wafv2:UpdateWebACL"
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        waf.attr_arn, blacklist.attr_arn,
                        xss_rule_group.attr_arn, sqli_rule_group.attr_arn,
                        lpt_rule_group.attr_arn
                    ],
                ))

            setup = targets.LambdaFunction(handler=setup_func, )

            setup.bind(rule=trigger)
            trigger.add_target(target=setup)

            wafv2.CfnWebACLAssociation(
                self,
                id="API gateway association",
                resource_arn=API_ARN,
                web_acl_arn=waf.attr_arn,
            )
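
A hypothetical sketch of response_lambda.lambda_handler for this example (the ext_01 sources are not shown). The S3 notification above invokes it for every WAF log object delivered by Firehose; the handler extracts the client IPs, adds them to the blacklist IP set, and starts the unban state machine so each ban expires after the one-hour wait. The log parsing and the decision of which requests count as attacks are simplified assumptions.

import json
import os

import boto3

s3_client = boto3.client("s3")
waf = boto3.client("wafv2")
sfn_client = boto3.client("stepfunctions")


def lambda_handler(event, context):
    record = event["Records"][0]["s3"]
    body = s3_client.get_object(Bucket=record["bucket"]["name"],
                                Key=record["object"]["key"])["Body"].read()
    # WAF logs are newline-delimited JSON; collect the offending client IPs.
    ips = {json.loads(line)["httpRequest"]["clientIp"] + "/32"
           for line in body.decode().splitlines() if line}
    ipset = waf.get_ip_set(Name=os.environ["ipset_name"],
                           Scope=os.environ["ipset_scope"],
                           Id=os.environ["ipset_id"])
    waf.update_ip_set(Name=os.environ["ipset_name"],
                      Scope=os.environ["ipset_scope"],
                      Id=os.environ["ipset_id"],
                      Addresses=sorted(set(ipset["IPSet"]["Addresses"]) | ips),
                      LockToken=ipset["LockToken"])
    for ip in ips:
        # Schedule the automatic unban for each newly banned address.
        sfn_client.start_execution(stateMachineArn=os.environ["sfn_arn"],
                                   input=json.dumps({"ip": ip}))
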