Example #1
0
 def __init__(self, scope: core.Construct, lambda_name: str, source_topic: aws_sns.Topic = None, table_name: str = 'S3Table'):
     """Inline-code Lambda that writes to a DynamoDB table, optionally fed by an SNS topic.

     :param scope: parent construct.
     :param lambda_name: used as the construct id of the function.
     :param source_topic: if given, the topic is bound as the function's event source.
     :param table_name: exported to the handler via the TABLENAME env var.
     """
     # Read the handler source with a context manager so the file handle is
     # closed deterministically (the original `open(...).read()` leaked it).
     with open('serverless/lambdas/lambda_snstarget.py', encoding="utf-8") as src:
         handler_code = src.read()
     super().__init__(
         scope=scope,
         id=lambda_name,
         code=aws_lambda.InlineCode(handler_code),
         handler='index.handler',
         timeout=core.Duration.seconds(30),
         runtime=aws_lambda.Runtime.PYTHON_3_7,
         environment={'TABLENAME': table_name})
     # NOTE(review): dynamodb:* on resource '*' is very broad — consider
     # scoping to the specific table ARN.
     self.add_to_role_policy(aws_iam.PolicyStatement(actions=['dynamodb:*'], effect=aws_iam.Effect.ALLOW, resources=['*']))
     if source_topic is not None:
         sns_source = aws_lambda_event_sources.SnsEventSource(source_topic)
         sns_source.bind(self)
    def __init__(self, scope: core.Construct, id: str,
                 infra: RtspBaseResourcesConstruct, **kwargs) -> None:
        """Subscribe this construct's function to its SNS topic.

        Delivery failures go to the construct's DLQ, and messages are
        filtered with the construct's filter policy.
        """
        super().__init__(scope, id, infra=infra, **kwargs)

        # Build the event source once, then attach it to the function.
        topic_source = events.SnsEventSource(
            topic=self.topic,
            dead_letter_queue=self.dlq,
            filter_policy=self.filter_policy)
        self.function.add_event_source(topic_source)
Example #3
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Stack with an SNS-triggered Lambda and a small EC2 fleet.

        The Lambda ("FetchAtopLogs") is subscribed to a fresh SNS topic; the
        EC2 instances boot with the local ``bootstrap.sh`` as user data.
        """
        super().__init__(scope, id, **kwargs)

        def get_userdata():
            # Bootstrap script shipped alongside the app.
            with open('bootstrap.sh', 'r') as userdata:
                return userdata.read()

        # Pre-existing roles referenced by ARN (not created by this stack).
        kratos_role = aws_iam.Role.from_role_arn(
            self, 'KratosXL', role_arn="arn:aws:iam::88888888:role/KratosRole")

        lambda_role = aws_iam.Role.from_role_arn(
            self,
            'LambdaXL',
            role_arn="arn:aws:iam::999999999:role/Lambda_Kratos")

        sns_topic = aws_sns.Topic(self,
                                  "Topic",
                                  display_name="cdk-sns-trigger")

        lambda_function = aws_lambda.Function(
            self,
            "FetchAtopLogs",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            role=lambda_role,
            handler="lambda_handler.lambda_handler",
            code=aws_lambda.Code.from_asset('myfunc'))

        lambda_function.add_event_source(
            aws_lambda_event_sources.SnsEventSource(sns_topic))
        # NOTE(review): the original also built an
        # aws_sns_subscriptions.LambdaSubscription here but never attached it
        # to the topic, so it had no effect; SnsEventSource above already
        # subscribes the function. The dead object was removed.

        def generate_instances(count=1):
            # Launch `count` t2.micro instances from a pinned AL2 AMI.
            amazon_linux_2 = aws_ec2.GenericLinuxImage(
                {"us-east-1": "ami-0fc61db8544a617ed"})
            ec2_objects = []
            for i in range(count):
                ec2_instance = aws_ec2.Instance(
                    self,
                    f"CDK-Instance-{i + 1}",
                    instance_type=aws_ec2.InstanceType('t2.micro'),
                    role=kratos_role,
                    # NOTE(review): CfnSecurityGroup is an L1 construct while
                    # aws_ec2.Instance expects an ISecurityGroup (L2, e.g.
                    # aws_ec2.SecurityGroup) — confirm this synthesizes.
                    security_group=aws_ec2.CfnSecurityGroup(
                        self,
                        id=f"SG{i + 1}",
                        group_description=f"SG-CDK-{i}"),
                    vpc=aws_ec2.Vpc.from_lookup(self,
                                                f'CDK-VPC-{i + 1}',
                                                vpc_id="vpc-eeeee3"),
                    user_data=aws_ec2.UserData.custom(get_userdata()),
                    key_name="covidQuarantine")
                ec2_objects.append(ec2_instance)
            return ec2_objects

        generate_instances()
 def __init__(
     self, scope: core.Construct, id: str, map_params: dict, **kwargs
 ):  # pylint: disable=W0622
     """Create the encrypted pipeline-notification SNS topic and subscribe
     either an email endpoint or the shared Slack-notification Lambda.

     ``map_params['params']['notification_endpoint']`` selects the target:
     a value containing ``@`` is treated as an email address, anything else
     routes through the SendSlackNotification Lambda.
     """
     super().__init__(scope, id, **kwargs)
     LOGGER.debug('Notification configuration required for %s', map_params['name'])
     stack = core.Stack.of(self)
     endpoint = map_params.get("params", {}).get("notification_endpoint", "")
     endpoint_is_email = "@" in endpoint
     # pylint: disable=no-value-for-parameter
     slack_function = _lambda.Function.from_function_arn(
         self,
         'slack_lambda_function',
         f'arn:{stack.partition}:lambda:{ADF_DEPLOYMENT_REGION}:'
         f'{ADF_DEPLOYMENT_ACCOUNT_ID}:function:SendSlackNotification'
     )
     pipeline_key_alias = _kms.Alias.from_alias_name(
         self, "KMSAlias", f"alias/codepipeline-{ADF_DEPLOYMENT_ACCOUNT_ID}")
     pipeline_topic = _sns.Topic(self, "PipelineTopic", master_key=pipeline_key_alias)
     # Allow the AWS services that raise pipeline events to publish here.
     pipeline_topic.add_to_resource_policy(_iam.PolicyStatement(
         actions=["sns:Publish"],
         effect=_iam.Effect.ALLOW,
         principals=[
             _iam.ServicePrincipal("sns.amazonaws.com"),
             _iam.ServicePrincipal("codecommit.amazonaws.com"),
             _iam.ServicePrincipal("events.amazonaws.com"),
         ],
         resources=["*"],
     ))
     _sns.Subscription(
         self,
         "sns_subscription",
         topic=pipeline_topic,
         endpoint=endpoint if endpoint_is_email else slack_function.function_arn,
         protocol=(_sns.SubscriptionProtocol.EMAIL
                   if endpoint_is_email
                   else _sns.SubscriptionProtocol.LAMBDA),
     )
     if not endpoint_is_email:
         # Grant SNS permission to invoke the Slack Lambda and register the
         # topic as one of its event sources.
         _lambda.CfnPermission(
             self,
             "slack_notification_sns_permissions",
             principal="sns.amazonaws.com",
             action="lambda:InvokeFunction",
             source_arn=pipeline_topic.topic_arn,
             function_name="SendSlackNotification",
         )
         slack_function.add_event_source(
             source=_event_sources.SnsEventSource(pipeline_topic))
     self.topic_arn = pipeline_topic.topic_arn
 def __init__(self, scope: core.Construct, id: str, map_params: dict,
              **kwargs):  #pylint: disable=W0622
     """Create the pipeline-notification SNS topic and subscribe either an
     email endpoint or the shared Slack-notification Lambda.

     :param map_params: deployment-map parameters;
         ``params.notification_endpoint`` selects the target (email when it
         contains ``@``, the SendSlackNotification Lambda otherwise).
     """
     super().__init__(scope, id, **kwargs)
     LOGGER.debug('Notification configuration required for %s',
                  map_params['name'])
     # pylint: disable=no-value-for-parameter
     _slack_func = _lambda.Function.from_function_arn(
         self, 'slack_lambda_function',
         'arn:aws:lambda:{0}:{1}:function:SendSlackNotification'.format(
             ADF_DEPLOYMENT_REGION, ADF_DEPLOYMENT_ACCOUNT_ID))
     _topic = _sns.Topic(self, 'PipelineTopic')
     # Allow the AWS services that raise pipeline events to publish here.
     _statement = _iam.PolicyStatement(
         actions=["sns:Publish"],
         effect=_iam.Effect.ALLOW,
         principals=[
             _iam.ServicePrincipal('sns.amazonaws.com'),
             _iam.ServicePrincipal('codecommit.amazonaws.com'),
             _iam.ServicePrincipal('events.amazonaws.com')
         ],
         resources=["*"])
     _topic.add_to_resource_policy(_statement)
     _endpoint = map_params.get('params', {}).get('notification_endpoint',
                                                  '')
     _sub = _sns.Subscription(
         self,
         'sns_subscription',
         topic=_topic,
         endpoint=_endpoint
         if '@' in _endpoint else _slack_func.function_arn,
         protocol=_sns.SubscriptionProtocol.EMAIL
         if '@' in _endpoint else _sns.SubscriptionProtocol.LAMBDA)
     if '@' not in _endpoint:
         # Fix: only grant SNS invoke permission when the Slack Lambda is
         # actually the subscriber — email endpoints need no such grant.
         # This matches the guarded behaviour of the newer variant of this
         # construct elsewhere in the codebase.
         _lambda.CfnPermission(self,
                               'slack_notification_sns_permissions',
                               principal='sns.amazonaws.com',
                               action='lambda:InvokeFunction',
                               source_arn=_topic.topic_arn,
                               function_name='SendSlackNotification')
         _slack_func.add_event_source(
             source=_event_sources.SnsEventSource(_topic))
     self.topic_arn = _topic.topic_arn
Example #6
0
    def __init__(self, scope: core.Construct, id: str,
                 target_step_function_arn: str, source_bucket_sns: sns.Topic,
                 dynamo_table: dynamo.Table, **kwargs):
        """Lambda that starts the target Step Function whenever the source
        bucket's SNS topic fires, tracking state in the given DynamoDB table.
        """
        super().__init__(scope, id, **kwargs)

        # Least-privilege pieces for the trigger function's role.
        start_execution_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['states:StartExecution', 'states:ListExecutions'],
            resources=[target_step_function_arn])
        table_access_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "dynamodb:BatchGet*", "dynamodb:DescribeStream",
                "dynamodb:DescribeTable", "dynamodb:Get*",
                "dynamodb:Query", "dynamodb:Scan",
                "dynamodb:BatchWrite*", "dynamodb:CreateTable",
                "dynamodb:Delete*", "dynamodb:Update*",
                "dynamodb:PutItem"
            ],
            resources=[dynamo_table.table_arn])

        # SNS Triggered Pipeline: the function subscribes directly to the
        # bucket-notification topic.
        aws_lambda.Function(
            self,
            'SNSTriggeredLambda',
            code=aws_lambda.Code.from_asset(
                'infrastructure/emr_trigger/lambda_source/'),
            handler='trigger.handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.minutes(1),
            environment={
                'PIPELINE_ARN': target_step_function_arn,
                'TABLE_NAME': dynamo_table.table_name
            },
            initial_policy=[start_execution_policy, table_access_policy],
            events=[sources.SnsEventSource(source_bucket_sns)])
Example #7
0
    def _create_lambda_for_manual_approval(self) -> Resource:
        """Build the Lambda that relays manual-approval notifications to Slack.

        Ref: https://github.com/aws-samples/aws-cdk-examples/tree/master/python/lambda-cron
        """
        # Handler sources live next to this file.
        source_dir = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn_manual_approve/"

        approval_fn = lambda_python.PythonFunction(
            scope=self,
            id=f"{self.name_prefix}-manual-approval-lambda_handler",
            entry=source_dir,
            index="lambda_handler.py",
            handler="lambda_handler",
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            environment={"slack_hook_url": self.context["slack_hook_url"]})

        # Fire the function whenever the approval topic publishes.
        approval_fn.add_event_source(
            lambda_event_sources.SnsEventSource(self.sns_topic))

        return approval_fn
Example #8
0
# Create the State Machine
state_machine = sfn.StateMachine(stack,
                                 'SNSTriggeredPipeline',
                                 state_machine_name='sns-triggered-pipeline',
                                 definition=definition)

# Create a Lambda to receive SNS Events from another Pipeline and execute our
# SNS Triggered Pipeline
lambda_code = aws_lambda.Code.from_asset('./lambda_sources')
sns_lambda = aws_lambda.Function(
    stack,
    'SNSTriggeredLambda',
    code=lambda_code,
    handler='execute_pipeline.handler',
    runtime=aws_lambda.Runtime.PYTHON_3_7,
    timeout=core.Duration.minutes(1),
    # The handler reads PIPELINE_ARN to know which state machine to start.
    environment={'PIPELINE_ARN': state_machine.state_machine_arn},
    initial_policy=[
        # Least privilege: StartExecution on this state machine only.
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=['states:StartExecution'],
                            resources=[state_machine.state_machine_arn])
    ],
    events=[
        # Subscribe to the upstream pipeline's success topic, resolved from
        # the CloudFormation export 'TransientPipeline-SuccessTopicArn'.
        sources.SnsEventSource(
            sns.Topic.from_topic_arn(
                stack, 'TransientPipelineSuccessTopic',
                core.Fn.import_value('TransientPipeline-SuccessTopicArn')))
    ])

app.synth()
Example #9
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Experiment stack: one "client"-style and one "resource"-style
        measurement Lambda per memory size step, all fanned out from a
        single SNS invoker topic and writing results to a DynamoDB table.
        """
        super().__init__(scope, construct_id, **kwargs)

        invoker_topic = sns.Topic(self, "experiment-invoker")

        # Results table: generic PK/SK schema, on-demand billing; destroyed
        # with the stack since the data is experimental.
        experiment_table = dynamodb.Table(
            self,
            id="experiment-table",
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=dynamodb.Attribute(
                name="PK", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="SK",
                                        type=dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        lambda_code_asset = _lambda.Code.from_asset(
            path=os.path.join(os.path.dirname(__file__), "..", "src"))

        def _build_measurement_function(test_method, mem_size):
            # One measurement Lambda for the given call style ("client" or
            # "resource") and memory size, subscribed to the invoker topic
            # with read/write access to the results table. Extracted here
            # because the original duplicated this whole stanza verbatim.
            fn = _lambda.Function(
                self,
                id=f"measurement-{test_method}-{mem_size}-mb",
                code=lambda_code_asset,
                environment={
                    "TEST_METHOD": test_method,
                    "MEMORY_SIZE": str(mem_size),
                    "TABLE_NAME": experiment_table.table_name,
                },
                handler=f"lambda_handler.{test_method}_handler",
                runtime=_lambda.Runtime.PYTHON_3_8,
                memory_size=mem_size,
                timeout=core.Duration.seconds(120),
            )

            fn.add_event_source(
                lambda_event_sources.SnsEventSource(invoker_topic))

            experiment_table.grant_read_write_data(fn)

            # Allow for self-mutating function
            fn.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "lambda:getFunctionConfiguration",
                        "lambda:updateFunctionConfiguration",
                    ],
                    # CFN screams at me with circular dependencies if I use the ref here.
                    resources=["*"]))
            return fn

        # Now we build as many lambdas as we need.
        current_mem_size = LAMBDA_MEMORY_MIN_SIZE_IN_MB
        while current_mem_size <= LAMBDA_MEMORY_MAX_SIZE_IN_MB:
            _build_measurement_function("client", current_mem_size)
            _build_measurement_function("resource", current_mem_size)
            current_mem_size += LAMBDA_MEMORY_INCREMENTS_IN_MB

        # The function to gather and aggregate the measurements
        result_aggregator = _lambda.Function(
            self,
            id="result-aggregator",
            code=lambda_code_asset,
            environment={
                "TABLE_NAME": experiment_table.table_name,
            },
            handler="lambda_handler.result_aggregator",
            runtime=_lambda.Runtime.PYTHON_3_8,
            memory_size=1024,
            timeout=core.Duration.seconds(300),
        )

        experiment_table.grant_read_write_data(result_aggregator)

        # The function that kicks off an experiment run by publishing to the
        # invoker topic.
        invoker = _lambda.Function(
            self,
            id="experiment-invoker-function",
            code=lambda_code_asset,
            environment={
                "INVOKER_TOPIC_ARN": invoker_topic.topic_arn,
                "TABLE_NAME": experiment_table.table_name,
            },
            handler="lambda_handler.invoke_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            memory_size=1024,
            timeout=core.Duration.seconds(300),
        )

        invoker_topic.grant_publish(invoker)
        experiment_table.grant_read_write_data(invoker)

        core.CfnOutput(self,
                       "invokerFn",
                       value=invoker.function_name,
                       description="Name of the invoker function")

        core.CfnOutput(self,
                       "resultAggregatorFn",
                       value=result_aggregator.function_name,
                       description="Name of the result aggregator function")
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """SlackOps pipeline demo stack.

        Provisions a CodeCommit repo, a CodePipeline (Source -> Build ->
        Manual Approval -> S3 deploy), an API Gateway endpoint that records
        the approval decision via CodePipeline's PutApprovalResult API, and
        a Lambda that forwards approval notifications to a Slack webhook.
        """
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        # Source repository feeding the pipeline.
        repository = codecommit.Repository(
            self,
            "slackops-repository",
            repository_name="slackops-pipeline-repo",
            description="Repo for the SlackOps Pipeline Demo",
        )

        # Publicly readable static-website bucket that the pipeline deploys to.
        website_bucket = s3.Bucket(self,
                                   "website-bucket",
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   auto_delete_objects=True,
                                   public_read_access=True,
                                   website_index_document="index.html")

        # Notified when the pipeline reaches the manual-approval stage.
        manual_approval_topic = sns.Topic(
            self,
            "manual-approval-notification",
        )

        artifact_bucket = s3.Bucket(self,
                                    "artifact-bucket",
                                    removal_policy=core.RemovalPolicy.DESTROY)

        source_artifact = codepipeline.Artifact(artifact_name="Source")
        deployment_artifact = codepipeline.Artifact(artifact_name="Deployment")

        # Four stages: Source -> Build -> Manual Approval -> Deploy-to-S3.
        pipeline = codepipeline.Pipeline(
            self,
            "slackops-pipeline",
            artifact_bucket=artifact_bucket,
            stages=[
                codepipeline.StageOptions(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            repository=repository,
                            branch="master",
                            output=source_artifact,
                            action_name="Source")
                    ]),
                codepipeline.StageOptions(
                    stage_name="Build",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            input=source_artifact,
                            action_name="Build",
                            project=codebuild.PipelineProject(
                                self,
                                "build-project",
                                build_spec=codebuild.BuildSpec.
                                from_source_filename("buildspec.yml"),
                                environment=codebuild.BuildEnvironment(
                                    build_image=codebuild.LinuxBuildImage.
                                    STANDARD_5_0),
                            ),
                            outputs=[deployment_artifact])
                    ]),
                codepipeline.StageOptions(
                    stage_name=MANUAL_APPROVAL_STAGE_NAME,
                    actions=[
                        codepipeline_actions.ManualApprovalAction(
                            action_name=MANUAL_APPROVAL_ACTION_NAME,
                            additional_information=
                            "Please Approve the Deployment",
                            notification_topic=manual_approval_topic,
                        )
                    ]),
                codepipeline.StageOptions(
                    stage_name="Deploy",
                    actions=[
                        codepipeline_actions.S3DeployAction(
                            bucket=website_bucket,
                            input=deployment_artifact,
                            access_control=s3.BucketAccessControl.PUBLIC_READ,
                            action_name="deploy-to-s3")
                    ])
            ])

        # Build the API Gateway to record the approval or rejection

        rest_api = apigateway.RestApi(self,
                                      "slackops-apigw",
                                      deploy_options=apigateway.StageOptions(
                                          stage_name="prod", ))

        root_resource = rest_api.root.add_resource("v1")

        # GET /v1/approval?token=...&response=... records the decision.
        approval_resource = root_resource.add_resource("approval")

        # Role API Gateway assumes to call codepipeline:PutApprovalResult.
        api_gateway_role = iam.Role(self,
                                    "slackops-apigw-role",
                                    assumed_by=iam.ServicePrincipal(
                                        service="apigateway.amazonaws.com", ))
        api_gateway_role.add_to_policy(
            iam.PolicyStatement(actions=["codepipeline:PutApprovalResult"],
                                resources=[pipeline.pipeline_arn + "/*"]))

        # Double curlies to make str.format work
        # VTL template mapping the query-string token/response into the
        # PutApprovalResult request body.
        # NOTE(review): action_name/stage_name are hard-coded literals here —
        # presumably they should equal MANUAL_APPROVAL_ACTION_NAME /
        # MANUAL_APPROVAL_STAGE_NAME; confirm they match.
        mapping_template = """
#set($token = $input.params("token"))
#set($response = $input.params("response"))
{{
   "actionName": "{action_name}",
   "pipelineName": "{pipeline_name}",
   "result": {{ 
      "status": "$response",
      "summary": ""
   }},
   "stageName": "{stage_name}",
   "token": "$token"
}}
        """.format(
            action_name="approve-before-publication",
            pipeline_name=pipeline.pipeline_name,
            stage_name="Approval",
        )

        # Direct AWS-service integration (no Lambda in between): API Gateway
        # calls CodePipeline's PutApprovalResult with the mapped body.
        approval_integration = apigateway.AwsIntegration(
            service="codepipeline",
            action="PutApprovalResult",
            integration_http_method="POST",
            options=apigateway.IntegrationOptions(
                credentials_role=api_gateway_role,
                request_parameters={
                    "integration.request.header.x-amz-target":
                    "'CodePipeline_20150709.PutApprovalResult'",
                    "integration.request.header.Content-Type":
                    "'application/x-amz-json-1.1'",
                },
                passthrough_behavior=apigateway.PassthroughBehavior.NEVER,
                request_templates={"application/json": mapping_template},
                # Map 2xx/4xx/5xx service responses onto method responses.
                integration_responses=[
                    apigateway.IntegrationResponse(
                        status_code='400',
                        selection_pattern="4\d{2}",
                        response_parameters={
                            'method.response.header.error':
                            'integration.response.body'
                        }),
                    apigateway.IntegrationResponse(
                        status_code='500',
                        selection_pattern="5\d{2}",
                        response_parameters={
                            'method.response.header.error':
                            'integration.response.body'
                        }),
                    apigateway.IntegrationResponse(
                        status_code='200',
                        selection_pattern="2\d{2}",
                        response_parameters={
                            'method.response.header.response':
                            'integration.response.body'
                        }),
                ]))

        # Require both query-string params; wire the integration responses
        # to matching method responses.
        approval_method = approval_resource.add_method(
            http_method="GET",
            request_validator=apigateway.RequestValidator(
                self,
                "request-validator",
                rest_api=rest_api,
                request_validator_name="ParamValidator",
                validate_request_parameters=True),
            request_parameters={
                "method.request.querystring.token": True,
                "method.request.querystring.response":
                True,  # Approved / Rejected
            },
            method_responses=[
                apigateway.MethodResponse(
                    status_code='400',
                    response_parameters={'method.response.header.error':
                                         True}),
                apigateway.MethodResponse(
                    status_code='500',
                    response_parameters={'method.response.header.error':
                                         True}),
                apigateway.MethodResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.response': True
                    }),
            ],
            integration=approval_integration,
        )

        # Notification mechanism

        # Slack webhook URL is stored in SSM; placeholder must be replaced
        # manually after deployment.
        ssm_parameter_webhook = ssm.StringParameter(
            self,
            "slackops-webhook-parameter",
            string_value="<replace-me>",
            parameter_name="/slackops/webhook-url")

        # Lambda that posts the approval request (with the API link) to Slack.
        notification_lambda = _lambda.PythonFunction(
            self,
            "slackops-notification",
            entry=os.path.join(os.path.dirname(__file__), "..", "src"),
            index="index.py",
            handler="notification_handler",
            environment={
                "WEBHOOK_URL_PARAMETER": ssm_parameter_webhook.parameter_name,
                "API_ENDPOINT": rest_api.url_for_path("/v1/approval"),
            })

        notification_lambda.add_event_source(
            lambda_event_sources.SnsEventSource(topic=manual_approval_topic))

        ssm_parameter_webhook.grant_read(notification_lambda)

        # Outputs

        core.CfnOutput(self,
                       "repositoryHttps",
                       value=repository.repository_clone_url_http)

        core.CfnOutput(self,
                       "repositorySSH",
                       value=repository.repository_clone_url_ssh)

        core.CfnOutput(self,
                       "websiteUrl",
                       value=website_bucket.bucket_website_url)
Example #11
0
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        """Lambda Destinations demo: an SNS-triggered caller Lambda routes
        async successes to a callee Lambda and failures to an EventBridge
        bus whose events are captured in a CloudWatch log group.
        """
        super().__init__(scope, construct_id, **kwargs)

        # vpc_name = self.node.try_get_context("vpc_name")
        # vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
        #   is_default=True,
        #   vpc_name=vpc_name)
        vpc = aws_ec2.Vpc(
            self,
            "FirehoseToS3VPC",
            max_azs=2,
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        # Function names kept as literal constants: the log-group names below
        # must not reference Function.function_name or CFN reports a circular
        # dependency. Both constants are declared up front and reused
        # everywhere (the original repeated the literals inconsistently).
        ASYNC_CALLEE_LAMBDA_FN_NAME = "LambdaAsyncCallee"
        CALLER_LAMBDA_FN_NAME = "LambdaAsyncCaller"

        async_callee_lambda_fn = aws_lambda.Function(
            self,
            "LambdaAsyncCallee",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            function_name=ASYNC_CALLEE_LAMBDA_FN_NAME,
            handler="lambda_aync_callee.lambda_handler",
            description=
            "Lambda function asynchrously invoked by LambdaAsyncCaller",
            code=aws_lambda.Code.from_asset(
                os.path.join(os.path.dirname(__file__), 'src/main/python')),
            timeout=cdk.Duration.minutes(5))

        log_group = aws_logs.LogGroup(
            self,
            "LambdaAsyncCalleeLogGroup",
            #XXX: Circular dependency between resources occurs
            # if aws_lambda.Function.function_name is used
            # instead of literal name of lambda function such as "LambdaAsyncCallee"
            log_group_name="/aws/lambda/{}".format(
                ASYNC_CALLEE_LAMBDA_FN_NAME),
            retention=aws_logs.RetentionDays.THREE_DAYS,
            removal_policy=cdk.RemovalPolicy.DESTROY)
        log_group.grant_write(async_callee_lambda_fn)

        event_bus = aws_events.EventBus(
            self,
            "EventBusForLambda",
            event_bus_name="EventBusForLambdaDestinations",
        )
        event_bus.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Log group that receives every event delivered to the bus via the
        # rule below, giving visibility into failure destinations.
        log_group = aws_logs.LogGroup(
            self,
            "EventBusLogGroup",
            log_group_name="/aws/events/{}".format(event_bus.event_bus_name),
            retention=aws_logs.RetentionDays.THREE_DAYS,
            removal_policy=cdk.RemovalPolicy.DESTROY)

        event_rule = aws_events.Rule(
            self,
            "EventRuleForLambdaDestinations",
            rule_name="EventRuleForLambdaDestinations",
            event_bus=event_bus,
            # Match every event originating from this account.
            event_pattern={"account": [self.account]})
        event_rule.add_target(aws_events_targets.CloudWatchLogGroup(log_group))
        event_rule.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        caller_lambda_fn = aws_lambda.Function(
            self,
            "LambdaAsyncCaller",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            function_name=CALLER_LAMBDA_FN_NAME,
            handler="lambda_caller.lambda_handler",
            description="Asynchronusly call lambda function",
            code=aws_lambda.Code.from_asset(
                os.path.join(os.path.dirname(__file__), 'src/main/python')),
            timeout=cdk.Duration.minutes(5),
            #XXX: Uncomments out if you want to use different lambda function version
            # current_version_options=aws_lambda.VersionOptions(
            #   on_success=aws_lambda_destinations.LambdaDestination(async_callee_lambda_fn, response_only=False),
            #   on_failure=aws_lambda_destinations.EventBridgeDestination(event_bus),
            #   max_event_age=cdk.Duration.hours(6), # Minimum: 60 seconds, Maximum: 6 hours, Default: 6 hours
            #   retry_attempts=0 # Minimum: 0, Maximum: 2, Default: 2
            # ),
            on_success=aws_lambda_destinations.LambdaDestination(
                async_callee_lambda_fn, response_only=False),
            on_failure=aws_lambda_destinations.EventBridgeDestination(
                event_bus),
            max_event_age=cdk.Duration.hours(
                6),  # Minimum: 60 seconds Maximum: 6 hours, Default: 6 hours
            #XXX: Set retry_attempts to 0 in order to invoke other lambda function as soon as a error occurred
            retry_attempts=0  # Minimum: 0, Maximum: 2, Default: 2
        )

        sns_topic = aws_sns.Topic(self,
                                  'SnsTopicForLambda',
                                  topic_name='LambdaSourceEvent',
                                  display_name='lambda source event')
        caller_lambda_fn.add_event_source(
            aws_lambda_event_sources.SnsEventSource(sns_topic))

        caller_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}}{resource-name}
                # NOTE(review): partition is hard-coded to "aws"; consider
                # cdk.Aws.PARTITION for GovCloud/China portability.
                resources=[
                    self.format_arn(
                        partition="aws",
                        service="lambda",
                        region=cdk.Aws.REGION,
                        account=cdk.Aws.ACCOUNT_ID,
                        resource="function",
                        resource_name="{}*".format(
                            async_callee_lambda_fn.function_name),
                        arn_format=cdk.ArnFormat.COLON_RESOURCE_NAME)
                ],
                actions=["lambda:InvokeFunction"]))

        # Needed so the failure destination can publish to the event bus.
        caller_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                resources=[event_bus.event_bus_arn],
                actions=["events:PutEvents"]))

        log_group = aws_logs.LogGroup(
            self,
            "LambdaAsyncCallerLogGroup",
            #XXX: Circular dependency between resources occurs
            # if aws_lambda.Function.function_name is used
            # instead of literal name of lambda function such as "LambdaAsyncCaller"
            log_group_name="/aws/lambda/{}".format(CALLER_LAMBDA_FN_NAME),
            retention=aws_logs.RetentionDays.THREE_DAYS,
            removal_policy=cdk.RemovalPolicy.DESTROY)
        log_group.grant_write(caller_lambda_fn)

        cdk.CfnOutput(self,
                      'SNSTopicName',
                      value=sns_topic.topic_name,
                      export_name='SNSTopicName')
        cdk.CfnOutput(self,
                      'SNSTopicArn',
                      value=sns_topic.topic_arn,
                      export_name='SNSTopicArn')
# ----- Example #12 -----
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        """Provision the api_poc proof-of-concept stack.

        Creates a small VPC hosting a Redis (ElastiCache) cluster, an API
        Gateway fronted lambda that emulates an external API, and the
        job-queue / orchestrator / worker / notification lambdas that
        exercise it on an hourly schedule.

        :param scope: parent CDK app/stage.
        :param id: logical id of this stack.
        """
        super().__init__(scope, id, **kwargs)

        self.poc_config = {'api_poc': dict()}
        self.read_config()

        # shared stuff
        self._vpc = ec2.Vpc(
            self,
            'api_poc-vpc',
            cidr='10.0.0.0/23',
            max_azs=1,
            nat_gateways=1,
        )

        self._private_subnet_selection = self._vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE)
        self._security_group = ec2.SecurityGroup.from_security_group_id(
            self,
            'default_sg',
            security_group_id=self._vpc.vpc_default_security_group)

        # Self-referencing rule so anything in the default SG (the lambdas
        # below) can reach Redis. Port.tcp(6379) renders the same rule the
        # former tcp_range(start_port=6379, end_port=6379) did.
        self._security_group.add_ingress_rule(description='redis',
                                              peer=self._security_group,
                                              connection=ec2.Port.tcp(6379))

        self._python3_lib_layer = _lambda.LayerVersion(
            self,
            'python3-lib-layer',
            description="python3 module dependencies",
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_6
            ],
            code=_lambda.Code.from_asset('layers/python3-lib-layer.zip'))

        # redis cache cluster
        self._cache_subnet_group = elasticache.CfnSubnetGroup(
            self,
            'cache_subnet_group',
            description='elasticache subnet group',
            subnet_ids=self._private_subnet_selection.subnet_ids,
            cache_subnet_group_name='cache-subnet-group')

        self._redis_cache = elasticache.CfnCacheCluster(
            self,
            'cache',
            cache_node_type='cache.t2.micro',
            num_cache_nodes=1,
            engine='redis',
            cache_subnet_group_name='cache-subnet-group',
            vpc_security_group_ids=[self._security_group.security_group_id],
        )
        # CfnSubnetGroup is a raw L1 resource, so CDK cannot infer the
        # ordering dependency automatically.
        self._redis_cache.add_depends_on(self._cache_subnet_group)

        # external API simulator lambda
        api_handler = _lambda.Function(
            self,
            "external-api",
            runtime=_lambda.Runtime.PYTHON_3_7,
            # Code.from_asset replaces the deprecated Code.asset alias,
            # matching the usage elsewhere in this stack.
            code=_lambda.Code.from_asset('lambda'),
            handler='external_api.handler',
            layers=[self._python3_lib_layer],
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE)
        api_handler.add_environment('REDIS_ADDRESS', self.redis_address)
        api_handler.add_environment('REDIS_PORT', self.redis_port)

        # API Gateway frontend to simulator lambda
        self._api_gateway = apigw.LambdaRestApi(
            self,
            'external_api',
            description='external API emulator',
            options=apigw.StageOptions(stage_name='dev'),
            handler=api_handler,
            proxy=True)

        job_dlq = sqs.Queue(self, 'job-dlq')

        # Messages failing 3 receives land in the DLQ.
        job_queue = sqs.Queue(self,
                              'job-queue',
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  queue=job_dlq, max_receive_count=3))

        throttle_event_topic = sns.Topic(self, 'throttle-events-topic')

        self.add_sns_subscriptions(throttle_event_topic)

        worker = _lambda.Function(self,
                                  'worker',
                                  runtime=_lambda.Runtime.PYTHON_3_7,
                                  code=_lambda.Code.from_asset('lambda'),
                                  handler='worker.handler',
                                  layers=[self._python3_lib_layer],
                                  reserved_concurrent_executions=20,
                                  timeout=core.Duration.minutes(1),
                                  vpc=self._vpc,
                                  vpc_subnets=self._private_subnet_selection,
                                  security_group=self._security_group,
                                  log_retention=logs.RetentionDays.FIVE_DAYS,
                                  tracing=_lambda.Tracing.ACTIVE,
                                  dead_letter_queue_enabled=False)
        # FIXME(security): hard-coded API key committed to source control;
        # move this value into SSM Parameter Store / Secrets Manager.
        worker.add_environment('API_KEY', '212221848ab214821de993a9d')
        worker.add_environment('JOB_QUEUE_URL', job_queue.queue_url)
        worker.add_environment('THROTTLE_EVENTS_TOPIC',
                               throttle_event_topic.topic_arn)
        worker.add_environment('REDIS_ADDRESS', self.redis_address)
        worker.add_environment('REDIS_PORT', self.redis_port)
        job_queue.grant_send_messages(worker)
        throttle_event_topic.grant_publish(worker)

        # Single-concurrency orchestrator that drains the job queue and
        # invokes the worker.
        orchestrator = _lambda.Function(
            self,
            'orchestrator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='orchestrator.handler',
            layers=[self._python3_lib_layer],
            reserved_concurrent_executions=1,
            timeout=core.Duration.minutes(2),
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        orchestrator.add_environment('API_HOST_URL', self._api_gateway.url)
        orchestrator.add_environment('JOB_QUEUE_URL', job_queue.queue_url)
        orchestrator.add_environment('JOB_DLQ_URL', job_dlq.queue_url)
        orchestrator.add_environment('THROTTLE_EVENTS_TOPIC',
                                     throttle_event_topic.topic_arn)
        orchestrator.add_environment('REDIS_ADDRESS', self.redis_address)
        orchestrator.add_environment('REDIS_PORT', self.redis_port)
        orchestrator.add_environment('WORKER_FUNCTION_ARN',
                                     worker.function_arn)
        job_queue.grant_consume_messages(orchestrator)
        job_dlq.grant_send_messages(orchestrator)
        throttle_event_topic.grant_publish(orchestrator)
        worker.grant_invoke(orchestrator)

        # Single-concurrency producer that enqueues jobs.
        task_master = _lambda.Function(
            self,
            'task_master',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='task_master.handler',
            layers=[self._python3_lib_layer],
            reserved_concurrent_executions=1,
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        task_master.add_environment('SQS_URL', job_queue.queue_url)
        task_master.add_environment('REDIS_ADDRESS', self.redis_address)
        task_master.add_environment('REDIS_PORT', self.redis_port)
        task_master.add_environment('API_HOST_URL', self._api_gateway.url)
        job_queue.grant_send_messages(task_master)

        # Slack notifier; runs outside the VPC (no VPC config needed).
        slack_notify = _lambda.Function(
            self,
            'slack-notify',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='slack_notify.lambda_handler',
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        # lambda uses ssm parameter store to retrieve values
        slack_notify.add_environment('encryptedHookUrlKey',
                                     '/api_poc/notify/slack/hook_url')
        slack_notify.add_environment('slackChannelKey',
                                     '/api_poc/notify/slack/channel')
        slack_notify.add_environment('notifySlack', 'false')
        slack_notify.add_event_source(
            event_sources.SnsEventSource(throttle_event_topic))
        slack_notify.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                # TODO fix least privilege
                # actions=['ssm:GetParameter'],
                # resources=['arn:aws:ssm:::parameter/api_poc/notify/slack/*'],
                actions=['ssm:*'],
                resources=['*'],
            ))

        # kick off lambda(s) once per interval
        rule = events.Rule(self,
                           'orchestrator_rule',
                           schedule=events.Schedule.rate(
                               core.Duration.hours(1)))
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
        rule.add_target(targets.LambdaFunction(orchestrator))
        rule.add_target(targets.LambdaFunction(task_master))

        # stack outputs
        core.CfnOutput(self,
                       'Redis_Address',
                       value=self._redis_cache.attr_redis_endpoint_address +
                       ':' + self._redis_cache.attr_redis_endpoint_port)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Build the S3-event measurement stack.

        Two buckets feed object-created events to a measuring lambda —
        one indirectly through SNS, one through a direct S3 trigger. A
        generator lambda writes objects into both buckets, and SSM
        parameters publish the resource names for external consumers.
        """
        super().__init__(scope, id, **kwargs)

        # Both lambdas ship the same source directory.
        src_path = os.path.join(os.path.dirname(__file__), "..", "src")

        sns_bucket = s3.Bucket(self,
                               "bucket-with-sns-integration",
                               removal_policy=core.RemovalPolicy.DESTROY)
        lambda_bucket = s3.Bucket(self,
                                  "bucket-with-lambda-integration",
                                  removal_policy=core.RemovalPolicy.DESTROY)

        info_topic = sns.Topic(self, "lambda-info-topic")
        # Route object-created events from the first bucket through SNS.
        sns_bucket.add_event_notification(
            event=s3.EventType.OBJECT_CREATED,
            dest=s3_notifications.SnsDestination(info_topic))

        table = dynamodb.Table(
            self,
            "measurement-table",
            partition_key=dynamodb.Attribute(
                name="PK", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="SK",
                                        type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY)

        generator = _lambda.Function(
            self,
            "s3-event-generator",
            code=_lambda.Code.from_asset(path=src_path),
            environment={
                "BUCKET_WITH_LAMBDA": lambda_bucket.bucket_name,
                "BUCKET_WITH_SNS": sns_bucket.bucket_name,
            },
            handler="s3_event_generator.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(300),
            memory_size=1024,
        )
        lambda_bucket.grant_write(generator)
        sns_bucket.grant_write(generator)

        measurer = _lambda.Function(
            self,
            "measure-lambda",
            code=_lambda.Code.from_asset(path=src_path),
            environment={"MEASUREMENT_TABLE": table.table_name},
            handler="measure_lambda.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(50),
            memory_size=1024,
        )
        table.grant_read_write_data(measurer)

        # The measuring lambda is fed by both event paths.
        measurer.add_event_source(
            lambda_event_sources.SnsEventSource(info_topic))
        measurer.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=lambda_bucket,
                events=[s3.EventType.OBJECT_CREATED]))

        # Publish resource identifiers for out-of-stack consumers.
        for construct_id, value, param_name in (
                ("bucket-with-sns-parameter", sns_bucket.bucket_name,
                 BUCKET_WITH_SNS_PARAMETER),
                ("bucket-with-lambda-parameter", lambda_bucket.bucket_name,
                 BUCKET_WITH_LAMBDA_PARAMETER),
                ("measurement-table-parameter", table.table_name,
                 MEASUREMENT_TABLE_PARAMETER),
                ("generator-function-name-parameter",
                 generator.function_name,
                 GENERATOR_FUNCTION_NAME_PARAMETER),
        ):
            ssm.StringParameter(self,
                                construct_id,
                                string_value=value,
                                parameter_name=param_name)
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        domain_arn: str,
        aws_cli_profile: str = None,
        cw_trigger_sns_arn_list: list = None,
        enable_es_api_output: bool = False,
        es_api_output_sns_arn: str = None,
        **kwargs,
    ) -> None:
        """Create CloudWatch alarms (with SNS actions) for an Elasticsearch
        domain, and optionally a lambda that dumps ``_cat`` API output when
        an alarm fires.

        :param domain_arn: ARN of the ES domain being monitored.
        :param aws_cli_profile: profile used when inspecting the domain.
        :param cw_trigger_sns_arn_list: SNS topic ARNs to attach as alarm
            actions (default: none).
        :param enable_es_api_output: create the ``_cat`` API lambda.
        :param es_api_output_sns_arn: topic the lambda publishes output to.
        """
        super().__init__(scope, id, **kwargs)

        # BUG FIX: the default was previously a mutable `[]`, which Python
        # evaluates once and shares across all calls. Normalize None -> []
        # here instead.
        cw_trigger_sns_arn_list = cw_trigger_sns_arn_list or []

        # Configuring certain aspects of the stack based on the ES domain details
        self.configure(domain_arn, aws_cli_profile, cw_trigger_sns_arn_list)

        gte = cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD

        # Alarm definitions, forwarded positionally and unchanged to
        # create_cw_alarm_with_action: (metric name, threshold, comparison
        # operator, then two period/evaluation settings and a statistic —
        # see that method for the exact semantics of the numeric fields).
        alarm_specs = [
            ("ClusterStatus.red", 1, gte, 1, 1, "max"),
            ("ClusterStatus.yellow", 1, gte, 1, 1, "max"),
            # Threshold is 25% of the current data-node volume size, in MB.
            ("FreeStorageSpace", self._volume_size * 0.25 * 1000,
             cloudwatch.ComparisonOperator.LESS_THAN_OR_EQUAL_TO_THRESHOLD,
             1, 1, "min"),
            ("ClusterIndexWritesBlocked", 1, gte, 5, 1, "max"),
            ("Nodes", self._node_count,
             cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD,
             1440, 1, "min"),
            ("AutomatedSnapshotFailure", 1, gte, 1, 1, "max"),
            ("CPUUtilization", 80, gte, 15, 3, "avg"),
            ("JVMMemoryPressure", 80, gte, 5, 3, "max"),
        ]

        # MasterCPUUtilization & MasterJVMMemoryPressure apply only when a
        # dedicated master is enabled.
        if self._is_dedicated_master_enabled:
            alarm_specs += [
                ("MasterCPUUtilization", 50, gte, 15, 3, "avg"),
                ("MasterJVMMemoryPressure", 80, gte, 15, 1, "max"),
            ]

        # KMSKeyError & KMSKeyInaccessible apply only when encryption at
        # rest is enabled.
        if self._is_encryption_at_rest_enabled:
            alarm_specs += [
                ("KMSKeyError", 1, gte, 1, 1, "max"),
                ("KMSKeyInaccessible", 1, gte, 1, 1, "max"),
            ]

        for spec in alarm_specs:
            self.create_cw_alarm_with_action(*spec, self._sns_topic_list)

        if enable_es_api_output:
            # Creating a Lambda function to invoke ES _cat APIs corresponding
            # to the triggered CW Alarm. Common settings first; VPC placement
            # is layered on only for VPC-based domains.
            lambda_kwargs = dict(
                runtime=_lambda.Runtime.PYTHON_3_7,
                code=_lambda.Code.from_asset("lambda"),
                handler="lambda_function.lambda_handler",
                timeout=core.Duration.minutes(15),
                environment={
                    "DOMAIN_ENDPOINT": self._domain_endpoint,
                    "DOMAIN_ARN": domain_arn
                },
            )

            if self._is_vpc_domain:
                self._lambda_vpc = ec2.Vpc.from_lookup(self,
                                                       self._vpc,
                                                       vpc_id=self._vpc)

                self._lambda_security_group = ec2.SecurityGroup.from_security_group_id(
                    self,
                    self._security_group,
                    security_group_id=self._security_group)

                self._lambda_security_group.connections.allow_internally(
                    port_range=ec2.Port.tcp(443),
                    description=
                    "Ingress rule that allows the aws_es_cw_alarms Lambda to talk to a VPC based ES domain"
                )

                lambda_kwargs.update(
                    vpc=self._lambda_vpc,
                    vpc_subnets=ec2.SubnetSelection(
                        subnet_type=ec2.SubnetType.PRIVATE),
                    security_group=self._lambda_security_group)

            self._lambda_func = _lambda.Function(self, "CWAlarmHandler",
                                                 **lambda_kwargs)

            # A Custom IAM Policy statement to grant _cat API access to the Lambda function
            self._es_policy_statement = iam.PolicyStatement(
                actions=["es:ESHttpHead", "es:ESHttpGet"],
                effect=iam.Effect.ALLOW,
                resources=[domain_arn + "/*"],
            )

            self._lambda_func.add_to_role_policy(self._es_policy_statement)

            # Attaching a SNS topic provided by the user as the trigger for the Lambda function
            # If more than one SNS topic is provided, we will attach just the first SNS topic as the trigger
            self._lambda_func.add_event_source(
                _lambda_event_source.SnsEventSource(self._sns_topic_list[0]))

            if es_api_output_sns_arn:
                self._lambda_func.add_environment("SNS_TOPIC_ARN",
                                                  es_api_output_sns_arn)

                # Adding SNS Publish permission since the Lambda function is configured to post
                # the output of _cat APIs to the same SNS topic that triggers the function
                self._sns_publish_policy_statement = iam.PolicyStatement(
                    actions=["SNS:Publish"],
                    effect=iam.Effect.ALLOW,
                    resources=[es_api_output_sns_arn],
                )

                self._lambda_func.add_to_role_policy(
                    self._sns_publish_policy_statement)
# ----- Example #15 -----
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Detect and respond to repeated failed PostgreSQL logins on an
        Aurora cluster.

        A metric filter on the cluster's postgresql log feeds a CloudWatch
        alarm; the alarm publishes to SNS, which triggers a response lambda
        (Slack notify + NACL ban) and a Step Functions state machine that
        unbans after an hour. All required settings come from CDK context;
        if any is missing, only an error is logged and nothing is created.
        """
        super().__init__(scope, id, **kwargs)

        # Deployment settings supplied via CDK context (cdk.json / --context).
        CLUSTER_NAME = self.node.try_get_context("cluster_name")
        NOTIFY_EMAIL = self.node.try_get_context("notify_email")
        SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")

        if not CLUSTER_NAME or not NOTIFY_EMAIL or not SLACK_WEBHOOK_URL:
            logger.error(
                f"Required context variables for {id} were not provided!")
        else:
            # Get the log group of our postgres instance
            log_group = logs.LogGroup.from_log_group_name(
                self,
                "InAur01DetectionLogGroup",
                f"/aws/rds/cluster/{CLUSTER_NAME}/postgresql",
            )

            # Create new metric
            metric = cloudwatch.Metric(
                namespace="LogMetrics",
                metric_name="InAur01DetectionFailedDbLoginAttempts",
            )

            # Apply metric filter
            # Filter all metrics of failed login attempts in log
            logs.MetricFilter(
                self,
                "InAur01DetectionMetricFilter",
                log_group=log_group,
                metric_namespace=metric.namespace,
                metric_name=metric.metric_name,
                filter_pattern=logs.FilterPattern.all_terms(
                    "FATAL:  password authentication failed for user"),
                metric_value="1",
            )

            # Create new SNS topic
            topic = sns.Topic(self, "InAur01DetectionTopic")

            # Add email subscription
            topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))

            # Create new alarm for metric
            # Alarm will trigger if there is >= 10 failed login attempts
            # over a period of 30 seconds.
            alarm = cloudwatch.Alarm(
                self,
                "InAur01DetectionAlarm",
                metric=metric,
                threshold=10,
                evaluation_periods=1,
                period=core.Duration.seconds(30),
                datapoints_to_alarm=1,
                statistic="sum",
            )

            # Add SNS action to alarm
            alarm.add_alarm_action(cw_actions.SnsAction(topic))

            # Create unban lambda
            # Both lambdas below load their handlers from this directory.
            lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                           "in_aur_01")
            unban_lambda = _lambda.Function(
                self,
                "InAur01ResponseUnbanFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="unban_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
            )
            # Assign EC2 permissions to lambda
            # (needed to remove the NACL entry that banned the IP)
            unban_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ec2:DeleteNetworkAclEntry"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))

            # Create stepfunction
            # Define a second state machine to unban the blacklisted IP after 1 hour
            wait_step = sfn.Wait(
                self,
                "InAur01ResponseStepWait",
                time=sfn.WaitTime.duration(core.Duration.hours(1)),
            )
            unban_step = sfn.Task(
                self,
                "InAur01ResponseStepUnban",
                task=tasks.RunLambdaTask(
                    unban_lambda,
                    integration_pattern=sfn.ServiceIntegrationPattern.
                    FIRE_AND_FORGET,
                ),
                parameters={"Payload.$": "$"},
            )
            statemachine = sfn.StateMachine(
                self,
                "InAur01ResponseUnbanStateMachine",
                definition=wait_step.next(unban_step),
                timeout=core.Duration.hours(1.5),
            )

            # Create lambda function
            # Response lambda: notifies Slack and kicks off the unban
            # state machine (per its environment wiring below).
            lambda_func = _lambda.Function(
                self,
                "InAur01ResponseFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="response_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "webhook_url": SLACK_WEBHOOK_URL,
                    "unban_sm_arn": statemachine.state_machine_arn,
                    "cluster_name": CLUSTER_NAME,
                },
            )
            # AWS CDK has a bug where it would not add the correct permission
            # to the lambda for Cloudwatch log subscription to invoke it.
            # Hence, we need to manually add permission to lambda.
            lambda_func.add_permission(
                "InAur01ResponseFunctionInvokePermission",
                principal=iam.ServicePrincipal("logs.amazonaws.com"),
                action="lambda:InvokeFunction",
                source_arn=log_group.log_group_arn + ":*",
            )
            # Assign permissions to response lambda
            # (scoped to the unban state machine only)
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "states:StartExecution",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[statemachine.state_machine_arn],
                ))
            # Assign RDS Read-only permissions to lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["rds:Describe*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))
            # Assign EC2 permissions to lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "ec2:Describe*",
                        "ec2:CreateNetworkAclEntry",
                        "ec2:DeleteNetworkAclEntry",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))
            # Assign CloudWatch logs permissions to lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "cloudwatch:Get*",
                        "cloudwatch:Describe*",
                        "logs:FilterLogEvents",
                        "logs:DescribeMetricFilters",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))

            # Trigger the response lambda from the alarm's SNS topic.
            sns_event_source = lambda_event_sources.SnsEventSource(topic)
            lambda_func.add_event_source(sns_event_source)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Run daily Prowler security scans across accounts on Fargate.

        A scheduled lambda pushes one SQS message per active account; a
        queue-depth alarm topic triggers a lambda that launches Fargate
        tasks running the Prowler container, which write results to S3.
        A second lambda clears the queue when the alarm returns to OK.
        """
        super().__init__(scope, id, **kwargs)

        # Work queue: one message per account to scan. Long polling (20s)
        # and a 2h visibility timeout to cover a full Prowler run.
        queue = sqs.Queue(self,
                          "StartProwlerScan",
                          receive_message_wait_time=core.Duration.seconds(20),
                          visibility_timeout=core.Duration.seconds(7200))
        push_all_active_accounts_onto_queue_lambda_function = lambda_.Function(
            self,
            "PushAllActiveAccountsOntoQueue",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset("lambda/pushAllActiveActivesOntoQueue"),
            handler="lambda_function.lambda_handler",
            environment={"SQS_QUEUE_URL": queue.queue_url})
        event_lambda_target = events_targets.LambdaFunction(
            handler=push_all_active_accounts_onto_queue_lambda_function)
        queue.grant_send_messages(
            push_all_active_accounts_onto_queue_lambda_function)
        # Kick off the enqueue lambda once per day.
        schedule = events.Schedule.rate(core.Duration.days(1))
        events.Rule(self,
                    "DailyTrigger",
                    schedule=schedule,
                    targets=[event_lambda_target])

        # ECS/Fargate infrastructure running the Prowler container image,
        # built locally from the adjacent "docker" directory.
        vpc = ec2.Vpc(self, "Vpc")
        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)
        logging = ecs.AwsLogDriver(stream_prefix="ProwlerTask",
                                   log_retention=logs.RetentionDays.ONE_DAY)
        results_bucket = s3.Bucket(self, "ResultsBucket")
        dockerfile_directory = path.join(path.dirname(path.realpath(__file__)),
                                         "docker")
        image = ecr_assets.DockerImageAsset(self,
                                            "ProwlerImageBuild",
                                            directory=dockerfile_directory)
        prowler_task = ecs.FargateTaskDefinition(self,
                                                 "ProwlerTaskDefinition",
                                                 cpu=256,
                                                 memory_limit_mib=512)
        prowler_task.add_container(
            "Prowler_image",
            image=ecs.ContainerImage.from_docker_image_asset(image),
            logging=logging,
            environment={
                "RESULTS_BUCKET": results_bucket.bucket_name,
                "SQS_QUEUE_URL": queue.queue_url
            })
        # Task role: read-only scan access, plus the ability to delete the
        # processed queue message, upload results, and assume into target
        # accounts.
        task_role = prowler_task.task_role
        task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name("ReadOnlyAccess"))
        queue.grant(task_role, "sqs:DeleteMessage")
        results_bucket.grant_put(task_role)
        task_role.attach_inline_policy(
            iam.Policy(self,
                       "AssumeRolePermissions",
                       statements=[
                           iam.PolicyStatement(actions=["sts:AssumeRole"],
                                               effect=iam.Effect.ALLOW,
                                               resources=["*"])
                       ]))
        # Lambda that starts Fargate tasks to drain the work queue.
        run_fargate_task_lambda_function = lambda_.Function(
            self,
            "RunFargateTask",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset("lambda/runFargateTask"),
            handler="lambda_function.lambda_handler",
            environment={
                "CLUSTER_ARN":
                cluster.cluster_arn,
                "SUBNET_IDS":
                json.dumps(
                    [subnet.subnet_id for subnet in vpc.private_subnets]),
                "QUEUE_URL":
                queue.queue_url,
                "TASK_DEFINITION_ARN":
                prowler_task.task_definition_arn
            })
        queue.grant(run_fargate_task_lambda_function, "sqs:GetQueueAttributes")
        # Alarm topic -> SQS -> lambda chain that triggers task launches.
        sqs_alarm_topic = sns.Topic(self, "SqsAlarmTopic")
        sqs_alarm_topic.grant_publish(run_fargate_task_lambda_function)
        sqs_alarm_queue = sqs.Queue(
            self,
            "SqsAlarmQueue",
            retention_period=core.Duration.days(14),
            visibility_timeout=core.Duration.minutes(3))
        sqs_alarm_topic.add_subscription(
            sns_subscriptions.SqsSubscription(sqs_alarm_queue))
        run_fargate_task_lambda_function.add_event_source(
            lambda_event_sources.SqsEventSource(sqs_alarm_queue))
        # Permissions to run the Prowler task and pass its roles to ECS.
        run_fargate_task_lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["ecs:RunTask"],
                                effect=iam.Effect.ALLOW,
                                resources=[prowler_task.task_definition_arn]))
        run_fargate_task_lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["iam:PassRole"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    prowler_task.execution_role.role_arn,
                                    prowler_task.task_role.role_arn
                                ]))
        # When the alarm returns to OK, this lambda purges leftover alarm
        # messages so stale events don't relaunch tasks.
        sqs_ok_topic = sns.Topic(self, "SqsOkTopic")
        clear_alarm_queue = lambda_.Function(
            self,
            "ClearAlarmQueue",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset("lambda/clearAlarmQueue"),
            handler="lambda_function.lambda_handler",
            environment={"QUEUE_URL": sqs_alarm_queue.queue_url})
        clear_alarm_queue.add_event_source(
            lambda_event_sources.SnsEventSource(sqs_ok_topic))
        sqs_alarm_queue.grant(clear_alarm_queue, "sqs:DeleteMessage")

        alarm = cloudwatch.Alarm(
            self,
            "FargateTaskTrigger",
            metric=queue.metric_approximate_number_of_messages_visible(
                period=core.Duration.seconds(60), statistic="max"),
            evaluation_periods=1,
            threshold=1,
            alarm_description="Run a fargate task when there "
            "are messages in the queue",
            treat_missing_data=cloudwatch.TreatMissingData.IGNORE)
        alarm.add_alarm_action(cloudwatch_actions.SnsAction(sqs_alarm_topic))
        alarm.add_ok_action(cloudwatch_actions.SnsAction(sqs_ok_topic))