Example #1
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create a VPC and cluster
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        # Create Fargate Service
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "sample-app",
            cluster=cluster,
            task_image_options={
                'image':
                ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            })

        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=ec2.Port.tcp(80),
                                description="Allow http inbound from VPC")

        # Setup AutoScaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=Duration.seconds(60),
            scale_out_cooldown=Duration.seconds(60),
        )

        CfnOutput(self,
                  "LoadBalancerDNS",
                  value=fargate_service.load_balancer.load_balancer_dns_name)
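
The snippets in this collection omit their import blocks. A minimal sketch of what Example #1 assumes (CDK v2 module aliases inferred from the code; most later examples follow the same conventions):

from aws_cdk import App, CfnOutput, Duration, Stack
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_ecs as ecs
from aws_cdk import aws_ecs_patterns as ecs_patterns
from constructs import Construct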
Example #2
    def _create_mlflow_server(self):
        """
        Create a Fargate task for the MLflow server
        """
        cluster = ecs.Cluster(scope=self, id="CLUSTER", cluster_name=self.cluster_name, vpc=self.vpc)

        task_id = f"{self.stack_name}-{self.component_id}-MLflow"
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=task_id,
            task_role=self.role,
        )

        container_id = f"{self.stack_name}-{self.component_id}-container"
        container = task_definition.add_container(
            id=container_id,
            image=ecs.ContainerImage.from_asset(
                directory="cdk_ml_cicd_pipeline/resources/visualization/mlflow/container",
            ),
            environment={
                "BUCKET": f"s3://{self.artifact_bucket.bucket_name}",
                "HOST": self.database.db_instance_endpoint_address,
                "PORT": str(self.port),
                "DATABASE": self.dbname,
                "USERNAME": self.username,
            },
            secrets={"PASSWORD": ecs.Secret.from_secrets_manager(self.db_password_secret)},
            logging=ecs.LogDriver.aws_logs(stream_prefix='mlflow')
        )
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service_id = f"{self.stack_name}-{self.component_id}-" + "mlflow-fargate"
        self.fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id=fargate_service_id,
            service_name=self.service_name,
            cluster=cluster,
            task_definition=task_definition,
        )

        # Setup security group
        self.fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(self.vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(5000),
            description="Allow inbound from VPC for mlflow",
        )

        # Setup autoscaling policy
        autoscaling_policy_id = f"{self.stack_name}-{self.component_id}-" + "autoscaling-policy"
        scaling = self.fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id=autoscaling_policy_id,
            target_utilization_percent=70,
            scale_in_cooldown=Duration.seconds(60),
            scale_out_cooldown=Duration.seconds(60),
        )
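
Once deployed, the tracking server is reachable through the NLB; the pattern's default listener port is 80, forwarded here to container port 5000. A hypothetical client-side sketch, with a placeholder for the load balancer DNS name:

import mlflow

mlflow.set_tracking_uri("http://<nlb-dns-name>")  # placeholder for the NLB DNS output

with mlflow.start_run():
    mlflow.log_metric("accuracy", 0.93)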
Example #3
    def create_lambda(self, function_name: str,
                      custom_role: iam_.Role = None) -> lambda_.Function:
        if custom_role is None:
            custom_role = self.create_default_role(function_name)

        return lambda_.Function(
            self,
            f"{self.id}-{function_name}",
            code=lambda_.Code.from_asset(
                lambda_dir,
                exclude=[
                    "*.test.py",
                    "requirements.txt",
                ],
            ),
            current_version_options=lambda_.VersionOptions(
                removal_policy=RemovalPolicy.DESTROY,
                retry_attempts=2,
            ),
            function_name=f"{self.id}-{function_name}",
            handler=f"{function_name}.lambda_handler",
            log_retention=RetentionDays.ONE_DAY,
            role=custom_role,
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(900),
            tracing=lambda_.Tracing.DISABLED,
        )
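
A hypothetical call site for this helper; the function name is illustrative, and omitting custom_role falls back to create_default_role:

fn = self.create_lambda("ingest", custom_role=None)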
Example #4
def create_hosted_zone(scope: InfraStack) -> route53.HostedZone:
    domain = scope.context.domain_name
    hosted_zone = route53.HostedZone(
        scope,
        id=domain,
        vpcs=None,
        comment=None,
        query_logs_log_group_arn=None,
        zone_name=domain,
    )

    route53.MxRecord(
        scope,
        scope.context.construct_id("MX-Gmail-1"),
        values=[
            route53.MxRecordValue(host_name='ASPMX.L.GOOGLE.COM', priority=1),
            route53.MxRecordValue(host_name='ALT1.ASPMX.L.GOOGLE.COM',
                                  priority=5),
            route53.MxRecordValue(host_name='ALT2.ASPMX.L.GOOGLE.COM',
                                  priority=5),
            route53.MxRecordValue(host_name='ALT3.ASPMX.L.GOOGLE.COM',
                                  priority=10),
            route53.MxRecordValue(host_name='ALT4.ASPMX.L.GOOGLE.COM',
                                  priority=10),
        ],
        zone=hosted_zone,
        ttl=Duration.seconds(3600),
    )

    return hosted_zone
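
Other record types in the zone follow the same shape; an illustrative A record under the same assumed helpers (the address is a TEST-NET placeholder):

route53.ARecord(
    scope,
    scope.context.construct_id("A-Root"),  # construct_id helper assumed from above
    zone=hosted_zone,
    target=route53.RecordTarget.from_ip_addresses("192.0.2.10"),
)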
Example #5
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Lambda Function
        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        lambdaFn = lambda_.Function(
            self, "Singleton",
            code=lambda_.InlineCode(handler_code),
            handler="index.main",
            timeout=Duration.seconds(10),
            runtime=lambda_.Runtime.PYTHON_3_9,
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY
        )

        # EventBridge Rule
        rule = events.Rule(
            self, "Rule",
        )
        rule.add_event_pattern(
            source=["cdk.myApp"],
            detail_type=["transaction"]
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))
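
The rule above matches custom application events rather than a schedule; a hedged boto3 sketch of emitting a matching event (the detail payload is illustrative):

import json
import boto3

boto3.client("events").put_events(Entries=[{
    "Source": "cdk.myApp",        # must match the rule's source
    "DetailType": "transaction",  # must match the rule's detail-type
    "Detail": json.dumps({"amount": 100}),
}])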
Example #6
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "VsamToDynamoQueue",
                           visibility_timeout=Duration.seconds(300),
                           queue_name='VsamToDynamoQueue')

        dynamoTable = _dyn.Table(
            self,
            "CLIENT",
            partition_key=_dyn.Attribute(name="CLIENT-KEY",
                                         type=_dyn.AttributeType.STRING),
            table_name="CLIENT",
        )

        # Create the Lambda function to subscribe to SQS and store the record in DynamoDB
        # The source code is in the './lambda_fns' directory
        lambda_fn = _lambda.Function(
            self,
            "SQSToDynamoFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="insertRecord.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
        )

        dynamoTable.grant_write_data(lambda_fn)

        queue.grant_consume_messages(lambda_fn)
        lambda_fn.add_event_source(_event.SqsEventSource(queue))
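
The asset code under 'lambda_fns' is not included; a hypothetical insertRecord.py consistent with the wiring above, assuming message bodies are JSON items keyed by CLIENT-KEY:

import json
import boto3

table = boto3.resource("dynamodb").Table("CLIENT")

def handler(event, context):
    for record in event["Records"]:  # SQS delivers messages in batches
        table.put_item(Item=json.loads(record["body"]))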
Example #7
    def create_lambda_build_image(self) -> Resource:
        """Greengrassのコンポーネント用に推論アプリのdockerイメージをビルドするcodebuildを実行するLambda

        Returns:
            Resource: lambda
        """

        lambdaFn_name = self.get_lambda_name("build_image")
        role_name = self.get_role_name("build_image")

        lambda_role = aws_iam.Role(
            self,
            id=role_name,
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name=role_name,
            path="/service-role/",
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AWSCodeBuildDeveloperAccess")
            ])
        lambda_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "AllowDynamoDBAccess",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(actions=[
                        "dynamodb:PutItem", "dynamodb:GetItem",
                        "dynamodb:UpdateItem"
                    ],
                                            resources=[self._table.table_arn])
                ])))
        lambdaFn_path = self.get_lambda_path("build_image")

        lambdaFn = aws_lambda.Function(
            self,
            id=lambdaFn_name,
            function_name=lambdaFn_name,
            code=aws_lambda.AssetCode(path=lambdaFn_path),
            handler="lambda_handler.handler",
            timeout=Duration.seconds(10),
            runtime=aws_lambda.Runtime.PYTHON_3_9,
            description="コンポーネント用のイメージを作成",
            role=lambda_role,
            environment={
                "TABLE_NAME":
                self._table.table_name,
                "CODEBUILD_PROJECT_NAME":
                self._docker_image_buildproject.project_name,
                "COMPONENT_IMAGE_REPOSITORY":
                self._component_ecr.repository_name,
                "COMPONENT_APP_SOURCE_REPOSITORY":
                self._component_source_repository.repository_clone_url_grc,
                "COMPONENT_BASE_IMAGE_REPOSITORY":
                self._component_base_ecr.repository_uri
            })
        self._table.grant_read_write_data(lambdaFn)

        return lambdaFn
Example #8
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Lambda Function
        lambdaFn = _lambda.Function(self,
                                    "SNSEventHandler",
                                    runtime=_lambda.Runtime.PYTHON_3_9,
                                    code=_lambda.Code.from_asset("lambda"),
                                    handler="handler.main",
                                    timeout=Duration.seconds(10))

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(self,
                      'logs',
                      log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
                      removal_policy=RemovalPolicy.DESTROY,
                      retention=logs.RetentionDays.ONE_DAY)

        # SNS topic
        topic = sns.Topic(self,
                          'sns-to-lambda-topic-test',
                          display_name='My SNS topic')

        # subscribe Lambda to SNS topic
        topic.add_subscription(subs.LambdaSubscription(lambdaFn))

        # Output information about the created resources
        CfnOutput(self,
                  'snsTopicArn',
                  value=topic.topic_arn,
                  description='The arn of the SNS topic')
        CfnOutput(self,
                  'functionName',
                  value=lambdaFn.function_name,
                  description='The name of the handler function')
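
To exercise the subscription end to end, publish a message to the topic; a minimal boto3 sketch using the snsTopicArn output (the ARN is a placeholder):

import boto3

boto3.client("sns").publish(
    TopicArn="<snsTopicArn-output>",  # placeholder for the stack output
    Message="hello from SNS",
)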
Example #9
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        lambdaFn = lambda_.Function(
            self, "Singleton",
            code=lambda_.InlineCode(handler_code),
            handler="index.main",
            timeout=Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
        )

        # Run every weekday (Mon-Fri) at 6PM UTC
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
        rule = events.Rule(
            self, "Rule",
            schedule=events.Schedule.cron(
                minute='0',
                hour='18',
                month='*',
                week_day='MON-FRI',
                year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))
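
Schedule also accepts rate-based forms; two equivalent hourly spellings under the same imports, for comparison:

events.Schedule.rate(Duration.hours(1))      # typed rate helper
events.Schedule.expression("rate(1 hour)")   # raw scheduler expression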
Example #10
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        # Create a reference to an existing Kinesis stream
        kinesis_stream = kinesis.Stream.from_stream_arn(
            self, 'KinesisStream',
            Arn.format(
                ArnComponents(resource='stream',
                              service='kinesis',
                              resource_name='my-stream'), self))

        lambdaFn = lambda_.Function(self,
                                    'Singleton',
                                    handler='index.main',
                                    code=lambda_.InlineCode(handler_code),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    timeout=Duration.seconds(300))

        # Update Lambda Permissions To Use Stream
        kinesis_stream.grant_read(lambdaFn)

        # Create New Kinesis Event Source
        kinesis_event_source = event_sources.KinesisEventSource(
            stream=kinesis_stream,
            starting_position=lambda_.StartingPosition.LATEST,
            batch_size=1)

        # Attach New Event Source To Lambda
        lambdaFn.add_event_source(kinesis_event_source)
Example #11
def create_target_group(self, vpc, tg_name):
    tg = _elbv2.ApplicationTargetGroup(
        self, tg_name,
        port=80,
        target_type=_elbv2.TargetType.IP,
        target_group_name=tg_name,
        vpc=vpc,
        health_check=_elbv2.HealthCheck(path='/login'),
    )
    tg.enable_cookie_stickiness(Duration.seconds(1800))
    return tg
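
A hypothetical call site attaching the returned target group to an existing ALB listener (the listener itself is created outside this snippet):

tg = self.create_target_group(vpc, "web-tg")
listener.add_target_groups("WebTg", target_groups=[tg])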
Example #12
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "MyQueue",
                           visibility_timeout=Duration.seconds(300))

        # Create the AWS Lambda function to subscribe to Amazon SQS queue
        # The source code is in './lambda' directory
        lambda_function = _lambda.Function(
            self,
            "MyLambdaFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="get_messages.handler",
            code=_lambda.Code.from_asset("lambda"),
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambda_function.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)
        # Grant permission to the AWS Lambda function to consume messages from the Amazon SQS queue
        queue.grant_consume_messages(lambda_function)

        # Configure the Amazon SQS queue to trigger the AWS Lambda function
        lambda_function.add_event_source(_event.SqsEventSource(queue))

        CfnOutput(self,
                  "FunctionName",
                  value=lambda_function.function_name,
                  export_name='FunctionName',
                  description='Function name')

        CfnOutput(self,
                  "QueueName",
                  value=queue.queue_name,
                  export_name='QueueName',
                  description='SQS queue name')

        CfnOutput(self,
                  "QueueArn",
                  value=queue.queue_arn,
                  export_name='QueueArn',
                  description='SQS queue ARN')

        CfnOutput(self,
                  "QueueUrl",
                  value=queue.queue_url,
                  export_name='QueueUrl',
                  description='SQS queue URL')
Example #13
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(
            self, "%name.PascalCased%Queue",
            visibility_timeout=Duration.seconds(300),
        )

        topic = sns.Topic(
            self, "%name.PascalCased%Topic"
        )

        topic.add_subscription(subs.SqsSubscription(queue))
Example #14
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(
            self, "SqsFargateCdkPythonQueue",
            visibility_timeout=Duration.seconds(300)
        )

        nat_provider = ec2.NatProvider.instance(
            instance_type=ec2.InstanceType("t3.small")
        )

        vpc = ec2.Vpc(self, "SqsFargateCdkPythonVpc", nat_gateway_provider=nat_provider, nat_gateways=1)

        cluster = ecs.Cluster(self, "SqsFargateCdkPythonCluster", vpc=vpc)

        role = iam.Role(self, "SqsFargateCdkPythonRole", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        queue.grant_consume_messages(role)

        fargate_task_definition = ecs.FargateTaskDefinition(self, "SqsFargateCdkPythonFargateTaskDefinition",
                                                            memory_limit_mib=512, cpu=256,
                                                            task_role=role)

        aws_log_drive = ecs.AwsLogDriver(stream_prefix="sqs_fargate_cdk_python")

        fargate_task_definition.add_container("SqsFargateCdkPythonContainer",
                                              image=ecs.ContainerImage.from_asset("./docker"),
                                              environment={"QUEUE_URL": queue.queue_url}, logging=aws_log_drive)

        fargate_service = ecs.FargateService(self, "SqsFargateCdkPythonFargateService", cluster=cluster,
                                             task_definition=fargate_task_definition, desired_count=0)

        auto_scale_task_count = fargate_service.auto_scale_task_count(min_capacity=0, max_capacity=1)
        auto_scale_task_count.scale_on_metric("SqsFargateCdkPythonScaleOnMetric",
                                              metric=queue.metric_approximate_number_of_messages_visible(),
                                              adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
                                              cooldown=Duration.seconds(300),
                                              scaling_steps=[{"upper": 0, "change": -1}, {"lower": 1, "change": +1}])
Example #15
 def create_standard_lambda(self, scope, name: str, layers: list = None, env: dict = None, duration_seconds: int = 20):  # None instead of mutable default args
     id = camel_case_upper(f"{self.namespace}_{name}_Lambda")
     function_name = f"{self.namespace}_{name}"
     # TODO: switch to aws_cdk.aws_lambda_python -> PythonFunction
     return lmbda.Function(scope=scope,
                           id=id,
                           function_name=function_name,
                           handler=f"{name}.handler",
                           runtime=lmbda.Runtime.PYTHON_3_8,
                           code=lmbda.Code.from_asset(f"lambdas/functions/{name}"),
                           environment=env,
                           timeout=Duration.seconds(duration_seconds),
                           layers=layers)
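
A hypothetical invocation of this factory; the name and environment values are illustrative:

fn = self.create_standard_lambda(self, "ingest", env={"STAGE": "dev"}, duration_seconds=30)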
Example #16
    def create_lambda_check_image_status(self) -> Resource:
        """dockerイメージのビルド状況を確認するLambda

        Returns:
            Resource: lambda
        """

        lambdaFn_name = self.get_lambda_name("check_image_status")
        role_name = self.get_role_name("check_image_status")

        lambda_role = aws_iam.Role(
            self,
            id=role_name,
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name=role_name,
            path="/service-role/",
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ])
        lambda_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "AllowCodeBuildStatus",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        actions=["codebuild:BatchGetBuilds"],
                        resources=[
                            self._docker_image_buildproject.project_arn
                        ]),
                    aws_iam.PolicyStatement(actions=[
                        "dynamodb:PutItem", "dynamodb:GetItem",
                        "dynamodb:UpdateItem"
                    ],
                                            resources=[self._table.table_arn])
                ])))

        lambdaFn_path = self.get_lambda_path("check_image_status")
        lambdaFn = aws_lambda.Function(
            self,
            id=lambdaFn_name,
            function_name=lambdaFn_name,
            code=aws_lambda.AssetCode(path=lambdaFn_path),
            handler="lambda_handler.handler",
            timeout=Duration.seconds(10),
            runtime=aws_lambda.Runtime.PYTHON_3_9,
            description="コンポーネント用のイメージのビルド結果を確認",
            role=lambda_role,
            environment={"TABLE_NAME": self._table.table_name})

        return lambdaFn
Example #17
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        phoneNumber = CfnParameter(self,
                                   "phoneNumber",
                                   type="String",
                                   description="Recipient phone number")
        tenDLC = CfnParameter(self,
                              "tenDLC",
                              type="String",
                              description="10DLC origination number")

        # We create a log group so it will be gracefully cleaned up on a destroy event.  By default
        # logs never expire and won't be removed.
        lambdaLogGroup = logs.LogGroup(
            self,
            'SMSPublisherFunctionLogGroup',
            log_group_name='/aws/lambda/SMSPublisherFunction',
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.FIVE_DAYS,
        )

        SMSPublisherFunction = aws_lambda.Function(
            self,
            'SMSPublisherFunction',
            code=aws_lambda.Code.from_asset('src'),
            function_name='SMSPublisherFunction',
            handler='app.handler',
            runtime=aws_lambda.Runtime.NODEJS_12_X,
            timeout=Duration.seconds(3),
            memory_size=128,
            environment={
                'phoneNumber': phoneNumber.value_as_string,
                'tenDLC': tenDLC.value_as_string
            },
            initial_policy=[
                # Deny publishing to SNS topics while still allowing direct
                # SMS publishing, which requires the wildcard resource below
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.DENY,
                                    resources=['arn:aws:sns:*:*:*']),
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.ALLOW,
                                    resources=['*'])
            ],
        )
        # Make sure the log group is created prior to the function so CDK doesn't create a new one
        SMSPublisherFunction.node.add_dependency(lambdaLogGroup)

        CfnOutput(self,
                  'SMSPublisherFunctionName',
                  description='SMSPublisherFunction function name',
                  value=SMSPublisherFunction.function_name)
Example #18
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        lambda_code_bucket = s3.Bucket.from_bucket_attributes(
            self, 'LambdaCodeBucket', bucket_name='my-lambda-code-bucket')

        lambdaFn = lambda_.Function(self,
                                    'Singleton',
                                    handler='index.main',
                                    code=lambda_.S3Code(
                                        bucket=lambda_code_bucket,
                                        # the key must reference a zipped deployment package
                                        key='my-lambda.zip'),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    timeout=Duration.seconds(300))
Example #19
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Lambda Function
        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        lambdaFn = _lambda.Function(
            self,
            "IoTTriggerLambda",
            code=_lambda.InlineCode(handler_code),
            handler="index.main",
            timeout=Duration.seconds(10),
            runtime=_lambda.Runtime.PYTHON_3_9,
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(self,
                      'logs',
                      log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
                      removal_policy=RemovalPolicy.DESTROY,
                      retention=logs.RetentionDays.ONE_DAY)

        # IoT Thing
        iot_thing = iot.CfnThing(self, "IoTThing", thing_name="MyIotThing")

        # IoT Rule with SQL, which invokes a Lambda Function
        iot_topic_rule_sql = 'SELECT * FROM "$aws/things/MyIotThing/*"'
        iot_topic_rule = iot.CfnTopicRule(
            self,
            "IoTRule",
            topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty(
                sql=iot_topic_rule_sql,
                actions=[
                    iot.CfnTopicRule.ActionProperty(
                        lambda_=iot.CfnTopicRule.LambdaActionProperty(
                            function_arn=lambdaFn.function_arn))
                ]))

        # Lambda Resource Policy allows invocation from IoT Rule
        lambdaFn.add_permission(
            "GrantIoTRule",
            principal=iam.ServicePrincipal("iot.amazonaws.com"),
            source_arn=iot_topic_rule.attr_arn)
Example #20
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self._account_id = os.environ["CDK_DEFAULT_ACCOUNT"]
        self._region = os.environ["CDK_DEFAULT_REGION"]

        self._queue = _sqs.Queue(
            self,
            "ApigwV2SqsLambdaQueue",
            visibility_timeout=Duration.seconds(300),
        )

        self._sqs_event_source = SqsEventSource(self._queue)

        self._fn = _lambda.Function(
            self,
            'SqsMessageHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='app.handler',
            code=_lambda.Code.from_asset(path='src'),
            timeout=Duration.minutes(3),
            memory_size=128,
            environment={
                'REGION': self._region,
                'ACCOUNT_ID': self._account_id
            },
        )

        self._fn.add_event_source(self._sqs_event_source)

        self._http_api = self._create_apigw_v2()

        self._integration_role = self._create_apigw_to_sqs_role()

        self._send_msg_route = self._create_sqs_send_msg_route()

        # Enable Auto Deploy
        self._stage = self._create_stage()

        # Outputs
        CfnOutput(self,
                  "API Endpoint",
                  description="API Endpoint",
                  value=self._http_api.attr_api_endpoint)
Example #21
    def _create_lambda_for_set_experiment_info_env(self) -> Resource:
        """
        Ref: https://github.com/aws-samples/aws-cdk-examples/tree/master/python/lambda-cron
        """
        lambdaFn_id = f"{self.name_prefix}-set-experiment-info-env-lambda_handler"
        entry = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn_set_experiment_info_env/"

        lambdaFn = lambda_python.PythonFunction(
            scope=self,
            id=lambdaFn_id,
            entry=entry,
            index="lambda_handler.py",
            handler="lambda_handler",
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            role=self.lambda_experiment_info_role)

        return lambdaFn
Example #22
    def _create_lambda(self):
        role = iam.Role(
            self,
            "LambdaPrepareDbRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            description="Role for Lambda preparing RDS",
            role_name=f"{self.name_prefix}-lambda-prepare-db-role",
            managed_policies=[
                #iam.ManagedPolicy.from_aws_managed_policy_name("AWSLambdaBasicExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaVPCAccessExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "SecretsManagerReadWrite"),
            ],
        )

        lambda_function_id = f"{self.name_prefix}-prepare_db_function"
        lambda_function_path = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn/prepare_db_function/"
        lambda_layer_path = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn/lambda_layer/"

        layer = aws_lambda.LayerVersion(
            self, 'Layer', code=aws_lambda.AssetCode(lambda_layer_path))

        lambda_fn = aws_lambda.Function(
            scope=self,
            id=lambda_function_id,
            function_name=lambda_function_id,
            code=aws_lambda.AssetCode(path=lambda_function_path),
            handler="lambda_handler.lambda_handler",
            layers=[layer],
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            role=role,
            description="write some description for this lambda",
            security_groups=[self.security_group],
            vpc=self.vpc,
            vpc_subnets=self.subnet_selection)

        lambda_fn.add_environment('SECRETS_NAME', self.rds.secret.secret_arn)
        lambda_fn.add_environment('REGION_NAME', self.region)
Example #23
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create SQS Queue
        queue = _sqs.Queue(self,
                           "LambdaToSqsQueue",
                           visibility_timeout=Duration.seconds(300),
                           queue_name='LambdaToSqsQueue')

        # Create Lambda function
        lambda_fn = _lambda.Function(
            self,
            "LambdaFunctionToSqs",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="sendSqsMessage.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
        )

        # Grant send message to lambda function
        queue.grant_send_messages(lambda_fn)
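
The sendSqsMessage handler under 'lambda_fns' is not shown; a hypothetical sketch. Note the snippet above does not set a queue-URL environment variable on the function, so QUEUE_URL here is an assumption:

import json
import os

import boto3

def handler(event, context):
    boto3.client("sqs").send_message(
        QueueUrl=os.environ["QUEUE_URL"],  # would need to be set on the function
        MessageBody=json.dumps(event),
    )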
Example #24
    def _create_lambda_for_post_process(self) -> Resource:
        """
        Ref: https://github.com/aws-samples/aws-cdk-examples/tree/master/python/lambda-cron
        """
        lambdaFn_id = f"{self.name_prefix}-post-process-lambda_handler"
        entry = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn_post_process/"
        slack_hook_url = self.context["slack_hook_url"]

        lambdaFn = lambda_python.PythonFunction(
            scope=self,
            id=lambdaFn_id,
            entry=entry,
            index="lambda_handler.py",
            handler="lambda_handler",
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            environment={"slack_hook_url": slack_hook_url})

        return lambdaFn
Example #25
    def _create_lambda_for_manual_approval(self) -> Resource:
        """
        Ref: https://github.com/aws-samples/aws-cdk-examples/tree/master/python/lambda-cron
        """
        lambdaFn_id = f"{self.name_prefix}-manual-approval-lambda_handler"
        entry = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn_manual_approve/"
        topic = self.sns_topic
        slack_hook_url = self.context["slack_hook_url"]

        lambdaFn = lambda_python.PythonFunction(
            scope=self,
            id=lambdaFn_id,
            entry=entry,
            index="lambda_handler.py",
            handler="lambda_handler",
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            environment={"slack_hook_url": slack_hook_url})

        sns_event_source = lambda_event_sources.SnsEventSource(topic)
        lambdaFn.add_event_source(sns_event_source)

        return lambdaFn
Example #26
    def __init__(self, scope: Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        rg_property = network_fw.CfnRuleGroup.RuleGroupProperty(
            rule_variables=None,
            rules_source=network_fw.CfnRuleGroup.RulesSourceProperty(
                stateless_rules_and_custom_actions=network_fw.CfnRuleGroup.
                StatelessRulesAndCustomActionsProperty(stateless_rules=[
                    network_fw.CfnRuleGroup.StatelessRuleProperty(
                        priority=10,
                        rule_definition=network_fw.CfnRuleGroup.
                        RuleDefinitionProperty(
                            actions=["aws:drop"],
                            match_attributes=network_fw.CfnRuleGroup.
                            MatchAttributesProperty(destinations=[
                                network_fw.CfnRuleGroup.AddressProperty(
                                    address_definition="127.0.0.1/32")
                            ])))
                ])))

        nf_rule_group = network_fw.CfnRuleGroup(
            scope=self,
            id='GuardDutyNetworkFireWallRuleGroup',
            capacity=100,
            rule_group_name='guardduty-network-firewall',
            type='STATELESS',
            description='Guard Duty network firewall rule group',
            tags=[CfnTag(key='Name', value='cfn.rule-group.stack')],
            rule_group=rg_property)
        """ https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rule-dlq.html#dlq-considerations """
        dlq_statemachine = sqs.Queue(self,
                                     'DLQStateMachine',
                                     queue_name='dlq_state_machine')

        guardduty_firewall_ddb = ddb.Table(
            scope=self,
            id='GuarddutyFirewallDDB',
            table_name='GuardDutyFirewallDDBTable',
            removal_policy=RemovalPolicy.DESTROY,
            partition_key=ddb.Attribute(name='HostIp',
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
        """ IAM role for ddb permission """
        nf_iam_role = iam.Role(
            self,
            'DDBRole',
            role_name=f'ddb-nf-role-{env.region}',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["arn:aws:logs:*:*:*"],
                                actions=[
                                    "logs:CreateLogGroup",
                                    "logs:CreateLogStream", "logs:PutLogEvents"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    guardduty_firewall_ddb.table_arn,
                                    f"{guardduty_firewall_ddb.table_arn}/*"
                                ],
                                actions=[
                                    "dynamodb:PutItem", "dynamodb:GetItem",
                                    "dynamodb:Scan"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[nf_rule_group.ref, f"{nf_rule_group.ref}/*"],
                actions=[
                    "network-firewall:DescribeRuleGroup",
                    "network-firewall:UpdateRuleGroup"
                ]))

        record_ip_in_db = _lambda.Function(
            self,
            'RecordIpInDB',
            function_name='record-ip-in-ddb',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='addIPToDDB.handler',
            environment=dict(ACLMETATABLE=guardduty_firewall_ddb.table_name),
            role=nf_iam_role)
        """
        https://docs.amazonaws.cn/en_us/eventbridge/latest/userguide/eb-event-patterns-content-based-filtering.html
        """
        record_ip_task = step_fn_task.LambdaInvoke(
            self,
            'RecordIpDDBTask',
            lambda_function=record_ip_in_db,
            payload=step_fn.TaskInput.from_object({
                "comment":
                "Relevant fields from the GuardDuty / Security Hub finding",
                "HostIp.$":
                "$.detail.findings[0].ProductFields.aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4",
                "Timestamp.$":
                "$.detail.findings[0].ProductFields.aws/guardduty/service/eventLastSeen",
                "FindingId.$": "$.id",
                "AccountId.$": "$.account",
                "Region.$": "$.region"
            }),
            result_path='$',
            payload_response_only=True)

        firewall_update_rule = _lambda.Function(
            scope=self,
            id='GuardDutyUpdateNetworkFirewallRule',
            function_name='guardduty-update-networkfirewall-rule-group',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='updateNetworkFireWall.handler',
            environment=dict(
                FIREWALLRULEGROUP=nf_rule_group.ref,
                RULEGROUPPRI='30000',
                CUSTOMACTIONNAME='GuardDutytoFirewall',
                CUSTOMACTIONVALUE='guardduty-update-networkfirewall-rule-group'),
            role=nf_iam_role)

        firewall_update_rule_task = step_fn_task.LambdaInvoke(
            self,
            'FirewallUpdateRuleTask',
            lambda_function=firewall_update_rule,
            input_path='$',
            result_path='$',
            payload_response_only=True)

        firewall_no_update_job = step_fn.Pass(self, 'No Firewall change')
        notify_failure_job = step_fn.Fail(self,
                                          'NotifyFailureJob',
                                          cause='Any Failure',
                                          error='Unknown')

        send_to_slack = _lambda.Function(
            scope=self,
            id='SendAlertToSlack',
            function_name='guardduty-networkfirewall-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendSMSToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_slack_task = step_fn_task.LambdaInvoke(
            scope=self,
            id='LambdaToSlackDemo',
            lambda_function=send_to_slack,
            input_path='$',
            result_path='$')

        is_new_ip = step_fn.Choice(self, "New IP?")
        is_block_succeed = step_fn.Choice(self, "Blocked successfully?")

        definition = step_fn.Chain \
            .start(record_ip_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=notify_failure_job)) \
            .next(is_new_ip
                  .when(step_fn.Condition.boolean_equals('$.NewIP', True),
                        firewall_update_rule_task
                            .add_retry(errors=["States.TaskFailed"],
                                       interval=Duration.seconds(2),
                                       max_attempts=2
                                       )
                            .add_catch(errors=["States.ALL"], handler=notify_failure_job)
                            .next(
                                is_block_succeed
                                    .when(step_fn.Condition.boolean_equals('$.Result', False), notify_failure_job)
                                    .otherwise(send_slack_task)
                            )
                        )
                  .otherwise(firewall_no_update_job)
                  )

        guardduty_state_machine = step_fn.StateMachine(
            self,
            'GuarddutyStateMachine',
            definition=definition,
            timeout=Duration.minutes(5),
            state_machine_name='guardduty-state-machine')

        event.Rule(
            scope=self,
            id='EventBridgeCatchIPv4',
            description="Security Hub - GuardDuty findings with remote IP",
            rule_name='guardduty-catch-ipv4',
            event_pattern=event.EventPattern(
                account=['123456789012'],
                detail_type=["GuardDuty Finding"],
                source=['aws.securityhub'],
                detail={
                    "findings": {
                        "ProductFields": {
                            "aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4":
                            [{
                                "exists": True
                            }]
                        }
                    }
                }),
            targets=[
                event_target.SfnStateMachine(
                    machine=guardduty_state_machine,
                    dead_letter_queue=dlq_statemachine)
            ])
        """ Send other findings to slack """
        send_finding_to_slack = _lambda.Function(
            self,
            'SendFindingToSlack',
            function_name='send-finding-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendFindingToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_findings_task = step_fn_task.LambdaInvoke(
            self,
            'SendFindingToSlackTask',
            lambda_function=send_finding_to_slack,
            payload=step_fn.TaskInput.from_object({
                "comment":
                "Others fields from the GuardDuty / Security Hub finding",
                "severity.$":
                "$.detail.findings[0].Severity.Label",
                "Account_ID.$":
                "$.account",
                "Finding_ID.$":
                "$.id",
                "Finding_Type.$":
                "$.detail.findings[0].Types",
                "Region.$":
                "$.region",
                "Finding_description.$":
                "$.detail.findings[0].Description"
            }),
            result_path='$')

        slack_failure_job = step_fn.Fail(self,
                                         'SlackNotifyFailureJob',
                                         cause='Any Failure',
                                         error='Unknown')

        finding_definition = step_fn.Chain \
            .start(send_findings_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=slack_failure_job))

        sechub_findings_state_machine = step_fn.StateMachine(
            self,
            'SecHubFindingsStateMachine',
            definition=finding_definition,
            timeout=Duration.minutes(5),
            state_machine_name='sechub-finding-state-machine')

        event.Rule(scope=self,
                   id='EventBridgeFindings',
                   description="Security Hub - GuardDuty findings others",
                   rule_name='others-findings',
                   event_pattern=event.EventPattern(
                       account=['123456789012'],
                       source=['aws.securityhub'],
                       detail_type=['Security Hub Findings - Imported'],
                       detail={"severity": [5, 8]}),
                   targets=[
                       event_target.SfnStateMachine(
                           machine=sechub_findings_state_machine,
                           dead_letter_queue=dlq_statemachine)
                   ])
Example #27
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "MyQueue",
                           visibility_timeout=Duration.seconds(300))

        # Create the AWS Lambda function to subscribe to Amazon SQS queue
        # The source code is in './lambda' directory
        lambda_function = _lambda.Function(
            self,
            "MyLambdaFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="submit_job.handler",
            code=_lambda.Code.from_asset("lambda"),
            environment={'QUEUE_URL': queue.queue_url})

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambda_function.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)

        # EventBridge rule
        # Change the rate according to your needs
        rule = events.Rule(
            self,
            'Rule',
            description="Trigger Lambda function every 2 minutes",
            schedule=events.Schedule.expression('rate(2 minutes)'))

        rule.add_target(events_target.LambdaFunction(lambda_function))

        # Grant permission to the AWS Lambda function to consume messages from the Amazon SQS queue
        queue.grant_consume_messages(lambda_function)

        CfnOutput(self,
                  "FunctionName",
                  value=lambda_function.function_name,
                  export_name='FunctionName',
                  description='Function name')

        CfnOutput(self,
                  "QueueName",
                  value=queue.queue_name,
                  export_name='QueueName',
                  description='SQS queue name')

        CfnOutput(self,
                  "QueueArn",
                  value=queue.queue_arn,
                  export_name='QueueArn',
                  description='SQS queue ARN')

        CfnOutput(self,
                  "QueueUrl",
                  value=queue.queue_url,
                  export_name='QueueUrl',
                  description='SQS queue URL')

        CfnOutput(self,
                  "RuleName",
                  value=rule.rule_name,
                  export_name='RuleName',
                  description='EventBridge rule name')
Example #28
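This fragment references pre-existing app, stack, vpc, cluster, task_definition, and container objects; a hypothetical scaffolding sketch (an EC2-backed cluster would also need capacity, omitted here):

from aws_cdk import App, CfnOutput, Duration, Stack
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_ecs as ecs
from aws_cdk import aws_elasticloadbalancingv2 as elbv2

app = App()
stack = Stack(app, "AlbEcsStack")
vpc = ec2.Vpc(stack, "Vpc", max_azs=2)
cluster = ecs.Cluster(stack, "Cluster", vpc=vpc)
task_definition = ecs.Ec2TaskDefinition(stack, "TaskDef")
container = task_definition.add_container(
    "web",
    image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
    memory_limit_mib=256,
)
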
port_mapping = ecs.PortMapping(container_port=80,
                               host_port=8080,
                               protocol=ecs.Protocol.TCP)
container.add_port_mappings(port_mapping)

# Create Service
service = ecs.Ec2Service(stack,
                         "Service",
                         cluster=cluster,
                         task_definition=task_definition)

# Create ALB
lb = elbv2.ApplicationLoadBalancer(stack, "LB", vpc=vpc, internet_facing=True)
listener = lb.add_listener("PublicListener", port=80, open=True)

health_check = elbv2.HealthCheck(interval=Duration.seconds(60),
                                 path="/health",
                                 timeout=Duration.seconds(5))

# Attach ALB to ECS Service
listener.add_targets(
    "ECS",
    port=80,
    targets=[service],
    health_check=health_check,
)

CfnOutput(stack, "LoadBalancerDNS", value=lb.load_balancer_dns_name)

app.synth()
Example #29
    def __init__(self, app: App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # Lambda Handlers Definitions

        submit_lambda = _lambda.Function(
            self,
            'submitLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/submit'))

        status_lambda = _lambda.Function(
            self,
            'statusLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/status'))

        # Step functions Definition

        submit_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            output_path="$.Payload",
        )

        wait_job = _aws_stepfunctions.Wait(
            self,
            "Wait 30 Seconds",
            time=_aws_stepfunctions.WaitTime.duration(Duration.seconds(30)))

        status_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Get Status",
            lambda_function=status_lambda,
            output_path="$.Payload",
        )

        fail_job = _aws_stepfunctions.Fail(self,
                                           "Fail",
                                           cause='AWS Batch Job Failed',
                                           error='DescribeJob returned FAILED')

        succeed_job = _aws_stepfunctions.Succeed(
            self, "Succeeded", comment='AWS Batch Job succeeded')

        # Create Chain

        definition = submit_job.next(wait_job)\
            .next(status_job)\
            .next(_aws_stepfunctions.Choice(self, 'Job Complete?')
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'FAILED'), fail_job)
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'SUCCEEDED'), succeed_job)
                  .otherwise(wait_job))

        # Create state machine
        sm = _aws_stepfunctions.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=Duration.minutes(5),
        )
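
The Lambda sources under 'lambdas/' are not included; a hypothetical lambdas/status/lambda_function.py returning the field the Choice state branches on:

def lambda_handler(event, context):
    # A real implementation would look up the submitted job's state;
    # the Choice state reads $.status from this payload.
    return {"status": "SUCCEEDED"}  # or "FAILED" / "RUNNING" to keep polling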
Example #30
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket
        s3_Bucket = s3.Bucket(self,
                              "AWS-Cookbook-Recipe-404",
                              removal_policy=RemovalPolicy.DESTROY,
                              auto_delete_objects=True)

        aws_s3_deployment.BucketDeployment(
            self,
            'S3Deployment',
            destination_bucket=s3_Bucket,
            sources=[aws_s3_deployment.Source.asset("./s3_content")],
            retain_on_delete=False)

        isolated_subnets = ec2.SubnetConfiguration(
            name="ISOLATED",
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
            cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(self,
                      'AWS-Cookbook-VPC',
                      cidr='10.10.0.0/23',
                      subnet_configuration=[isolated_subnets])

        vpc.add_interface_endpoint(
            'VPCSecretsManagerInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService(
                'secretsmanager'
            ),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_gateway_endpoint(
            's3GateWayEndPoint',
            service=ec2.GatewayVpcEndpointAwsService('s3'),
            subnets=[
                ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
            ],
        )

        subnet_group = rds.SubnetGroup(
            self,
            'rds_subnet_group',
            description='VPC Subnet Group for RDS',
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_instance = rds.DatabaseInstance(
            self,
            'DBInstance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_8_0_23),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name='AWSCookbookRecipe404',
            instance_identifier='awscookbook404db-orig',
            delete_automated_backups=True,
            deletion_protection=False,
            # iam_authentication=
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group)

        # mkdir -p lambda-layers/sqlparse/python
        # cd lambda-layers/sqlparse/python
        # pip install sqlparse --target="."
        # cd ../../../

        # create Lambda Layer
        sqlparse = aws_lambda.LayerVersion(
            self,
            "sqlparse",
            code=aws_lambda.AssetCode('lambda-layers/sqlparse'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="sqlparse",
            license=
            "https://github.com/andialbrecht/sqlparse/blob/master/LICENSE")

        pymysql = aws_lambda.LayerVersion(
            self,
            "pymysql",
            code=aws_lambda.AssetCode('lambda-layers/pymysql'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="pymysql",
            license="MIT")

        smartopen = aws_lambda.LayerVersion(
            self,
            "smartopen",
            code=aws_lambda.AssetCode('lambda-layers/smart_open'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="smartopen",
            license="MIT")

        lambda_function = aws_lambda.Function(
            self,
            'LambdaRDS',
            code=aws_lambda.AssetCode("./mysql-lambda/"),
            handler="lambda_function.lambda_handler",
            environment={
                "DB_SECRET_ARN": rds_instance.secret.secret_arn,
                "S3_BUCKET": s3_Bucket.bucket_name
            },
            layers=[sqlparse, pymysql, smartopen],
            memory_size=1024,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(600),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_instance.secret.grant_read(lambda_function)

        rds_instance.connections.allow_from(lambda_function.connections,
                                            ec2.Port.tcp(3306), "Ingress")

        s3_Bucket.grant_read(lambda_function)

        create_params = {
            "FunctionName": lambda_function.function_arn,
        }

        on_create = custom_resources.AwsSdkCall(
            action='invoke',
            service='Lambda',
            parameters=create_params,
            physical_resource_id=custom_resources.PhysicalResourceId.of(
                'LambdaRDS'))

        policy_statement = iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            effect=iam.Effect.ALLOW,
            resources=[lambda_function.function_arn],
        )

        policy = custom_resources.AwsCustomResourcePolicy.from_statements(
            statements=[policy_statement])

        custom_resources.AwsCustomResource(
            self,
            'CustomResource',
            policy=policy,
            on_create=on_create,
            log_retention=logs.RetentionDays.TWO_WEEKS)

        # outputs

        CfnOutput(self, 'RdsSubnetGroup', value=subnet_group.subnet_group_name)

        CfnOutput(self,
                  'RdsDatabaseId',
                  value=rds_instance.instance_identifier)