Example #1
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create a cluster
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        # Create Fargate Service
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "sample-app",
            cluster=cluster,
            task_image_options=ecs_patterns.NetworkLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            ))

        fargate_service.service.connections.security_groups[
            0].add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=ec2.Port.tcp(80),
                                description="Allow http inbound from VPC")

        # Setup AutoScaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=Duration.seconds(60),
            scale_out_cooldown=Duration.seconds(60),
        )

        CfnOutput(self,
                  "LoadBalancerDNS",
                  value=fargate_service.load_balancer.load_balancer_dns_name)
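These snippets are method bodies and omit their import headers. Under the CDK v2 (aws-cdk-lib) packaging that the Duration/CfnOutput usage suggests, a plausible preamble for this example (and for the very similar Example #2) would be:

from aws_cdk import CfnOutput, Duration, Stack
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_ecs as ecs
from aws_cdk import aws_ecs_patterns as ecs_patterns
from constructs import Construct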
Example #2
    def _create_mlflow_server(self):
        """
        Create a Fargate task for the MLflow server
        """
        cluster = ecs.Cluster(scope=self, id="CLUSTER", cluster_name=self.cluster_name, vpc=self.vpc)

        task_id = f"{self.stack_name}-{self.component_id}-MLflow"
        task_definition = ecs.FargateTaskDefinition(
            scope=self,
            id=task_id,
            task_role=self.role,
        )

        container_id = f"{self.stack_name}-{self.component_id}-container"
        container = task_definition.add_container(
            id=container_id,
            image=ecs.ContainerImage.from_asset(
                directory="cdk_ml_cicd_pipeline/resources/visualization/mlflow/container",
            ),
            environment={
                "BUCKET": f"s3://{self.artifact_bucket.bucket_name}",
                "HOST": self.database.db_instance_endpoint_address,
                "PORT": str(self.port),
                "DATABASE": self.dbname,
                "USERNAME": self.username,
            },
            secrets={"PASSWORD": ecs.Secret.from_secrets_manager(self.db_password_secret)},
            logging=ecs.LogDriver.aws_logs(stream_prefix='mlflow')
        )
        port_mapping = ecs.PortMapping(container_port=5000, host_port=5000, protocol=ecs.Protocol.TCP)
        container.add_port_mappings(port_mapping)

        fargate_service_id = f"{self.stack_name}-{self.component_id}-mlflow-fargate"
        self.fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            scope=self,
            id=fargate_service_id,
            service_name=self.service_name,
            cluster=cluster,
            task_definition=task_definition,
        )

        # Setup security group
        self.fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(self.vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(5000),
            description="Allow inbound from VPC for mlflow",
        )

        # Setup autoscaling policy
        autoscaling_policy_id = f"{self.stack_name}-{self.component_id}-autoscaling-policy"
        scaling = self.fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            id=autoscaling_policy_id,
            target_utilization_percent=70,
            scale_in_cooldown=Duration.seconds(60),
            scale_out_cooldown=Duration.seconds(60),
        )
Example #3
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        innerSfnPassState = sfn.Pass(self, 'PassState')

        innerSfn = sfn.StateMachine(self, 'InnerStepFunction',
            definition = innerSfnPassState,
            timeout=Duration.minutes(60)
        )

        task1 = tasks.StepFunctionsStartExecution(self, "StepFunction1",
          state_machine=innerSfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector = {
                "Output.$": "$.Output"
          }
        )

        task2 = tasks.StepFunctionsStartExecution(self, "StepFunction2",
          state_machine=innerSfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector = {
                "Output.$": "$.Output"
          }
        )

        task3 = tasks.StepFunctionsStartExecution(self, "StepFunction3",
          state_machine=innerSfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector = {
                "Output.$": "$.Output"
          }
        )

        outer_sfn = sfn.StateMachine(self, "OuterStepFunction",
                definition=task1.next(task2).next(task3),
                timeout=Duration.minutes(60)
        )

        CfnOutput(self, "StepFunctionArn",
            value = outer_sfn.state_machine_arn,
            export_name = 'OuterStepFunctionArn',
            description = 'Outer Step Function arn')
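Under the same CDK v2 assumption, the Step Functions example would rely on:

from aws_cdk import CfnOutput, Duration, Stack
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk import aws_stepfunctions_tasks as tasks
from constructs import Construct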
Example #4
 def _setup_mysql_serverless(self) -> None:
     port = 3306
     database = "test"
     schema = "test"
     aurora_mysql = rds.ServerlessCluster(
         self,
         "aws-data-wrangler-aurora-cluster-mysql-serverless",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_mysql(
             version=rds.AuroraMysqlEngineVersion.VER_5_7_12,
         ),
         cluster_identifier="mysql-serverless-cluster-wrangler",
         default_database_name=database,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         scaling=rds.ServerlessScalingOptions(
             auto_pause=Duration.minutes(5),
             min_capacity=rds.AuroraCapacityUnit.ACU_1,
             max_capacity=rds.AuroraCapacityUnit.ACU_1,
         ),
         backup_retention=Duration.days(1),
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT),
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         enable_data_api=True,
     )
     secret = secrets.Secret(
         self,
         "aws-data-wrangler-mysql-serverless-secret",
         secret_name="aws-data-wrangler/mysql-serverless",
         description="MySQL serverless credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "mysql",
                     "host": aurora_mysql.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_mysql.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "MysqlServerlessSecretArn", value=secret.secret_arn)
     CfnOutput(self, "MysqlServerlessClusterArn", value=aurora_mysql.cluster_arn)
     CfnOutput(self, "MysqlServerlessAddress", value=aurora_mysql.cluster_endpoint.hostname)
     CfnOutput(self, "MysqlServerlessPort", value=str(port))
     CfnOutput(self, "MysqlServerlessDatabase", value=database)
     CfnOutput(self, "MysqlServerlessSchema", value=schema)
Example #5
    def create_lambda_build_image(self) -> Resource:
        """Greengrassのコンポーネント用に推論アプリのdockerイメージをビルドするcodebuildを実行するLambda

        Returns:
            Resource: lambda
        """

        lambdaFn_name = self.get_lambda_name("build_image")
        role_name = self.get_role_name("build_image")

        lambda_role = aws_iam.Role(
            self,
            id=role_name,
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name=role_name,
            path="/service-role/",
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AWSCodeBuildDeveloperAccess")
            ])
        lambda_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "AllowDynamoDBAccess",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(actions=[
                        "dynamodb:PutItem", "dynamodb:GetItem",
                        "dynamodb:UpdateItem"
                    ],
                                            resources=[self._table.table_arn])
                ])))
        lambdaFn_path = self.get_lambda_path("build_image")

        lambdaFn = aws_lambda.Function(
            self,
            id=lambdaFn_name,
            function_name=lambdaFn_name,
            code=aws_lambda.AssetCode(path=lambdaFn_path),
            handler="lambda_handler.handler",
            timeout=Duration.seconds(10),
            runtime=aws_lambda.Runtime.PYTHON_3_9,
            description="コンポーネント用のイメージを作成",
            role=lambda_role,
            environment={
                "TABLE_NAME":
                self._table.table_name,
                "CODEBUILD_PROJECT_NAME":
                self._docker_image_buildproject.project_name,
                "COMPONENT_IMAGE_REPOSITORY":
                self._component_ecr.repository_name,
                "COMPONENT_APP_SOURCE_REPOSITORY":
                self._component_source_repository.repository_clone_url_grc,
                "COMPONENT_BASE_IMAGE_REPOSITORY":
                self._component_base_ecr.repository_uri
            })
        self._table.grant_read_write_data(lambdaFn)

        return lambdaFn
Example #6
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        # Creates reference to already existing kinesis stream
        kinesis_stream = kinesis.Stream.from_stream_arn(
            self, 'KinesisStream',
            Arn.format(
                ArnComponents(resource='stream',
                              service='kinesis',
                              resource_name='my-stream'), self))

        lambdaFn = lambda_.Function(self,
                                    'Singleton',
                                    handler='index.main',
                                    code=lambda_.InlineCode(handler_code),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    timeout=Duration.seconds(300))

        # Update Lambda Permissions To Use Stream
        kinesis_stream.grant_read(lambdaFn)

        # Create New Kinesis Event Source
        kinesis_event_source = event_sources.KinesisEventSource(
            stream=kinesis_stream,
            starting_position=lambda_.StartingPosition.LATEST,
            batch_size=1)

        # Attach New Event Source To Lambda
        lambdaFn.add_event_source(kinesis_event_source)
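The Arn.format call and the Kinesis event source suggest the following imports:

from aws_cdk import App, Arn, ArnComponents, Duration, Stack
from aws_cdk import aws_kinesis as kinesis
from aws_cdk import aws_lambda as lambda_
from aws_cdk import aws_lambda_event_sources as event_sources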
Example #7
    def __init__(self, scope: Construct, id: str, **kwarg) -> None:
        super().__init__(scope, id, **kwarg)

        # define the table that maps short codes to URLs.
        table = aws_dynamodb.Table(self,
                                   "Table",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=10,
                                   write_capacity=5)

        # define the API gateway request handler. all API requests will go to the same function.
        handler = aws_lambda.Function(
            self,
            "UrlShortenerFunction",
            code=aws_lambda.Code.from_asset("./lambda"),
            handler="handler.main",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)

        # pass the table name to the handler through an environment variable and grant
        # the handler read/write permissions on the table.
        handler.add_environment('TABLE_NAME', table.table_name)
        table.grant_read_write_data(handler)

        # define the API endpoint and associate the handler
        api = aws_apigateway.LambdaRestApi(self,
                                           "UrlShortenerApi",
                                           handler=handler)

        # map go.waltersco.co to this api gateway endpoint
        # the domain name is a shared resource that can be accessed through the API in WaltersCoStack
        # NOTE: you can comment this out if you want to bypass the domain name mapping
        self.map_waltersco_subdomain('go', api)
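map_waltersco_subdomain is a method of the surrounding stack, not a CDK API; the CDK imports themselves would plausibly be:

from aws_cdk import Duration
from aws_cdk import aws_apigateway
from aws_cdk import aws_dynamodb
from aws_cdk import aws_lambda
from constructs import Construct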
Example #8
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the Lambda function to receive the request
        # The source code is in './src' directory
        lambda_fn = lambda_.Function(
            self, "MyFunction",
            runtime=lambda_.Runtime.PYTHON_3_9,
            handler="index.handler",
            code=lambda_.Code.from_asset(os.path.join(DIRNAME, "src")),
            environment={
                "env_var1": "value 1",
                "env_var2": "value 2",
            }
        )

        # Create the HTTP API with CORS
        http_api = _apigw.HttpApi(
            self, "MyHttpApi",
            cors_preflight=_apigw.CorsPreflightOptions(
                allow_methods=[_apigw.CorsHttpMethod.GET],
                allow_origins=["*"],
                max_age=Duration.days(10),
            )
        )

        # Add a route to GET /
        http_api.add_routes(
            path="/",
            methods=[_apigw.HttpMethod.GET],
            integration=_integrations.HttpLambdaIntegration("LambdaProxyIntegration", handler=lambda_fn),
        )

        # Outputs
        CfnOutput(self, "API Endpoint", description="API Endpoint", value=http_api.api_endpoint)
Example #9
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Lambda Function
        lambdaFn = _lambda.Function(self,
                                    "SNSEventHandler",
                                    runtime=_lambda.Runtime.PYTHON_3_9,
                                    code=_lambda.Code.from_asset("lambda"),
                                    handler="handler.main",
                                    timeout=Duration.seconds(10))

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(self,
                      'logs',
                      log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
                      removal_policy=RemovalPolicy.DESTROY,
                      retention=logs.RetentionDays.ONE_DAY)

        # SNS topic
        topic = sns.Topic(self,
                          'sns-to-lambda-topic-test',
                          display_name='My SNS topic')

        # subscribe Lambda to SNS topic
        topic.add_subscription(subs.LambdaSubscription(lambdaFn))

        # Output information about the created resources
        CfnOutput(self,
                  'snsTopicArn',
                  value=topic.topic_arn,
                  description='The arn of the SNS topic')
        CfnOutput(self,
                  'functionName',
                  value=lambdaFn.function_name,
                  description='The name of the handler function')
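Probable preamble for the SNS-to-Lambda example above:

from aws_cdk import App, CfnOutput, Duration, RemovalPolicy, Stack
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_logs as logs
from aws_cdk import aws_sns as sns
from aws_cdk import aws_sns_subscriptions as subs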
Example #10
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "VsamToDynamoQueue",
                           visibility_timeout=Duration.seconds(300),
                           queue_name='VsamToDynamoQueue')

        dynamoTable = _dyn.Table(
            self,
            "CLIENT",
            partition_key=_dyn.Attribute(name="CLIENT-KEY",
                                         type=_dyn.AttributeType.STRING),
            table_name="CLIENT",
        )

        # Create the Lambda function to subscribe to SQS and store the record in DynamoDB
        # The source code is in './src' directory
        lambda_fn = _lambda.Function(
            self,
            "SQSToDynamoFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="insertRecord.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
        )

        dynamoTable.grant_write_data(lambda_fn)

        queue.grant_consume_messages(lambda_fn)
        lambda_fn.add_event_source(_event.SqsEventSource(queue))
Example #11
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        lambdaFn = lambda_.Function(
            self, "Singleton",
            code=lambda_.InlineCode(handler_code),
            handler="index.main",
            timeout=Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
        )

        # Run on weekdays (Mon-Fri) at 6PM UTC
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
        rule = events.Rule(
            self, "Rule",
            schedule=events.Schedule.cron(
                minute='0',
                hour='18',
                month='*',
                week_day='MON-FRI',
                year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))
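Probable imports for the scheduled-rule example:

from aws_cdk import App, Duration, Stack
from aws_cdk import aws_events as events
from aws_cdk import aws_events_targets as targets
from aws_cdk import aws_lambda as lambda_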
Example #12
def create_hosted_zone(scope: InfraStack) -> route53.HostedZone:
    domain = scope.context.domain_name
    hosted_zone = route53.HostedZone(
        scope,
        id=domain,
        zone_name=domain,
    )

    route53.MxRecord(
        scope,
        scope.context.construct_id("MX-Gmail-1"),
        values=[
            route53.MxRecordValue(host_name='ASPMX.L.GOOGLE.COM', priority=1),
            route53.MxRecordValue(host_name='ALT1.ASPMX.L.GOOGLE.COM',
                                  priority=5),
            route53.MxRecordValue(host_name='ALT2.ASPMX.L.GOOGLE.COM',
                                  priority=5),
            route53.MxRecordValue(host_name='ALT3.ASPMX.L.GOOGLE.COM',
                                  priority=10),
            route53.MxRecordValue(host_name='ALT4.ASPMX.L.GOOGLE.COM',
                                  priority=10),
        ],
        zone=hosted_zone,
        ttl=Duration.seconds(3600),
    )

    return hosted_zone
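InfraStack and scope.context are project-local types that are not shown here; the CDK imports would plausibly be:

from aws_cdk import Duration
from aws_cdk import aws_route53 as route53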
Example #13
    def create_lambda(self, function_name: str,
                      custom_role: iam_.Role = None) -> lambda_.Function:
        if custom_role is None:
            custom_role = self.create_default_role(function_name)

        return lambda_.Function(
            self,
            f"{self.id}-{function_name}",
            code=lambda_.Code.from_asset(
                lambda_dir,
                exclude=[
                    "*.test.py",
                    "requirements.txt",
                ],
            ),
            current_version_options=lambda_.VersionOptions(
                removal_policy=RemovalPolicy.DESTROY,
                retry_attempts=2,
            ),
            function_name=f"{self.id}-{function_name}",
            handler=f"{function_name}.lambda_handler",
            log_retention=RetentionDays.ONE_DAY,
            role=custom_role,
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(900),
            tracing=lambda_.Tracing.DISABLED,
        )
Example #14
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Lambda Function
        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        lambdaFn = lambda_.Function(
            self, "Singleton",
            code=lambda_.InlineCode(handler_code),
            handler="index.main",
            timeout=Duration.seconds(10),
            runtime=lambda_.Runtime.PYTHON_3_9,
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY
        )

        # EventBridge Rule
        rule = events.Rule(
            self, "Rule",
        )
        rule.add_event_pattern(
            source=["cdk.myApp"],
            detail_type=["transaction"]
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))
Example #15
 def cname_record(self, record_name, hosted_zone, alb_dns):
     # alb_dns: DNS name of the load balancer the CNAME points at,
     # passed in by the caller (it was undefined in the original snippet)
     _route53.CnameRecord(
         self, 'Route53Cname',
         domain_name=alb_dns,
         record_name=record_name,
         zone=hosted_zone,
         ttl=Duration.minutes(1)
     )
Example #16
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self._account_id = os.environ["CDK_DEFAULT_ACCOUNT"]
        self._region = os.environ["CDK_DEFAULT_REGION"]

        self._queue = _sqs.Queue(
            self,
            "ApigwV2SqsLambdaQueue",
            visibility_timeout=Duration.seconds(300),
        )

        self._sqs_event_source = SqsEventSource(self._queue)

        self._fn = _lambda.Function(
            self,
            'SqsMessageHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='app.handler',
            code=_lambda.Code.from_asset(path='src'),
            timeout=Duration.minutes(3),
            memory_size=128,
            environment={
                'REGION': self._region,
                'ACCOUNT_ID': self._account_id
            },
        )

        self._fn.add_event_source(self._sqs_event_source)

        self._http_api = self._create_apigw_v2()

        self._integration_role = self._create_apigw_to_sqs_role()

        self._send_msg_route = self._create_sqs_send_msg_route()

        # Enable Auto Deploy
        self._stage = self._create_stage()

        # Outputs
        CfnOutput(self,
                  "API Endpoint",
                  description="API Endpoint",
                  value=self._http_api.attr_api_endpoint)
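Imports this stack appears to assume; the API Gateway helpers (_create_apigw_v2, _create_apigw_to_sqs_role, and so on) are private methods defined elsewhere in the stack:

import os

from aws_cdk import CfnOutput, Duration, Stack
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_sqs as _sqs
from aws_cdk.aws_lambda_event_sources import SqsEventSource
from constructs import Construct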
Example #17
def create_target_group(self, vpc, tg_name):
    tg = _elbv2.ApplicationTargetGroup(
        self, tg_name,
        port=80,
        target_type=_elbv2.TargetType.IP,
        target_group_name=tg_name,
        vpc=vpc,
        health_check=_elbv2.HealthCheck(path='/login'),
    )
    tg.enable_cookie_stickiness(Duration.seconds(1800))
    return tg
Example #18
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "MyQueue",
                           visibility_timeout=Duration.seconds(300))

        # Create the AWS Lambda function to subscribe to Amazon SQS queue
        # The source code is in './lambda' directory
        lambda_function = _lambda.Function(
            self,
            "MyLambdaFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="get_messages.handler",
            code=_lambda.Code.from_asset("lambda"),
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambda_function.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)

        # Grant permission to the AWS Lambda function to consume messages from the Amazon SQS queue
        queue.grant_consume_messages(lambda_function)

        # Configure the Amazon SQS queue to trigger the AWS Lambda function
        lambda_function.add_event_source(_event.SqsEventSource(queue))

        CfnOutput(self,
                  "FunctionName",
                  value=lambda_function.function_name,
                  export_name='FunctionName',
                  description='Function name')

        CfnOutput(self,
                  "QueueName",
                  value=queue.queue_name,
                  export_name='QueueName',
                  description='SQS queue name')

        CfnOutput(self,
                  "QueueArn",
                  value=queue.queue_arn,
                  export_name='QueueArn',
                  description='SQS queue ARN')

        CfnOutput(self,
                  "QueueUrl",
                  value=queue.queue_url,
                  export_name='QueueUrl',
                  description='SQS queue URL')
Example #19
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(
            self, "SqsFargateCdkPythonQueue",
            visibility_timeout=Duration.seconds(300)
        )

        nat_provider = ec2.NatProvider.instance(
            instance_type=ec2.InstanceType("t3.small")
        )

        vpc = ec2.Vpc(self, "SqsFargateCdkPythonVpc", nat_gateway_provider=nat_provider, nat_gateways=1)

        cluster = ecs.Cluster(self, "SqsFargateCdkPythonCluster", vpc=vpc)

        role = iam.Role(self, "SqsFargateCdkPythonRole", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        queue.grant_consume_messages(role)

        fargate_task_definition = ecs.FargateTaskDefinition(self, "SqsFargateCdkPythonFargateTaskDefinition",
                                                            memory_limit_mib=512, cpu=256,
                                                            task_role=role)

        aws_log_drive = ecs.AwsLogDriver(stream_prefix="sqs_fargate_cdk_python")

        fargate_task_definition.add_container("SqsFargateCdkPythonContainer",
                                              image=ecs.ContainerImage.from_asset("./docker"),
                                              environment={"QUEUE_URL": queue.queue_url}, logging=aws_log_drive)

        fargate_service = ecs.FargateService(self, "SqsFargateCdkPythonFargateService", cluster=cluster,
                                             task_definition=fargate_task_definition, desired_count=0)

        auto_scale_task_count = fargate_service.auto_scale_task_count(min_capacity=0, max_capacity=1)
        auto_scale_task_count.scale_on_metric("SqsFargateCdkPythonScaleOnMetric",
                                              metric=queue.metric_approximate_number_of_messages_visible(),
                                              adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
                                              cooldown=Duration.seconds(300),
                                              scaling_steps=[{"upper": 0, "change": -1}, {"lower": 1, "change": +1}])
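The bare autoscaling alias is ambiguous; since ECS task-count scaling is driven by Application Auto Scaling, it most plausibly refers to that module:

from aws_cdk import Duration, Stack
from aws_cdk import aws_applicationautoscaling as autoscaling
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_ecs as ecs
from aws_cdk import aws_iam as iam
from aws_cdk import aws_sqs as sqs
from constructs import Construct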
Example #20
 def create_standard_lambda(self, scope, name: str, layers: list = None, env: dict = None, duration_seconds: int = 20):
     # Default to fresh objects per call to avoid the mutable-default-argument pitfall
     layers = layers or []
     env = env or {}
     id = camel_case_upper(f"{self.namespace}_{name}_Lambda")
     function_name = f"{self.namespace}_{name}"
     # TODO: switch to aws_cdk.aws_lambda_python -> PythonFunction
     return lmbda.Function(scope=scope,
                           id=id,
                           function_name=function_name,
                           handler=f"{name}.handler",
                           runtime=lmbda.Runtime.PYTHON_3_8,
                           code=lmbda.Code.from_asset(f"lambdas/functions/{name}"),
                           environment=env,
                           timeout=Duration.seconds(duration_seconds),
                           layers=layers)
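camel_case_upper is a project-local helper, not a CDK API; a minimal sketch with the same apparent intent, plus the imports the function uses:

from aws_cdk import Duration
from aws_cdk import aws_lambda as lmbda

def camel_case_upper(snake: str) -> str:
    # "my_namespace_foo" -> "MyNamespaceFoo"; a guess at the helper's behavior
    return "".join(part.capitalize() for part in snake.split("_"))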
Example #21
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        function = aws_lambda.Function(
            self,
            "SmileFunction",
            function_name="SmileFunction",
            code=aws_lambda.Code.from_asset("package"),
            handler="handler.main",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            tracing=aws_lambda.Tracing.ACTIVE,
        )
Example #22
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(
            self, "%name.PascalCased%Queue",
            visibility_timeout=Duration.seconds(300),
        )

        topic = sns.Topic(
            self, "%name.PascalCased%Topic"
        )

        topic.add_subscription(subs.SqsSubscription(queue))
Example #23
    def create_lambda_check_image_status(self) -> Resource:
        """dockerイメージのビルド状況を確認するLambda

        Returns:
            Resource: lambda
        """

        lambdaFn_name = self.get_lambda_name("check_image_status")
        role_name = self.get_role_name("check_image_status")

        lambda_role = aws_iam.Role(
            self,
            id=role_name,
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name=role_name,
            path="/service-role/",
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ])
        lambda_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "AllowCodeBuildStatus",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        actions=["codebuild:BatchGetBuilds"],
                        resources=[
                            self._docker_image_buildproject.project_arn
                        ]),
                    aws_iam.PolicyStatement(actions=[
                        "dynamodb:PutItem", "dynamodb:GetItem",
                        "dynamodb:UpdateItem"
                    ],
                                            resources=[self._table.table_arn])
                ])))

        lambdaFn_path = self.get_lambda_path("check_image_status")
        lambdaFn = aws_lambda.Function(
            self,
            id=lambdaFn_name,
            function_name=lambdaFn_name,
            code=aws_lambda.AssetCode(path=lambdaFn_path),
            handler="lambda_handler.handler",
            timeout=Duration.seconds(10),
            runtime=aws_lambda.Runtime.PYTHON_3_9,
            description="コンポーネント用のイメージのビルド結果を確認",
            role=lambda_role,
            environment={"TABLE_NAME": self._table.table_name})

        return lambdaFn
Example #24
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        phoneNumber = CfnParameter(self,
                                   "phoneNumber",
                                   type="String",
                                   description="Recipient phone number")
        tenDLC = CfnParameter(self,
                              "tenDLC",
                              type="String",
                              description="10DLC origination number")

        # We create a log group so it will be gracefully cleaned up on a destroy event.  By default
        # logs never expire and won't be removed.
        lambdaLogGroup = logs.LogGroup(
            self,
            'SMSPublisherFunctionLogGroup',
            log_group_name='/aws/lambda/SMSPublisherFunction',
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.FIVE_DAYS,
        )

        SMSPublisherFunction = aws_lambda.Function(
            self,
            'SMSPublisherFunction',
            code=aws_lambda.Code.from_asset('src'),
            function_name='SMSPublisherFunction',
            handler='app.handler',
            runtime=aws_lambda.Runtime.NODEJS_12_X,
            timeout=Duration.seconds(3),
            memory_size=128,
            environment={
                'phoneNumber': phoneNumber.value_as_string,
                'tenDLC': tenDLC.value_as_string
            },
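            # The DENY on every topic ARN plus the broad ALLOW below restricts
            # sns:Publish to direct SMS delivery, which has no topic ARN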
            initial_policy=[
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.DENY,
                                    resources=['arn:aws:sns:*:*:*']),
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.ALLOW,
                                    resources=['*'])
            ],
        )
        # Make sure the log group is created prior to the function so CDK doesn't create a new one
        SMSPublisherFunction.node.add_dependency(lambdaLogGroup)

        CfnOutput(self,
                  'SMSPublisherFunctionName',
                  description='SMSPublisherFunction function name',
                  value=SMSPublisherFunction.function_name)
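Assumed preamble for the SMS publisher stack:

from aws_cdk import CfnOutput, CfnParameter, Duration, RemovalPolicy, Stack
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda
from aws_cdk import aws_logs as logs
from constructs import Construct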
Example #25
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        lambda_code_bucket = s3.Bucket.from_bucket_attributes(
            self, 'LambdaCodeBucket', bucket_name='my-lambda-code-bucket')

        lambdaFn = lambda_.Function(self,
                                    'Singleton',
                                    handler='index.main',
                                    code=lambda_.S3Code(
                                        bucket=lambda_code_bucket,
                                        key='my-lambda.py'),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    timeout=Duration.seconds(300))
Example #26
    def __init__(self, scope: Construct, id: str, vpc, asg_security_groups,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create an Aurora cluster with 2 instances using the CDK high-level API.
        # Secrets Manager auto-generates and stores the password; don't put the password in CDK code directly.
        # db_Aurora_cluster = rds.DatabaseCluster(self, "MyAurora",
        #                                         default_database_name="MyAurora",
        #                                         engine=rds.DatabaseClusterEngine.aurora_mysql(
        #                                             version=rds.AuroraMysqlEngineVersion.VER_5_7_12
        #                                         ),
        #                                         instance_props=rds.InstanceProps(
        #                                             vpc=vpc,
        #                                             vpc_subnets=ec2.SubnetSelection(subnet_group_name="DB"),
        #                                             instance_type=ec2.InstanceType(instance_type_identifier="t2.small")
        #                                         ),
        #                                         instances=2,
        #                                         parameter_group=rds.ClusterParameterGroup.from_parameter_group_name(
        #                                             self, "para-group-aurora",
        #                                             parameter_group_name="default.aurora-mysql5.7"
        #                                         ),
        #                                         )
        # for asg_sg in asg_security_groups:
        #     db_Aurora_cluster.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access Aurora")

        # Alternatively, create a MySQL RDS instance with the CDK high-level API
        db_mysql_easy = rds.DatabaseInstance(
            self,
            "MySQL_DB_easy",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.SMALL),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_group_name="DB"),
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self,
                "para-group-mysql",
                parameter_group_name="default.mysql5.7"))
        for asg_sg in asg_security_groups:
            db_mysql_easy.connections.allow_default_port_from(
                asg_sg, "EC2 Autoscaling Group access MySQL")
Example #27
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create dynamo table
        demo_table = aws_dynamodb.Table(
            self,
            "demo_table",
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING))

        # create producer lambda function
        producer_lambda = aws_lambda.Function(
            self,
            "producer_lambda_function",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.from_asset("./lambda/producer"))

        producer_lambda.add_environment("TABLE_NAME", demo_table.table_name)

        # grant permission to lambda to write to demo table
        demo_table.grant_write_data(producer_lambda)

        # create consumer lambda function
        consumer_lambda = aws_lambda.Function(
            self,
            "consumer_lambda_function",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.from_asset("./lambda/consumer"))

        consumer_lambda.add_environment("TABLE_NAME", demo_table.table_name)

        # grant permission to lambda to read from demo table
        demo_table.grant_read_data(consumer_lambda)

        # create a Cloudwatch Event rule
        one_minute_rule = aws_events.Rule(
            self,
            "one_minute_rule",
            schedule=aws_events.Schedule.rate(Duration.minutes(1)),
        )

        # Add target to Cloudwatch Event
        one_minute_rule.add_target(
            aws_events_targets.LambdaFunction(producer_lambda))
        one_minute_rule.add_target(
            aws_events_targets.LambdaFunction(consumer_lambda))
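This example imports whole CDK modules without aliases, so the preamble is presumably:

from aws_cdk import Duration, Stack
from aws_cdk import aws_dynamodb
from aws_cdk import aws_events
from aws_cdk import aws_events_targets
from aws_cdk import aws_lambda
from constructs import Construct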
Example #28
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Lambda Function
        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        lambdaFn = _lambda.Function(
            self,
            "IoTTriggerLambda",
            code=_lambda.InlineCode(handler_code),
            handler="index.main",
            timeout=Duration.seconds(10),
            runtime=_lambda.Runtime.PYTHON_3_9,
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(self,
                      'logs',
                      log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
                      removal_policy=RemovalPolicy.DESTROY,
                      retention=logs.RetentionDays.ONE_DAY)

        # IoT Thing
        iot_thing = iot.CfnThing(self, "IoTThing", thing_name="MyIotThing")

        # IoT Rule with SQL, which invokes a Lambda Function
        iot_topic_rule_sql = 'SELECT * FROM "$aws/things/MyIotThing/*"'
        iot_topic_rule = iot.CfnTopicRule(
            self,
            "IoTRule",
            topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty(
                sql=iot_topic_rule_sql,
                actions=[
                    iot.CfnTopicRule.ActionProperty(
                        lambda_=iot.CfnTopicRule.LambdaActionProperty(
                            function_arn=lambdaFn.function_arn))
                ]))

        # Lambda Resource Policy allows invocation from IoT Rule
        lambdaFn.add_permission(
            "GrantIoTRule",
            principal=iam.ServicePrincipal("iot.amazonaws.com"),
            source_arn=iot_topic_rule.attr_arn)
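Probable imports for the IoT rule example:

from aws_cdk import App, Duration, RemovalPolicy, Stack
from aws_cdk import aws_iam as iam
from aws_cdk import aws_iot as iot
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_logs as logs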
Example #29
    def _create_lambda_for_set_experiment_info_env(self) -> Resource:
        """
        Ref: https://github.com/aws-samples/aws-cdk-examples/tree/master/python/lambda-cron
        """
        lambdaFn_id = f"{self.name_prefix}-set-experiment-info-env-lambda_handler"
        entry = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn_set_experiment_info_env/"

        lambdaFn = lambda_python.PythonFunction(
            scope=self,
            id=lambdaFn_id,
            entry=entry,
            index="lambda_handler.py",
            handler="lambda_handler",
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            role=self.lambda_experiment_info_role)

        return lambdaFn
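PythonFunction does not ship in the core library; in CDK v2 it lives in an alpha package (in v1 it was aws_cdk.aws_lambda_python), which is the main assumption behind this sketch:

import pathlib

from aws_cdk import Duration, Resource
from aws_cdk import aws_lambda
from aws_cdk import aws_lambda_python_alpha as lambda_python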
Example #30
    def _create_lambda(self):
        role = iam.Role(
            self,
            "LambdaPrepareDbRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            description="Role for Lambda preparing RDS",
            role_name=f"{self.name_prefix}-lambda-prepare-db-role",
            managed_policies=[
                #iam.ManagedPolicy.from_aws_managed_policy_name("AWSLambdaBasicExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaVPCAccessExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "SecretsManagerReadWrite"),
            ],
        )

        lambda_function_id = f"{self.name_prefix}-prepare_db_function"
        lambda_function_path = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn/prepare_db_function/"
        lambda_layer_path = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn/lambda_layer/"

        layer = aws_lambda.LayerVersion(
            self, 'Layer', code=aws_lambda.AssetCode(lambda_layer_path))

        lambda_fn = aws_lambda.Function(
            scope=self,
            id=lambda_function_id,
            function_name=lambda_function_id,
            code=aws_lambda.AssetCode(path=lambda_function_path),
            handler="lambda_handler.lambda_handler",
            layers=[layer],
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            role=role,
            description="write some description for this lambda",
            security_groups=[self.security_group],
            vpc=self.vpc,
            vpc_subnets=self.subnet_selection)

        lambda_fn.add_environment('SECRETS_NAME', self.rds.secret.secret_arn)
        lambda_fn.add_environment('REGION_NAME', self.region)
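Finally, the imports this helper appears to rely on; self.vpc, self.rds, self.security_group, and the other attributes are set elsewhere in the stack:

import pathlib

from aws_cdk import Duration
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda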