Example #1
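Builds a Fargate task definition with an EFS data volume, a container image built from a local Docker directory, HTTP/HTTPS/SSH port mappings, and resource tags.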
 def get_task_definition(self, security_group):
     task_definition = FargateTaskDefinition(self._stack,
                                             self._name,
                                             memory_limit_mib=512,
                                             cpu=256)
     task_definition.add_volume(
         name=DATA_VOLUME,
         efs_volume_configuration=self.get_volume(security_group))
     image = ContainerImage.from_asset(directory=self._config.docker_dir)
     container = ContainerDefinition(
         self._stack,
         'obm_container',
         task_definition=task_definition,
         image=image,
         environment=self.get_environment(),
         logging=AwsLogDriver(stream_prefix=self._config.service_name),
     )
     container.add_mount_points(self.get_mount_point())
     container.add_port_mappings(
         PortMapping(container_port=HTTP_PORT, host_port=HTTP_PORT),
         PortMapping(container_port=HTTPS_PORT, host_port=HTTPS_PORT),
         PortMapping(container_port=SSH_PORT, host_port=SSH_PORT),
     )
     self._tag_it(container)
     self._tag_it(task_definition)
     return task_definition
Example #2
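Runs the Airflow scheduler as a Fargate service on an existing cluster, pulling the public apache/airflow image.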
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: _Cluster,
        shared_airflow_env: dict,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)
        task_definition = FargateTaskDefinition(
            self,
            "task-def",
            cpu=512,
            memory_limit_mib=1024,
        )

        task_definition.add_container(
            "container",
            image=ContainerImage.from_registry("apache/airflow:1.10.12-python3.8"),
            command=["scheduler"],
            logging=LogDriver.aws_logs(stream_prefix="scheduler"),
            environment=shared_airflow_env,
        )

        FargateService(
            self,
            "service",
            cluster=cluster,
            task_definition=task_definition,
        )
Example #3
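Runs the Airflow webserver on Fargate behind an internet-facing Application Load Balancer with a /health health check, and outputs the load balancer DNS name.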
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: _Cluster,
        shared_airflow_env: dict,
        vpc: _Vpc,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)
        task_definition = FargateTaskDefinition(
            self, "task-def", cpu=512, memory_limit_mib=1024
        )
        container = task_definition.add_container(
            "container",
            image=ContainerImage.from_registry("apache/airflow:1.10.12-python3.8"),
            command=["webserver"],
            logging=LogDriver.aws_logs(stream_prefix="webserver"),
            environment=shared_airflow_env,
        )

        port_mapping = PortMapping(container_port=8080, host_port=8080)
        container.add_port_mappings(port_mapping)

        service = FargateService(
            self,
            "service",
            cluster=cluster,
            task_definition=task_definition,
        )

        lb = ApplicationLoadBalancer(self, "lb", vpc=vpc, internet_facing=True)
        listener = lb.add_listener("public_listener", port=80, open=True)

        health_check = HealthCheck(
            interval=Duration.seconds(60),
            path="/health",
            timeout=Duration.seconds(5),
        )

        listener.add_targets(
            "webserver",
            port=8080,
            targets=[service],
            health_check=health_check,
        )

        CfnOutput(self, "LoadBalancerDNS", value=lb.load_balancer_dns_name)
Example #4
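Builds a bot container from a local Dockerfile, with dedicated execution and task roles (the task role carries AmazonSESFullAccess) and environment variables taken from os.environ.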
 def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
     super().__init__(scope, _id, **kwargs)
     task_definition = FargateTaskDefinition(
         self,
         'TaskDefinition',
         cpu=256,
         memory_limit_mib=512,
         execution_role=Role(
             self,
             'ExecutionRole',
             assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com'))
         ),
         task_role=Role(
             self,
             'TaskRole',
             assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com')),
             managed_policies=[
                 ManagedPolicy.from_aws_managed_policy_name('AmazonSESFullAccess')
             ]
         )
     )
     task_definition.add_container(
         'Container',
         image=ContainerImage.from_asset(
             getcwd(),
             file='Dockerfile',
             repository_name='jqbx-bot',
             exclude=['cdk.out']
         ),
         command=['pipenv', 'run', 'python', '-u', '-m', 'src.main'],
         environment={
             'SPOTIFY_USER_ID': environ.get('SPOTIFY_USER_ID'),
             'JQBX_ROOM_ID': environ.get('JQBX_ROOM_ID'),
             'JQBX_BOT_DISPLAY_NAME': environ.get('JQBX_BOT_DISPLAY_NAME'),
             'JQBX_BOT_IMAGE_URL': environ.get('JQBX_BOT_IMAGE_URL'),
             'DATA_SERVICE_BASE_URL': environ.get('DATA_SERVICE_BASE_URL')
         },
         logging=AwsLogDriver(
             stream_prefix='jqbx-bot',
             log_group=LogGroup(self, 'LogGroup')
         )
     )
     cluster = Cluster(self, '%sCluster' % _id)
     FargateService(self,
                    '%sService' % _id,
                    cluster=cluster,
                    task_definition=task_definition,
                    desired_count=1)
Example #5
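Defines a one-off Airflow initdb task and exports its task definition ARN via CfnOutput.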
    def __init__(
        self, scope: Construct, id: str, shared_airflow_env: dict, **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)
        task_definition = FargateTaskDefinition(
            self,
            "initdb",
            cpu=512,
            memory_limit_mib=1024,
        )
        task_definition.add_container(
            "initdb",
            image=ContainerImage.from_registry("apache/airflow:1.10.12-python3.8"),
            command=["initdb"],
            logging=LogDriver.aws_logs(stream_prefix="initdb"),
            environment=shared_airflow_env,
        )

        CfnOutput(self, "init-task-def", value=task_definition.task_definition_arn)
Example #6
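Runs a chat service on Fargate behind an Application Load Balancer, pulling its image from ECR, with task-count auto scaling between 2 and 5 tasks.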
 def __init__(self, scope: core.Construct, id: str, vpc, ecr_repo,
              **kwargs) -> None:
     super().__init__(scope, id, **kwargs)
     task_definition = FargateTaskDefinition(self,
                                             "chatTaskDefinition",
                                             memory_limit_mib=2048,
                                             cpu=512)
     ecs_cluster = Cluster(self,
                           "chatCluster",
                           cluster_name="chat-cluster",
                           vpc=vpc)
     # task_definition and task_image_options are mutually exclusive on
     # ApplicationLoadBalancedFargateService, so the container image and port
     # mapping (port 80, the pattern's default) go on the task definition.
     container = task_definition.add_container(
         "chatContainer",
         image=ContainerImage.from_ecr_repository(ecr_repo))
     container.add_port_mappings(PortMapping(container_port=80))
     fargate_service = ApplicationLoadBalancedFargateService(
         self,
         "FargateService",
         cluster=ecs_cluster,
         task_definition=task_definition,
         desired_count=3,
         service_name="chat-service")
     fargate_service.service.auto_scale_task_count(min_capacity=2,
                                                   max_capacity=5)
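Example #7
An Altimeter scanning stack: an S3 bucket for scan output, an ECS cluster, a Fargate task triggered both on a daily schedule and by a custom event, task-role policies for cross-account scanning, and a Lambda function that imports the resulting RDF into Neo4j.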
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = Bucket(self, "s3-bucket-altimeter",
            bucket_name=config["s3_bucket"],
            # Encryption (S3_MANAGED) is disabled: it is not strictly required and it
            # conflicts with SCP guardrails set by Control Tower on the Audit account.
            encryption=BucketEncryption.UNENCRYPTED,
            block_public_access=BlockPublicAccess.BLOCK_ALL
        )

        cluster = Cluster(self, "ecs-cluster-altimeter", 
            cluster_name="ecsclstr-altimeter--default",
            vpc=vpc               
        )

        task_role = Role(self, "iam-role-altimeter-task-role",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            # Within the account where the scanner runs, the task role is (partially)
            # used for scanning resources, rather than the altimeter-scanner-access role.
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
            ]
        )

        task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
            task_role=task_role,
            memory_limit_mib=self.MEMORY_LIMIT,
            cpu=self.CPU
        )

        docker_path = os.path.join(os.path.curdir, "..")

        image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter', 
            directory=docker_path,
            file="scanner.Dockerfile"
        )            

        task_definition.add_container("ecs-container-altimeter",
            image=ContainerImage.from_docker_image_asset(image_asset),
            # memory_limit_mib=self.MEMORY_LIMIT,
            # cpu=self.CPU,
            environment={
                "CONFIG_PATH": config["altimeter_config_path"],
                "S3_BUCKET": config["s3_bucket"]
            },
            logging=AwsLogDriver(
                stream_prefix='altimeter',
                log_retention=RetentionDays.TWO_WEEKS
            )
        )

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["arn:aws:iam::*:role/"+config["account_execution_role"]],
            actions=['sts:AssumeRole']
        ))

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=[
                "arn:aws:s3:::"+config["s3_bucket"],
                "arn:aws:s3:::"+config["s3_bucket"]+"/*"
            ],
            actions=["s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject",
                "s3:Abort*",
                "s3:PutObjectTagging"]
        ))

        # Grant the ability to record the stdout to CloudWatch Logs
        # TODO: Refine
        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["*"],
            actions=['logs:*']
        ))

        # Trigger task every 24 hours
        Rule(self, "events-rule-altimeter-daily-scan",
            rule_name="evrule--altimeter-daily-scan",
            schedule=Schedule.cron(hour="0", minute="0"),
            description="Daily altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )

        # Trigger task manually via event
        Rule(self, "events-rule-altimeter-manual-scan",
            rule_name="evrule--altimeter-manual-scan",
            event_pattern=EventPattern(source=['altimeter']), 
            description="Manual altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )        


        # Don't put the Neo4j importer Lambda in a separate stack: that would create
        # a circular reference with the S3 event source, and an imported bucket can't
        # be used as an event source (you need a Bucket, not an IBucket).
        neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
            function_name="function-altimeter--neo4j-importer",             
            entry="../neo4j-importer",
            index="app.py",
            handler="lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            memory_size=256,
            timeout=cdk.Duration.seconds(60),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            }
        )

        neo4j_importer_function.add_event_source(
            S3EventSource(bucket,
                events=[EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                filters=[{"prefix": "raw/", "suffix": ".rdf"}]
            )
        )

        # Grant lambda read/write access to the S3 bucket for reading raw rdf, writing prepared rdf and generating signed uri
        bucket.grant_read_write(neo4j_importer_function.role)
        # Grant lambda read access to the neo4j user secret
        neo4j_user_secret.grant_read(neo4j_importer_function.role)
Example #8
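A reusable EC2-backed service construct: a bridge-mode Ec2TaskDefinition with volumes and mount points, an image reference resolved from Parameter Store, placement spread across availability zones, and ALB target registration.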
    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        subdomain_name: str,
        deployment: Deployment,
        policy: Policy,
        application_name: str,
        image_name: str,
        port: int,
        memory_limit_mib: int,
        desired_count: int,
        cluster: ICluster,
        priority: int,
        path_pattern: Optional[str] = None,
        health_check_grace_period: Optional[Duration] = None,
        allow_via_http: Optional[bool] = False,
        command: Optional[List[str]] = None,
        environment: Mapping[str, str] = {},
        secrets: Mapping[str, Secret] = {},
        volumes: Mapping[str, Volume] = {},
    ) -> None:
        super().__init__(scope, id)

        full_application_name = f"{deployment.value}-{application_name}"

        log_group = tasks.add_logging(full_application_name)
        self.task_role = tasks.add_role(full_application_name)
        policy.add_role(self.task_role)

        logging = LogDrivers.aws_logs(
            stream_prefix=full_application_name,
            log_group=log_group,
        )

        image = ImageFromParameterStore(
            self,
            "ImageName",
            parameter_name=f"/Version/{deployment.value}/{application_name}",
            image_name=image_name,
            policy=policy,
        )

        task_definition = Ec2TaskDefinition(
            self,
            "TaskDef",
            network_mode=NetworkMode.BRIDGE,
            execution_role=self.task_role,
            task_role=self.task_role,
            volumes=list(volumes.values()),
        )

        self.container = task_definition.add_container(
            "Container",
            image=ContainerImage.from_registry(image.image_ref),
            memory_limit_mib=memory_limit_mib,
            logging=logging,
            environment=environment,
            secrets=secrets,
            command=command,
        )
        self.container.add_mount_points(*[
            MountPoint(
                container_path=path,
                read_only=False,
                source_volume=volume.name,
            ) for path, volume in volumes.items()
        ])

        self.add_port(port=port)

        self.service = Ec2Service(
            self,
            "Service",
            cluster=cluster,
            task_definition=task_definition,
            desired_count=desired_count,
            health_check_grace_period=health_check_grace_period,
        )
        policy.add_service(self.service)
        policy.add_cluster(cluster)

        self.service.add_placement_strategies(
            PlacementStrategy.spread_across(BuiltInAttributes.AVAILABILITY_ZONE)
        )

        self.add_target(
            subdomain_name=subdomain_name,
            port=port,
            priority=priority,
            path_pattern=path_pattern,
            allow_via_http=allow_via_http,
        )

        # Remove the security group from this stack, and add it to the ALB stack
        self.service.node.try_remove_child("SecurityGroup1")
        for security_group in cluster.connections.security_groups:
            self.service.connections.add_security_group(security_group)
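Example #9
Deploys Spring Petclinic as per-domain Fargate services (customers, vets, visits) with DynamoDB tables, an X-Ray sidecar, path-based ALB routing, a static website bucket, and a custom resource that rewrites the site's config.js.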
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        vpc = Vpc(self, "MyVpc", max_azs=2)

        ecs_cluster = Cluster(self, 'FargateCluster', vpc=vpc)

        alb = ApplicationLoadBalancer(self,
                                      'EcsLb',
                                      vpc=vpc,
                                      internet_facing=True)

        listener = alb.add_listener('EcsListener', port=80)

        listener.add_fixed_response('Default-Fix', status_code='404')
        # Set the default action on the underlying CfnListener directly.
        listener.node.default_child.default_actions = [{
            "type": "fixed-response",
            "fixedResponseConfig": {
                "statusCode": "404"
            }
        }]

        website_bucket = Bucket(self,
                                'PetclinicWebsite',
                                website_index_document='index.html',
                                public_read_access=True,
                                removal_policy=core.RemovalPolicy.DESTROY)

        deployment = BucketDeployment(
            self,
            'PetclinicDeployWebsite',
            sources=[Source.asset('./spring-petclinic-static')],
            destination_bucket=website_bucket,
            retain_on_delete=False
            #destination_key_prefix='web/static'
        )

        # Modify the config.js with CF custome resource
        modify_policy = [
            PolicyStatement(actions=[
                "s3:PutObject", "s3:PutObjectAcl", "s3:PutObjectVersionAcl",
                "s3:GetObject"
            ],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn + "/*"]),
            PolicyStatement(actions=["s3:ListBucket"],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn]),
            PolicyStatement(actions=["dynamodb:*"],
                            effect=Effect.ALLOW,
                            resources=[
                                "arn:aws:dynamodb:" + self.region + ":" +
                                self.account + ":*"
                            ])
        ]

        with open("custom-resource-code/init.py", encoding="utf-8") as fp:
            code_body = fp.read()

        dynamodb_tables = []

        for s in ['customers', 'vets', 'visits']:
            table = Table(
                self,
                s.capitalize() + 'Table',
                partition_key={
                    'name': 'id',
                    'type': AttributeType.STRING
                },
                removal_policy=core.RemovalPolicy.DESTROY,
                read_capacity=5,
                write_capacity=5,
            )

            dynamodb_tables.append(table.table_name)

            asset = DockerImageAsset(
                self,
                'spring-petclinic-' + s,
                repository_name=self.stack_name + '-' + s,
                directory='./spring-petclinic-serverless/spring-petclinic-' +
                s + '-serverless',
                build_args={
                    'JAR_FILE':
                    'spring-petclinic-' + s + '-serverless-2.0.7.jar'
                })

            ecs_task = FargateTaskDefinition(self,
                                             'TaskDef-Fargate-' + s,
                                             memory_limit_mib=512,
                                             cpu=256)

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=["dynamodb:*"],
                                effect=Effect.ALLOW,
                                resources=[table.table_arn]))

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=['xray:*'],
                                effect=Effect.ALLOW,
                                resources=['*']))

            env = {
                'DYNAMODB_TABLE_NAME': table.table_name,
                'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
            }

            ecs_container = ecs_task.add_container(
                'Container-' + s,
                image=ContainerImage.from_docker_image_asset(asset),
                logging=LogDriver.aws_logs(stream_prefix=s),
                environment=env)

            ecs_container.add_port_mappings(PortMapping(container_port=8080))

            # Sidecar container for X-Ray
            ecs_sidecar_container = ecs_task.add_container(
                'Sidecar-Xray-' + s,
                image=ContainerImage.from_registry('amazon/aws-xray-daemon'))

            ecs_sidecar_container.add_port_mappings(
                PortMapping(container_port=2000, protocol=Protocol.UDP))

            ecs_service = FargateService(self,
                                         'FargateService-' + s,
                                         cluster=ecs_cluster,
                                         service_name='spring-petclinic-' + s,
                                         desired_count=2,
                                         task_definition=ecs_task)

            pattern = '/api/' + s.rstrip('s') + '/*'
            priority = randint(1, 10) * len(s)
            check = HealthCheck(
                path='/api/' + s.rstrip('s') + '/manage',
                healthy_threshold_count=2,
                unhealthy_threshold_count=3,
            )

            target = listener.add_targets('ECS-' + s,
                                          path_pattern=pattern,
                                          priority=priority,
                                          port=80,
                                          targets=[ecs_service],
                                          health_check=check)

        resource = CustomResource(
            self,
            "S3ModifyCustomResource",
            provider=CustomResourceProvider.lambda_(
                SingletonFunction(self,
                                  "CustomResourceSingleton",
                                  uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                                  code=InlineCode(code_body),
                                  handler="index.handler",
                                  timeout=core.Duration.seconds(300),
                                  runtime=Runtime.PYTHON_3_7,
                                  initial_policy=modify_policy)),
            properties={
                "Bucket": website_bucket.bucket_name,
                "InvokeUrl": 'http://' + alb.load_balancer_dns_name + '/',
                "DynamoDBTables": dynamodb_tables
            })

        core.CfnOutput(self,
                       "FargateALBUrl",
                       export_name="FargateALBUrl",
                       value=alb.load_balancer_dns_name)
        core.CfnOutput(self,
                       "FargatePetclinicWebsiteUrl",
                       export_name="FargatePetclinicWebsiteUrl",
                       value=website_bucket.bucket_website_url)
Example #10
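Defines an AWS Batch job whose container image comes from an ECR repository (with a five-image lifecycle rule) and whose job role grants DynamoDB access.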
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        table = dynamodb.Table(
            self,
            "TheTable",
            table_name="cdk-table",
            partition_key=dynamodb.Attribute(
                name="id", type=dynamodb.AttributeType.STRING),
            removal_policy=cdk.RemovalPolicy.DESTROY,
        )

        # compute_environment = batch.ComputeEnvironment(
        #     self,
        #     "MyComputeEnvironment",
        #     compute_environment_name="cdk-env",
        #     compute_resources=batch.ComputeResources(
        #         vpc=Vpc.from_lookup(self, "VPC", is_default=True),
        #     ),
        #     enabled=True,
        #     managed=True,
        # )

        job_role = Role(
            self,
            "BatchJobRole",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="Role for a container in a Batch job",
            role_name="CDK-BatchJobRole",
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name="AmazonDynamoDBFullAccess"),
            ],
        )

        repository = Repository(
            self,
            "MyRepository",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            repository_name="cdk-my-repository",
            lifecycle_rules=[
                LifecycleRule(max_image_count=5, description="Max 5 images")
            ],
        )

        image: ContainerImage = ContainerImage.from_ecr_repository(
            repository=repository,
            tag="latest",
        )

        container = batch.JobDefinitionContainer(
            image=image,
            job_role=job_role,
            command=["python", "run.py", "--help"],
            environment={
                "READINGS_TABLE": table.table_name,
                "AWS_REGION": self.region,
            },
            vcpus=1,
            log_configuration=batch.LogConfiguration(
                log_driver=batch.LogDriver.AWSLOGS),
            memory_limit_mib=2048,
        )

        batch.JobDefinition(
            self,
            "JobDefinitionCreate",
            container=container,
            job_definition_name="create",
            retry_attempts=1,
        )
Example #11
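Deploys the Schema CMS API as an ApplicationLoadBalancedFargateService with an nginx front container and a backend container wired to S3, SQS, Secrets Manager, KMS, and SSM parameters.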
    def __init__(
        self,
        scope: App,
        id: str,
        envs: EnvSettings,
        components: ComponentsStack,
        base_resources: BaseResources,
    ):
        super().__init__(scope, id)

        self.db_secret_arn = Fn.import_value(
            BaseResources.get_database_secret_arn_output_export_name(envs))

        self.job_processing_queues = components.data_processing_queues
        self.vpc = base_resources.vpc
        self.db = base_resources.db

        self.app_bucket = Bucket(self, "App", versioned=True)

        if self.app_bucket.bucket_arn:
            CfnOutput(
                self,
                id="AppBucketOutput",
                export_name=self.get_app_bucket_arn_output_export_name(envs),
                value=self.app_bucket.bucket_arn,
            )

        self.pages_bucket = Bucket(self, "Pages", public_read_access=True)

        self.domain_name = StringParameter.from_string_parameter_name(
            self,
            "DomainNameParameter",
            string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value

        self.certificate_arn = StringParameter.from_string_parameter_name(
            self,
            "CertificateArnParameter",
            string_parameter_name="/schema-cms-app/CERTIFICATE_ARN"
        ).string_value

        django_secret = Secret(self,
                               "DjangoSecretKey",
                               secret_name="SCHEMA_CMS_DJANGO_SECRET_KEY")
        lambda_auth_token_secret = Secret(
            self,
            "LambdaAuthToken",
            secret_name="SCHEMA_CMS_LAMBDA_AUTH_TOKEN")

        if lambda_auth_token_secret.secret_arn:
            CfnOutput(
                self,
                id="lambdaAuthTokenArnOutput",
                export_name=self.get_lambda_auth_token_arn_output_export_name(
                    envs),
                value=lambda_auth_token_secret.secret_arn,
            )

        self.django_secret_key = EcsSecret.from_secrets_manager(django_secret)
        self.lambda_auth_token = EcsSecret.from_secrets_manager(
            lambda_auth_token_secret)

        tag_from_context = self.node.try_get_context("app_image_tag")
        tag = tag_from_context if tag_from_context != "undefined" else None

        api_image = ContainerImage.from_ecr_repository(
            repository=Repository.from_repository_name(
                self,
                id="BackendRepository",
                repository_name=BaseECR.get_backend_repository_name(envs)),
            tag=tag,
        )
        nginx_image = ContainerImage.from_ecr_repository(
            repository=Repository.from_repository_name(
                self,
                id="NginxRepository",
                repository_name=BaseECR.get_nginx_repository_name(envs)),
            tag=tag,
        )

        self.api = ApplicationLoadBalancedFargateService(
            self,
            "ApiService",
            service_name=f"{envs.project_name}-api-service",
            cluster=Cluster.from_cluster_attributes(
                self,
                id="WorkersCluster",
                cluster_name="schema-ecs-cluster",
                vpc=self.vpc,
                security_groups=[],
            ),
            task_image_options=ApplicationLoadBalancedTaskImageOptions(
                image=nginx_image,
                container_name="nginx",
                container_port=80,
                enable_logging=True,
            ),
            desired_count=1,
            cpu=512,
            memory_limit_mib=1024,
            certificate=Certificate.from_certificate_arn(
                self, "Cert", certificate_arn=self.certificate_arn),
            domain_name=self.domain_name,
            domain_zone=PrivateHostedZone(
                self,
                "zone",
                vpc=self.vpc,
                zone_name=self.domain_name,
            ),
        )

        self.api.task_definition.add_container(
            "backend",
            image=api_image,
            command=[
                "sh", "-c",
                "/bin/chamber exec $CHAMBER_SERVICE_NAME -- ./scripts/run.sh"
            ],
            logging=AwsLogDriver(stream_prefix="backend-container"),
            environment={
                "POSTGRES_DB": envs.data_base_name,
                "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name,
                "AWS_STORAGE_PAGES_BUCKET_NAME": self.pages_bucket.bucket_name,
                "SQS_WORKER_QUEUE_URL": self.job_processing_queues[0].queue_url,
                "SQS_WORKER_EXT_QUEUE_URL": self.job_processing_queues[1].queue_url,
                "SQS_WORKER_MAX_QUEUE_URL": self.job_processing_queues[2].queue_url,
                "CHAMBER_SERVICE_NAME": "schema-cms-app",
                "CHAMBER_KMS_KEY_ALIAS": envs.project_name,
            },
            secrets={
                "DB_CONNECTION": EcsSecret.from_secrets_manager(
                    Secret.from_secret_arn(self, id="DbSecret", secret_arn=self.db_secret_arn)
                ),
                "DJANGO_SECRET_KEY": self.django_secret_key,
                "LAMBDA_AUTH_TOKEN": self.lambda_auth_token,
            },
            cpu=512,
            memory_limit_mib=1024,
        )

        self.django_secret_key.grant_read(
            self.api.service.task_definition.task_role)

        self.app_bucket.grant_read_write(
            self.api.service.task_definition.task_role)
        self.pages_bucket.grant_read_write(
            self.api.service.task_definition.task_role)

        for queue in self.job_processing_queues:
            queue.grant_send_messages(
                self.api.service.task_definition.task_role)

        self.api.service.connections.allow_to(self.db.connections,
                                              Port.tcp(5432))
        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=["ses:SendRawEmail", "ses:SendBulkTemplatedEmail"],
                resources=["*"],
            ))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=[
                    "kms:Get*", "kms:Describe*", "kms:List*", "kms:Decrypt"
                ],
                resources=[
                    Fn.import_value(
                        BaseKMS.get_kms_arn_output_export_name(envs))
                ],
            ))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(actions=["ssm:DescribeParameters"],
                            resources=["*"]))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=["ssm:GetParameters*"],
                resources=[
                    f"arn:aws:ssm:{self.region}:{self.account}:parameter/schema-cms-app/*"
                ],
            ))