Code example #1
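A Fargate service running the Airflow scheduler: a 0.5 vCPU / 1 GiB task definition with a single container built from the public apache/airflow:1.10.12-python3.8 image, started with the scheduler command. _Cluster is presumably an import alias for aws_ecs.Cluster, and shared_airflow_env carries Airflow settings shared with the other constructs on this page (a sketch follows the code).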
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: _Cluster,
        shared_airflow_env: dict,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)
        task_definition = FargateTaskDefinition(
            self,
            "task-def",
            cpu=512,
            memory_limit_mib=1024,
        )

        task_definition.add_container(
            "container",
            image=ContainerImage.from_registry("apache/airflow:1.10.12-python3.8"),
            command=["scheduler"],
            logging=LogDriver.aws_logs(stream_prefix="scheduler"),
            environment=shared_airflow_env,
        )

        FargateService(
            self,
            "service",
            cluster=cluster,
            task_definition=task_definition,
        )
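The snippets on this page omit their import blocks. Below is a minimal sketch of the CDK v1 imports this example appears to assume; the _Cluster alias and the contents of shared_airflow_env are guesses, not taken from the original project:

    from aws_cdk.core import Construct
    from aws_cdk.aws_ecs import (
        Cluster as _Cluster,  # assumed alias; the original import is not shown
        ContainerImage,
        FargateService,
        FargateTaskDefinition,
        LogDriver,
    )

    # Hypothetical shared Airflow configuration handed to each construct:
    shared_airflow_env = {
        "AIRFLOW__CORE__EXECUTOR": "LocalExecutor",
        "AIRFLOW__CORE__SQL_ALCHEMY_CONN": "postgresql+psycopg2://user:pass@db/airflow",
    }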
Code example #2
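A helper method that assembles a task definition with an EFS-backed volume and a container exposing HTTP, HTTPS, and SSH. DATA_VOLUME, HTTP_PORT, HTTPS_PORT, and SSH_PORT are module-level constants, and get_volume, get_environment, get_mount_point, and _tag_it are defined elsewhere in the same class.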
    def get_task_definition(self, security_group):
        task_definition = FargateTaskDefinition(self._stack,
                                                self._name,
                                                memory_limit_mib=512,
                                                cpu=256)
        task_definition.add_volume(
            name=DATA_VOLUME,
            efs_volume_configuration=self.get_volume(security_group))
        image = ContainerImage.from_asset(directory=self._config.docker_dir)
        container = ContainerDefinition(
            self._stack,
            'obm_container',
            task_definition=task_definition,
            image=image,
            environment=self.get_environment(),
            logging=AwsLogDriver(stream_prefix=self._config.service_name),
        )
        container.add_mount_points(self.get_mount_point())
        container.add_port_mappings(
            PortMapping(container_port=HTTP_PORT, host_port=HTTP_PORT),
            PortMapping(container_port=HTTPS_PORT, host_port=HTTPS_PORT),
            PortMapping(container_port=SSH_PORT, host_port=SSH_PORT),
        )
        self._tag_it(container)
        self._tag_it(task_definition)
        return task_definition
Code example #3
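The Airflow webserver behind an internet-facing Application Load Balancer: the container listens on 8080, the public listener on 80, health checks poll /health every 60 seconds, and the load balancer's DNS name is exported as a stack output.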
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: _Cluster,
        shared_airflow_env: dict,
        vpc: _Vpc,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)
        task_definition = FargateTaskDefinition(
            self, "task-def", cpu=512, memory_limit_mib=1024
        )
        container = task_definition.add_container(
            "container",
            image=ContainerImage.from_registry("apache/airflow:1.10.12-python3.8"),
            command=["webserver"],
            logging=LogDriver.aws_logs(stream_prefix="webserver"),
            environment=shared_airflow_env,
        )

        port_mapping = PortMapping(container_port=8080, host_port=8080)
        container.add_port_mappings(port_mapping)

        service = FargateService(
            self,
            "service",
            cluster=cluster,
            task_definition=task_definition,
        )

        lb = ApplicationLoadBalancer(self, "lb", vpc=vpc, internet_facing=True)
        listener = lb.add_listener("public_listener", port=80, open=True)

        health_check = HealthCheck(
            interval=Duration.seconds(60),
            path="/health",
            timeout=Duration.seconds(5),
        )

        listener.add_targets(
            "webserver",
            port=8080,
            targets=[service],
            health_check=health_check,
        )

        CfnOutput(self, "LoadBalancerDNS", value=lb.load_balancer_dns_name)
Code example #4
File: main.py  Project: jqbx-bot/bot
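A single-container bot deployed as a one-task Fargate service, with explicit execution and task roles (the task role carries AmazonSESFullAccess). The image is built from the project's local Dockerfile, and configuration is read from the synthesizing shell's environment via os.environ.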
    def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)
        task_definition = FargateTaskDefinition(
            self,
            'TaskDefinition',
            cpu=256,
            memory_limit_mib=512,
            execution_role=Role(
                self,
                'ExecutionRole',
                assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com'))
            ),
            task_role=Role(
                self,
                'TaskRole',
                assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com')),
                managed_policies=[
                    ManagedPolicy.from_aws_managed_policy_name('AmazonSESFullAccess')
                ]
            )
        )
        # ECS environment values must be strings, so unset variables are
        # dropped rather than passed through as None.
        environment = {
            key: value
            for key, value in {
                'SPOTIFY_USER_ID': environ.get('SPOTIFY_USER_ID'),
                'JQBX_ROOM_ID': environ.get('JQBX_ROOM_ID'),
                'JQBX_BOT_DISPLAY_NAME': environ.get('JQBX_BOT_DISPLAY_NAME'),
                'JQBX_BOT_IMAGE_URL': environ.get('JQBX_BOT_IMAGE_URL'),
                'DATA_SERVICE_BASE_URL': environ.get('DATA_SERVICE_BASE_URL')
            }.items()
            if value is not None
        }
        task_definition.add_container(
            'Container',
            image=ContainerImage.from_asset(
                getcwd(),
                file='Dockerfile',
                repository_name='jqbx-bot',
                exclude=['cdk.out']
            ),
            command=['pipenv', 'run', 'python', '-u', '-m', 'src.main'],
            environment=environment,
            logging=AwsLogDriver(
                stream_prefix='jqbx-bot',
                log_group=LogGroup(self, 'LogGroup')
            )
        )
        cluster = Cluster(self, '%sCluster' % _id)
        FargateService(self, '%sService' % _id, cluster=cluster,
                       task_definition=task_definition, desired_count=1)
Code example #5
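A standalone task definition that runs airflow initdb. No service wraps it; only the task definition ARN is exported, so the task is presumably launched on demand.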
    def __init__(
        self, scope: Construct, id: str, shared_airflow_env: dict, **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)
        task_definition = FargateTaskDefinition(
            self,
            "initdb",
            cpu=512,
            memory_limit_mib=1024,
        )
        task_definition.add_container(
            "initdb",
            image=ContainerImage.from_registry("apache/airflow:1.10.12-python3.8"),
            command=["initdb"],
            logging=LogDriver.aws_logs(stream_prefix="initdb"),
            environment=shared_airflow_env,
        )

        CfnOutput(self, "init-task-def", value=task_definition.task_definition_arn)
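One way to launch it, as a hedged boto3 sketch; the cluster name and subnet ID are placeholders, and task_def_arn stands for the ARN exported above:

    import boto3

    ecs = boto3.client("ecs")
    ecs.run_task(
        cluster="airflow",                  # hypothetical cluster name
        launchType="FARGATE",
        taskDefinition=task_def_arn,        # the ARN from the CfnOutput above
        networkConfiguration={
            "awsvpcConfiguration": {"subnets": ["subnet-0123456789abcdef0"]}
        },
    )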
Code example #6
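An ApplicationLoadBalancedFargateService from aws_ecs_patterns: three tasks pulled from an ECR repository behind an ALB, with autoscaling bounds of two to five tasks. The pattern accepts either task_definition or task_image_options but not both, so the image is attached to the task definition directly.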
    def __init__(self, scope: core.Construct, id: str, vpc, ecr_repo,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        task_definition = FargateTaskDefinition(self,
                                                "chatTaskDefinition",
                                                memory_limit_mib=2048,
                                                cpu=512)
        # The pattern rejects a task_definition combined with
        # task_image_options, so the image is attached here instead.
        container = task_definition.add_container(
            "chatContainer",
            image=ContainerImage.from_ecr_repository(ecr_repo))
        # Assumed listening port; adjust to whatever the image exposes.
        container.add_port_mappings(PortMapping(container_port=80))
        ecs_cluster = Cluster(self,
                              "chatCluster",
                              cluster_name="chat-cluster",
                              vpc=vpc)
        fargate_service = ApplicationLoadBalancedFargateService(
            self,
            "FargateService",
            cluster=ecs_cluster,
            task_definition=task_definition,
            desired_count=3,
            service_name="chat-service")
        fargate_service.service.auto_scale_task_count(min_capacity=2,
                                                      max_capacity=5)
Code example #7
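The most complete example: an Altimeter scanning stack with an S3 results bucket, an ECS cluster, a task role combining SecurityAudit and ViewOnlyAccess, a Docker image asset, EventBridge rules for daily and manually triggered scans, and a Neo4j importer Lambda fed by S3 object events.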
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = Bucket(self, "s3-bucket-altimeter",
            bucket_name=config["s3_bucket"],
            # Encryption is left disabled: it is not strictly required here and it
            # conflicts with SCP guardrails set by Control Tower on the Audit account.
            encryption=BucketEncryption.UNENCRYPTED,
            block_public_access=BlockPublicAccess.BLOCK_ALL
        )

        cluster = Cluster(self, "ecs-cluster-altimeter", 
            cluster_name="ecsclstr-altimeter--default",
            vpc=vpc               
        )

        task_role = Role(self, "iam-role-altimeter-task-role",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            # It appears that within the account where the scanner is running, the task role is (partially) used for scanning resources (rather than the altimeter-scanner-access role).      
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
            ]
        )

        task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
            task_role=task_role,
            memory_limit_mib=self.MEMORY_LIMIT,
            cpu=self.CPU
        )

        docker_path = os.path.join(os.path.curdir,"..")

        image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter', 
            directory=docker_path,
            file="scanner.Dockerfile"
        )            

        task_definition.add_container("ecs-container-altimeter",
            image=ContainerImage.from_docker_image_asset(image_asset),
            environment={
                "CONFIG_PATH": config["altimeter_config_path"],
                "S3_BUCKET": config["s3_bucket"]
            },
            logging=AwsLogDriver(
                stream_prefix='altimeter',
                log_retention=RetentionDays.TWO_WEEKS
            )
        )

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["arn:aws:iam::*:role/"+config["account_execution_role"]],
            actions=['sts:AssumeRole']
        ))

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=[
                "arn:aws:s3:::"+config["s3_bucket"],
                "arn:aws:s3:::"+config["s3_bucket"]+"/*"
            ],
            actions=["s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject",
                "s3:Abort*",
                "s3:PutObjectTagging"]
        ))

        # Grant the ability to record the stdout to CloudWatch Logs
        # TODO: Refine
        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["*"],
            actions=['logs:*']
        ))

        # Trigger task every 24 hours
        Rule(self, "events-rule-altimeter-daily-scan",
            rule_name="evrule--altimeter-daily-scan",
            schedule=Schedule.cron(hour="0", minute="0"),
            description="Daily altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )

        # Trigger task manually via event
        Rule(self, "events-rule-altimeter-manual-scan",
            rule_name="evrule--altimeter-manual-scan",
            event_pattern=EventPattern(source=['altimeter']), 
            description="Manual altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )        


        # Keep the Neo4j importer Lambda in this stack: a separate stack causes a
        # circular reference with the S3 event source, and an imported bucket cannot
        # be used as an event source (S3EventSource needs a Bucket, not an IBucket).
        neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
            function_name="function-altimeter--neo4j-importer",             
            entry="../neo4j-importer",
            index="app.py",
            handler="lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            memory_size=256,
            timeout=cdk.Duration.seconds(60),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            }
        )

        neo4j_importer_function.add_event_source(
            S3EventSource(bucket,
                events= [EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                filters= [ { "prefix": "raw/", "suffix": ".rdf"}]
            )
        )

        # Grant the Lambda read/write access to the S3 bucket: it reads raw RDF,
        # writes prepared RDF, and generates signed URIs.
        bucket.grant_read_write(neo4j_importer_function)
        # Grant the Lambda read access to the Neo4j user secret.
        neo4j_user_secret.grant_read(neo4j_importer_function)
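The manual-scan rule above matches any event whose source is altimeter, so a scan can be kicked off with a minimal put_events call. A sketch; the detail type and payload are arbitrary since the rule only filters on source:

    import boto3

    boto3.client("events").put_events(
        Entries=[{
            "Source": "altimeter",        # must match the EventPattern above
            "DetailType": "manual-scan",  # arbitrary
            "Detail": "{}",
        }]
    )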
Code example #8
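A Spring PetClinic port: one Fargate service per microservice (customers, vets, visits) behind a shared ALB with path-based routing, a DynamoDB table and an X-Ray sidecar per service, a static website served from S3, and a singleton-Lambda custom resource that rewrites the site's config.js to point at the ALB.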
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = Vpc(self, "MyVpc", max_azs=2)

        ecs_cluster = Cluster(self, 'FargateCluster', vpc=vpc)

        alb = ApplicationLoadBalancer(self,
                                      'EcsLb',
                                      vpc=vpc,
                                      internet_facing=True)

        listener = alb.add_listener('EcsListener', port=80)

        # With no conditions attached, the fixed response becomes the listener's
        # default action, so no raw override of the underlying CfnListener is needed.
        listener.add_fixed_response('Default-Fix', status_code='404')

        website_bucket = Bucket(self,
                                'PetclinicWebsite',
                                website_index_document='index.html',
                                public_read_access=True,
                                removal_policy=core.RemovalPolicy.DESTROY)

        deployment = BucketDeployment(
            self,
            'PetclinicDeployWebsite',
            sources=[Source.asset('./spring-petclinic-static')],
            destination_bucket=website_bucket,
            retain_on_delete=False
            #destination_key_prefix='web/static'
        )

        # Modify config.js with a CloudFormation custom resource
        modify_policy = [
            PolicyStatement(actions=[
                "s3:PutObject", "s3:PutObjectAcl", "s3:PutObjectVersionAcl",
                "s3:GetObject"
            ],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn + "/*"]),
            PolicyStatement(actions=["s3:ListBucket"],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn]),
            PolicyStatement(actions=["dynamodb:*"],
                            effect=Effect.ALLOW,
                            resources=[
                                "arn:aws:dynamodb:" + self.region + ":" +
                                self.account + ":*"
                            ])
        ]

        with open("custom-resource-code/init.py", encoding="utf-8") as fp:
            code_body = fp.read()

        dynamodb_tables = []

        for s in ['customers', 'vets', 'visits']:
            table = Table(
                self,
                s.capitalize() + 'Table',
                partition_key={
                    'name': 'id',
                    'type': AttributeType.STRING
                },
                removal_policy=core.RemovalPolicy.DESTROY,
                read_capacity=5,
                write_capacity=5,
            )

            dynamodb_tables.append(table.table_name)

            asset = DockerImageAsset(
                self,
                'spring-petclinic-' + s,
                repository_name=self.stack_name + '-' + s,
                directory='./spring-petclinic-serverless/spring-petclinic-' +
                s + '-serverless',
                build_args={
                    'JAR_FILE':
                    'spring-petclinic-' + s + '-serverless-2.0.7.jar'
                })

            ecs_task = FargateTaskDefinition(self,
                                             'TaskDef-Fargate-' + s,
                                             memory_limit_mib=512,
                                             cpu=256)

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=["dynamodb:*"],
                                effect=Effect.ALLOW,
                                resources=[table.table_arn]))

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=['xray:*'],
                                effect=Effect.ALLOW,
                                resources=['*']))

            env = {
                'DYNAMODB_TABLE_NAME': table.table_name,
                'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
            }

            ecs_container = ecs_task.add_container(
                'Container-' + s,
                image=ContainerImage.from_docker_image_asset(asset),
                logging=LogDriver.aws_logs(stream_prefix=s),
                environment=env)

            ecs_container.add_port_mappings(PortMapping(container_port=8080))

            # Sidecar container for X-Ray
            ecs_sidecar_container = ecs_task.add_container(
                'Sidecar-Xray-' + s,
                image=ContainerImage.from_registry('amazon/aws-xray-daemon'))

            ecs_sidecar_container.add_port_mappings(
                PortMapping(container_port=2000, protocol=Protocol.UDP))

            ecs_service = FargateService(self,
                                         'FargateService-' + s,
                                         cluster=ecs_cluster,
                                         service_name='spring-petclinic-' + s,
                                         desired_count=2,
                                         task_definition=ecs_task)

            pattern = '/api/' + s.rstrip('s') + '/*'
            # Listener rule priorities must be unique per listener; deriving them
            # from randint keeps the sample short but can collide between services.
            priority = randint(1, 10) * len(s)
            check = HealthCheck(
                path='/api/' + s.rstrip('s') + '/manage',
                healthy_threshold_count=2,
                unhealthy_threshold_count=3,
            )

            target = listener.add_targets('ECS-' + s,
                                          path_pattern=pattern,
                                          priority=priority,
                                          port=80,
                                          targets=[ecs_service],
                                          health_check=check)

        resource = CustomResource(
            self,
            "S3ModifyCustomResource",
            provider=CustomResourceProvider.lambda_(
                SingletonFunction(self,
                                  "CustomResourceSingleton",
                                  uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                                  code=InlineCode(code_body),
                                  handler="index.handler",
                                  timeout=core.Duration.seconds(300),
                                  runtime=Runtime.PYTHON_3_7,
                                  initial_policy=modify_policy)),
            properties={
                "Bucket": website_bucket.bucket_name,
                "InvokeUrl": 'http://' + alb.load_balancer_dns_name + '/',
                "DynamoDBTables": dynamodb_tables
            })

        core.CfnOutput(self,
                       "FargateALBUrl",
                       export_name="FargateALBUrl",
                       value=alb.load_balancer_dns_name)
        core.CfnOutput(self,
                       "FargatePetclinicWebsiteUrl",
                       export_name="FargatePetclinicWebsiteUrl",
                       value=website_bucket.bucket_website_url)