Example #1
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        self.task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=aws_ecs.ContainerImage.from_registry(
                "adam9098/ecsdemo-capacityproviders:latest"),
            container_port=5000,
        )

        self.load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "FargateCapacityProviderService",
            service_name='ecsdemo-capacityproviders-fargate',
            cluster=self.base_platform.ecs_cluster,
            cpu=256,
            memory_limit_mib=512,
            desired_count=10,
            public_load_balancer=True,
            task_image_options=self.task_image,
            platform_version=aws_ecs.FargatePlatformVersion.VERSION1_4)

        # node.default_child ought to return the CfnService here, but this service's
        # children include both the CfnService and a security group, so take the
        # first child explicitly instead.
        # self.cfn_resource = self.load_balanced_service.service.node.default_child
        self.cfn_resource = self.load_balanced_service.service.node.children[0]

        self.cfn_resource.add_deletion_override("Properties.LaunchType")

        self.load_balanced_service.task_definition.add_to_task_role_policy(
            aws_iam.PolicyStatement(
                actions=['ecs:ListTasks', 'ecs:DescribeTasks'],
                resources=['*']))
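
A less position-dependent way to reach the underlying CfnService is to filter the service's children by type; the capacity provider strategy can then be set with a property override. This is only a sketch: the FARGATE_SPOT strategy below is illustrative and not part of the original example.

        self.cfn_resource = next(
            child for child in self.load_balanced_service.service.node.children
            if isinstance(child, aws_ecs.CfnService))

        self.cfn_resource.add_deletion_override("Properties.LaunchType")
        # Illustrative strategy only; adjust providers and weights as needed.
        self.cfn_resource.add_property_override(
            "CapacityProviderStrategy",
            [{"CapacityProvider": "FARGATE_SPOT", "Weight": 1}])
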
Example #2
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        self.fargate_task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=aws_ecs.ContainerImage.from_registry(
                "brentley/ecsdemo-frontend"),
            container_port=3000,
            environment={
                "CRYSTAL_URL": "http://ecsdemo-crystal.service:3000/crystal",
                "NODEJS_URL": "http://ecsdemo-nodejs.service:3000"
            },
        )

        self.fargate_load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "FrontendFargateLBService",
            cluster=self.base_platform.ecs_cluster,
            cpu=256,
            memory_limit_mib=512,
            desired_count=1,
            public_load_balancer=True,
            cloud_map_options=self.base_platform.sd_namespace,
            task_image_options=self.fargate_task_image)

        self.fargate_load_balanced_service.service.connections.allow_to(
            self.base_platform.services_sec_grp,
            port_range=aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                    string_representation="frontendtobackend",
                                    from_port=3000,
                                    to_port=3000))
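
For a single TCP port, the same rule can also be written with the Port.tcp helper; a minimal sketch using the same objects as above:

        self.fargate_load_balanced_service.service.connections.allow_to(
            self.base_platform.services_sec_grp,
            aws_ec2.Port.tcp(3000),
            description="frontend to backend")
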
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        _myVpc = _ec2.Vpc(self, "myVpcID", max_azs=2, nat_gateways=1)
        _ecs_cluster_2 = _ecs.Cluster(self,
                                      'cdkFargateCluster',
                                      vpc=_myVpc,
                                      cluster_name="CdkFargate_Cluser_2")
        _UI_SERVICE = 'thiethaa/alc-autobots-ui:latest'

        # Candidate service principals kept for reference:
        # ecr.amazonaws.com, ecs-tasks.amazonaws.com, ecs.amazonaws.com

        _container_image = _ecs.ContainerImage.from_registry(_UI_SERVICE)
        _task_role = _iam.Role(
            self,
            "taskExeRoleID",
            # role_name="ecsTaskExecutionRole",
            assumed_by=_iam.ServicePrincipal(
                service='ecs-tasks.amazonaws.com'))

        alb_task_image_options = _ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            # "taskImageOptions",
            image=_container_image,
            container_port=4040,
            container_name="autobots-UI-container",
            task_role=_task_role,
            enable_logging=True)

        # A Fargate service running on an ECS cluster fronted by an application load balancer
        app_load_balanced_fargate_service = _ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            'FargateServiceALB',
            cpu=256,
            memory_limit_mib=512,
            cluster=_ecs_cluster_2,
            desired_count=1,
            # listener_port=4040,
            open_listener=True,
            min_healthy_percent=0,
            protocol=elb.ApplicationProtocol.HTTP,
            service_name='autobots_ui',
            task_image_options=alb_task_image_options)

        # app_load_balanced_fargate_service.task_definition.execution_role.role_name("ecsTaskExecutionRole")

        app_load_balancer_dns_name = app_load_balanced_fargate_service.load_balancer.load_balancer_dns_name

        alb_cfn_output = core.CfnOutput(
            self,
            "albDNSName",
            value=app_load_balancer_dns_name,
            description="Application load balancer DNS Name for autobots UI")
        listener_output = core.CfnOutput(
            self,
            "albListener",
            value=app_load_balanced_fargate_service.listener.listener_arn,
            description="ALB listener ARN for autobots UI")
Example #4
    def __init__(self, scope: core.Construct, construct_id: str, cluster, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        ecsPatterns.ApplicationLoadBalancedFargateService(
            self, "nginx",
            cluster=cluster,
            memory_limit_mib=1024,
            cpu=512,
            task_image_options=ecsPatterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("nginx:stable"),
            ),
        )
Example #5
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        ecsp.ApplicationLoadBalancedFargateService(
            self,
            "MyWebServer",
            task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
                # image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")),
                image=ecs.ContainerImage.from_registry(
                    "public.ecr.aws/f0e4c8e7/nginx-test:latest")),
            public_load_balancer=True)
Example #6
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        self.task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=aws_ecs.ContainerImage.from_registry("adam9098/ecsdemo-capacityproviders:latest"),
            container_port=5000,
            environment={
                'AWS_DEFAULT_REGION': getenv('AWS_DEFAULT_REGION')
            }
        )

        self.load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedEc2Service(
            self, "EC2CapacityProviderService",
            service_name='ecsdemo-capacityproviders-ec2',
            cluster=self.base_platform.ecs_cluster,
            cpu=256,
            memory_limit_mib=512,
            desired_count=1,
            #desired_count=10,
            public_load_balancer=True,
            task_image_options=self.task_image,
        )
        
        # Tune the target group for the Spot-backed service: lower the deregistration
        # delay below the 120-second default and set the healthy threshold to 2 so a
        # new task is marked healthy within about a minute.
        self.cfn_target_group = (
            self.load_balanced_service.node.find_child('LB')
            .node.find_child('PublicListener')
            .node.find_child('ECSGroup')
            .node.default_child)
        self.cfn_target_group.target_group_attributes = [
            {"key": "deregistration_delay.timeout_seconds", "value": "90"}]
        self.cfn_target_group.healthy_threshold_count = 2
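
        # A simpler alternative (a sketch using the pattern's own target_group
        # property, as Example #13 below does, instead of walking the construct tree):
        # self.load_balanced_service.target_group.set_attribute(
        #     "deregistration_delay.timeout_seconds", "90")
        # self.load_balanced_service.target_group.configure_health_check(
        #     healthy_threshold_count=2)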

        # node.default_child ought to return the CfnService here, but this service's
        # children include both the CfnService and a security group, so take the
        # first child explicitly instead.
        # self.cfn_resource = self.load_balanced_service.service.node.default_child
        self.cfn_resource = self.load_balanced_service.service.node.children[0]
        
        self.cfn_resource.add_deletion_override("Properties.LaunchType")
            
        self.load_balanced_service.task_definition.add_to_task_role_policy(
            aws_iam.PolicyStatement(
                actions=[
                    'ecs:ListTasks',
                    'ecs:DescribeTasks'
                ],
                resources=['*']
            )
        )
Example #7

    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        self.fargate_task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=aws_ecs.ContainerImage.from_registry(
                "adam9098/ecsdemo-frontend"),
            container_port=3000,
            environment={
                "CRYSTAL_URL": "http://ecsdemo-crystal.service:3000/crystal",
                "NODEJS_URL": "http://ecsdemo-nodejs.service:3000",
                "REGION": getenv('AWS_DEFAULT_REGION')
            },
        )

        self.fargate_load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "FrontendFargateLBService",
            service_name='ecsdemo-frontend',
            cluster=self.base_platform.ecs_cluster,
            cpu=256,
            memory_limit_mib=512,
            desired_count=3,
            public_load_balancer=True,
            cloud_map_options=self.base_platform.sd_namespace,
            task_image_options=self.fargate_task_image)

        self.fargate_load_balanced_service.task_definition.add_to_task_role_policy(
            aws_iam.PolicyStatement(actions=['ec2:DescribeSubnets'],
                                    resources=['*']))

        self.fargate_load_balanced_service.service.connections.allow_to(
            self.base_platform.services_sec_grp,
            port_range=aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                    string_representation="frontendtobackend",
                                    from_port=3000,
                                    to_port=3000))

        # Enable Service Autoscaling
        self.autoscale = self.fargate_load_balanced_service.service.auto_scale_task_count(
            min_capacity=1, max_capacity=10)

        self.autoscale.scale_on_cpu_utilization(
            "CPUAutoscaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(30),
            scale_out_cooldown=core.Duration.seconds(30))
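
Memory-based scaling can be layered onto the same scalable target in the same way; a sketch mirroring Example #13, with an illustrative 80% target:

        self.autoscale.scale_on_memory_utilization(
            "MemoryAutoscaling",
            target_utilization_percent=80,
            scale_in_cooldown=core.Duration.seconds(30),
            scale_out_cooldown=core.Duration.seconds(30))
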
Example #8
    def create_fagate_ALB(self, vpc):
        # Create ECS
        cluster = ecs.Cluster(self, "GWCluster", vpc=vpc)

        ecs_patterns.ApplicationLoadBalancedFargateService(
            self, 
            "GWService",
            cluster=cluster,            # Required
            cpu=256,                    # Default is 256
            desired_count=1,            # Default is 1
            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("856419311962.dkr.ecr.cn-north-1.amazonaws.com.cn/airflow_analyze"),
                container_port=80
            ),
            memory_limit_mib=512,      # Default is 512
            public_load_balancer=True
        )
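
A minimal sketch of how this helper might be wired into a stack constructor; the VPC construction here is an assumption (using aws_ec2 as ec2, like the neighbouring examples) and is not part of the original:

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Hypothetical VPC just for illustration; the helper below creates the
        # cluster and load-balanced service inside it.
        vpc = ec2.Vpc(self, "GWVpc", max_azs=2)
        self.create_fagate_ALB(vpc)
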
Example #9
    def __init__(self, scope, name, **kwargs):
        super().__init__(scope, name, **kwargs)
        
        vpc = ec2.Vpc(self, 'WhereAreTheBedsStack-vpc', max_azs=2)

        cluster = ecs.Cluster(self, 'WhereAreTheBedsStack-cluster', vpc=vpc)
                
        fargate = ecs_patterns.ApplicationLoadBalancedFargateService(self, 'WhereAreTheBedsStack-fargate',
            cluster=cluster,
            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset(path.join(path.dirname(__file__), 'api')),
                container_port=80
            )
        )

        core.CfnOutput(
            self, "WhereAreTheBedsStack-dns",
            value=fargate.load_balancer.load_balancer_dns_name
        )
Example #10
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        myVpc = ec2.Vpc(self, "MyVPC", max_azs=2)
        # cdk deploy
        myCluster = ecs.Cluster(self, "MyCluster", vpc=myVpc)
        # cdk diff
        fargateService = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "MyFargateApp",
            cluster=myCluster,
            # ALB listener port; the container port defaults to 80 when not set
            listener_port=80,
            memory_limit_mib=1024,
            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset("./app/"),
                environment=dict(
                    ENV_VAR1="test environment variable 1 value",
                    ENV_VAR2="test environment variable 2 value")))
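
If the containerized app serves its health check somewhere other than "/", the target group can be tuned the same way Example #13 does; a sketch with an illustrative path:

        fargateService.target_group.configure_health_check(path="/health")
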
Example #11
    def __init__(self, scope: core.Construct, id: str, cluster: ecs.Cluster, rds: rds.DatabaseInstance,
                 config: dict, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        postgraphile_task_image_options = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_registry(name=config['VDB_POSTGRAPHILE_IMAGE']),
            container_name="vdb-postgraphile",
            container_port=5000,
            environment={
                "SCHEMAS": "api,maker,public",
                "DATABASE_URL": f"postgresql://{config['DATABASE_USER']}:{config['DATABASE_PASSWORD']}"
                f"@{rds.db_instance_endpoint_address}:{rds.db_instance_endpoint_port}/{config['DATABASE_NAME']}",
            }
        )

        ecs_patterns.ApplicationLoadBalancedFargateService(
            self, "VdbPostgraphileAlbService",
            service_name="VdbPostgraphileService",
            cluster=cluster,
            task_image_options=postgraphile_task_image_options,
            cpu=1024,
            memory_limit_mib=2048
        )
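
Embedding the database password in DATABASE_URL exposes it in plain text in the task definition. Below is a sketch of the secrets-based alternative used by other examples on this page (e.g. the Keycloak service further down), assuming aws_secretsmanager is imported as secretsmanager; the secret construction and the split into separate variables are illustrative only, since the original app expects a single DATABASE_URL.

        # Illustrative secret; the original reads the password from a plain config dict.
        db_password_secret = secretsmanager.Secret(
            self, "VdbDatabasePassword",
            generate_secret_string=secretsmanager.SecretStringGenerator(exclude_punctuation=True))

        postgraphile_task_image_options = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_registry(name=config['VDB_POSTGRAPHILE_IMAGE']),
            container_name="vdb-postgraphile",
            container_port=5000,
            environment={
                "SCHEMAS": "api,maker,public",
                "DATABASE_HOST": rds.db_instance_endpoint_address,
                "DATABASE_PORT": rds.db_instance_endpoint_port,
                "DATABASE_NAME": config['DATABASE_NAME'],
                "DATABASE_USER": config['DATABASE_USER'],
            },
            secrets={
                "DATABASE_PASSWORD": ecs.Secret.from_secrets_manager(db_password_secret),
            })
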
Example #12
    def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.__datalake = datalake
        self.security_group = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            vpc=self.datalake.vpc,
            allow_all_outbound=True,
            description='SonarQube Security Group')

        self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                             connection=ec2.Port.all_traffic(),
                                             description='Allow any traffic')

        self.sonarqube_svr_ecr = ecr.DockerImageAsset(
            self,
            'Repo',
            directory=os.path.join(root_dir, 'images/sonarqube-server'),
            repository_name='sonarqube')

        self.sonarqube_cli_ecr = ecr.DockerImageAsset(
            self,
            'Cli',
            directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
            repository_name='sonarqube-cli')

        self.database = rds.DatabaseCluster(
            self,
            'Database',
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_9),
            default_database_name='sonarqube',
            removal_policy=core.RemovalPolicy.DESTROY,
            credentials=rds.Credentials.from_username(
                username='******',
                password=core.SecretValue(value='postgres')),
            instance_props=rds.InstanceProps(
                vpc=self.datalake.vpc,
                security_groups=[self.security_group],
                instance_type=ec2.InstanceType('r6g.xlarge')))

        # self.ecs_cluster = ecs.Cluster(self,'SonarCluster',
        #   container_insights=True,
        #   vpc=self.datalake.vpc,
        #   capacity=ecs.AddCapacityOptions(
        #     machine_image_type= ecs.MachineImageType.AMAZON_LINUX_2,
        #     instance_type=ec2.InstanceType('m5.xlarge'),
        #     allow_all_outbound=True,
        #     associate_public_ip_address=False,
        #     vpc_subnets= ec2.SubnetSelection(subnet_type= ec2.SubnetType.PUBLIC),
        #     desired_capacity=2))

        # self.service = ecsp.ApplicationLoadBalancedEc2Service(self,'SonarEc2',
        #   cluster=self.ecs_cluster,
        #   desired_count=1,
        #   listener_port=80,
        #   memory_reservation_mib= 4 * 1024,
        #   task_image_options= ecsp.ApplicationLoadBalancedTaskImageOptions(
        #     image= ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
        #     container_name='sonarqube-svr',
        #     container_port=9000,
        #     enable_logging=True,
        #     environment={
        #       '_SONAR_JDBC_URL':'jdbc:postgresql://{}/sonarqube'.format(
        #           self.database.cluster_endpoint.hostname),
        #       '_SONAR_JDBC_USERNAME':'******',
        #       '_SONAR_JDBC_PASSWORD':'******'
        #     }))

        self.service = ecsp.ApplicationLoadBalancedFargateService(
            self,
            'Server',
            assign_public_ip=True,
            vpc=self.datalake.vpc,
            desired_count=1,
            cpu=4096,
            memory_limit_mib=8 * 1024,
            listener_port=80,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            security_groups=[self.security_group, self.datalake.efs_sg],
            task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_docker_image_asset(
                    asset=self.sonarqube_svr_ecr),
                container_name='sonarqube-svr',
                container_port=9000,
                enable_logging=True,
                environment={
                    '_SONAR_JDBC_URL':
                    'jdbc:postgresql://{}/sonarqube'.format(
                        self.database.cluster_endpoint.hostname),
                    '_SONAR_JDBC_USERNAME':
                    '******',
                    '_SONAR_JDBC_PASSWORD':
                    '******'
                }))

        for name in ['AmazonElasticFileSystemClientFullAccess']:
            self.service.task_definition.task_role.add_managed_policy(
                iam.ManagedPolicy.from_aws_managed_policy_name(name))

        # Override container specific settings
        container = self.service.task_definition.default_container

        # Raise the open-file limit that SonarQube's embedded Elasticsearch needs to start
        container.add_ulimits(
            ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                       soft_limit=262145,
                       hard_limit=262145))

        for folder in ['data', 'logs']:
            efs_ap = self.datalake.efs.add_access_point(
                'sonarqube-' + folder,
                create_acl=efs.Acl(owner_gid="0",
                                   owner_uid="0",
                                   permissions="777"),
                path='/sonarqube/' + folder)

            self.service.task_definition.add_volume(
                name=folder,
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=self.datalake.efs.file_system_id,
                    transit_encryption='ENABLED',
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=efs_ap.access_point_id,
                        iam='DISABLED')))

            container.add_mount_points(
                ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                               source_volume=folder,
                               read_only=False))
Example #13
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 vpc="",
                 health_check_path="/",
                 env_level="prd",
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        web_asset = ecr_assets.DockerImageAsset(self,
                                                'web_asset',
                                                directory="docker",
                                                file="web/Dockerfile")
        app_asset = ecr_assets.DockerImageAsset(self,
                                                'app_asset',
                                                directory="docker",
                                                file="app/Dockerfile")

        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)

        alb_options = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_ecr_repository(
                web_asset.repository, web_asset.image_uri[-64:]),
            environment={
                "ENV": env_level,
            })

        service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "fargate",
            cluster=cluster,
            desired_count=1,
            task_image_options=alb_options,
            public_load_balancer=True,
            listener_port=80)

        service.target_group.configure_health_check(
            path=health_check_path,
            healthy_threshold_count=2,
            healthy_http_codes="200-399",
            unhealthy_threshold_count=2,
            timeout=core.Duration.seconds(10),
            interval=core.Duration.seconds(15))

        service.target_group.enable_cookie_stickiness(core.Duration.hours(1))

        service.target_group.set_attribute(
            "deregistration_delay.timeout_seconds", "10")

        service.task_definition.add_container(
            "app",
            image=ecs.ContainerImage.from_ecr_repository(
                app_asset.repository, app_asset.image_uri[-64:]),
            logging=ecs.LogDriver.aws_logs(stream_prefix="fargate"),
            environment={
                "ENV": env_level,
            })

        scalable_target = service.service.auto_scale_task_count(
            max_capacity=20)

        scalable_target.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(10))

        scalable_target.scale_on_memory_utilization(
            "MemoryScaling",
            target_utilization_percent=80,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(10))
Example #14
    def __init__(self, scope: core.Stack, id: str, cluster, vpc, worker,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.cluster = cluster
        self.vpc = vpc
        self.worker = worker

        # Building a custom image for jenkins master.
        self.container_image = ecr.DockerImageAsset(
            self, "JenkinsMasterDockerImage", directory='./docker/master/')

        if config['DEFAULT']['fargate_enabled'] == "yes" or not config[
                'DEFAULT']['ec2_enabled'] == "yes":
            # Task definition details to define the Jenkins master container
            self.jenkins_task = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                # image=ecs.ContainerImage.from_ecr_repository(self.container_image.repository),
                image=ecs.ContainerImage.from_docker_image_asset(
                    self.container_image),
                container_port=8080,
                enable_logging=True,
                environment={
                    # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                    'JAVA_OPTS':
                    '-Djenkins.install.runSetupWizard=false',
                    # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/README.md#getting-started
                    'CASC_JENKINS_CONFIG':
                    '/config-as-code.yaml',
                    'network_stack':
                    self.vpc.stack_name,
                    'cluster_stack':
                    self.cluster.stack_name,
                    'worker_stack':
                    self.worker.stack_name,
                    'cluster_arn':
                    self.cluster.cluster.cluster_arn,
                    'aws_region':
                    config['DEFAULT']['region'],
                    'jenkins_url':
                    config['DEFAULT']['jenkins_url'],
                    'subnet_ids':
                    ",".join(
                        [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                    'security_group_ids':
                    self.worker.worker_security_group.security_group_id,
                    'execution_role_arn':
                    self.worker.worker_execution_role.role_arn,
                    'task_role_arn':
                    self.worker.worker_task_role.role_arn,
                    'worker_log_group':
                    self.worker.worker_logs_group.log_group_name,
                    'worker_log_stream_prefix':
                    self.worker.worker_log_stream.log_stream_name
                },
            )

            # Create the Jenkins master service
            self.jenkins_master_service_main = ecs_patterns.ApplicationLoadBalancedFargateService(
                self,
                "JenkinsMasterService",
                cpu=int(config['DEFAULT']['fargate_cpu']),
                memory_limit_mib=int(
                    config['DEFAULT']['fargate_memory_limit_mib']),
                cluster=self.cluster.cluster,
                desired_count=1,
                enable_ecs_managed_tags=True,
                task_image_options=self.jenkins_task,
                cloud_map_options=ecs.CloudMapOptions(
                    name="master", dns_record_type=sd.DnsRecordType('A')))

            self.jenkins_master_service = self.jenkins_master_service_main.service
            self.jenkins_master_task = self.jenkins_master_service.task_definition

        if config['DEFAULT']['ec2_enabled'] == "yes":
            self.jenkins_load_balancer = elb.ApplicationLoadBalancer(
                self,
                "JenkinsMasterELB",
                vpc=self.vpc.vpc,
                internet_facing=True,
            )

            self.listener = self.jenkins_load_balancer.add_listener("Listener",
                                                                    port=80)

            self.jenkins_master_task = ecs.Ec2TaskDefinition(
                self,
                "JenkinsMasterTaskDef",
                network_mode=ecs.NetworkMode.AWS_VPC,
                volumes=[
                    ecs.Volume(name="efs_mount",
                               host=ecs.Host(source_path='/mnt/efs'))
                ],
            )

            self.jenkins_master_task.add_container(
                "JenkinsMasterContainer",
                image=ecs.ContainerImage.from_ecr_repository(
                    self.container_image.repository),
                cpu=int(config['DEFAULT']['ec2_cpu']),
                memory_limit_mib=int(
                    config['DEFAULT']['ec2_memory_limit_mib']),
                environment={
                    # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                    'JAVA_OPTS':
                    '-Djenkins.install.runSetupWizard=false',
                    'CASC_JENKINS_CONFIG':
                    '/config-as-code.yaml',
                    'network_stack':
                    self.vpc.stack_name,
                    'cluster_stack':
                    self.cluster.stack_name,
                    'worker_stack':
                    self.worker.stack_name,
                    'cluster_arn':
                    self.cluster.cluster.cluster_arn,
                    'aws_region':
                    config['DEFAULT']['region'],
                    'jenkins_url':
                    config['DEFAULT']['jenkins_url'],
                    'subnet_ids':
                    ",".join(
                        [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                    'security_group_ids':
                    self.worker.worker_security_group.security_group_id,
                    'execution_role_arn':
                    self.worker.worker_execution_role.role_arn,
                    'task_role_arn':
                    self.worker.worker_task_role.role_arn,
                    'worker_log_group':
                    self.worker.worker_logs_group.log_group_name,
                    'worker_log_stream_prefix':
                    self.worker.worker_log_stream.log_stream_name
                },
                logging=ecs.LogDriver.aws_logs(
                    stream_prefix="JenkinsMaster",
                    log_retention=logs.RetentionDays.ONE_WEEK),
            )

            self.jenkins_master_task.default_container.add_mount_points(
                ecs.MountPoint(container_path='/var/jenkins_home',
                               source_volume="efs_mount",
                               read_only=False))

            self.jenkins_master_task.default_container.add_port_mappings(
                ecs.PortMapping(container_port=8080, host_port=8080))

            self.jenkins_master_service = ecs.Ec2Service(
                self,
                "EC2MasterService",
                task_definition=self.jenkins_master_task,
                cloud_map_options=ecs.CloudMapOptions(
                    name="master", dns_record_type=sd.DnsRecordType('A')),
                desired_count=1,
                min_healthy_percent=0,
                max_healthy_percent=100,
                enable_ecs_managed_tags=True,
                cluster=self.cluster.cluster,
            )

            self.target_group = self.listener.add_targets(
                "JenkinsMasterTarget",
                port=80,
                targets=[
                    self.jenkins_master_service.load_balancer_target(
                        container_name=self.jenkins_master_task.
                        default_container.container_name,
                        container_port=8080,
                    )
                ],
                deregistration_delay=core.Duration.seconds(10))

        # Opening port 50000 for master <--> worker communications
        self.jenkins_master_service.task_definition.default_container.add_port_mappings(
            ecs.PortMapping(container_port=50000, host_port=50000))

        # Enable connection between Master and Worker
        self.jenkins_master_service.connections.allow_from(
            other=self.worker.worker_security_group,
            port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation='Master to Worker 50000',
                                from_port=50000,
                                to_port=50000))

        # Enable connection between Master and Worker on 8080
        self.jenkins_master_service.connections.allow_from(
            other=self.worker.worker_security_group,
            port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation='Master to Worker 8080',
                                from_port=8080,
                                to_port=8080))

        # IAM Statements to allow jenkins ecs plugin to talk to ECS as well as the Jenkins cluster #
        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(
                actions=[
                    "ecs:RegisterTaskDefinition",
                    "ecs:DeregisterTaskDefinition", "ecs:ListClusters",
                    "ecs:DescribeContainerInstances",
                    "ecs:ListTaskDefinitions", "ecs:DescribeTaskDefinition",
                    "ecs:DescribeTasks"
                ],
                resources=["*"],
            ))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(actions=["ecs:ListContainerInstances"],
                                resources=[self.cluster.cluster.cluster_arn]))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(
                actions=["ecs:RunTask"],
                resources=[
                    "arn:aws:ecs:{0}:{1}:task-definition/fargate-workers*".
                    format(
                        self.region,
                        self.account,
                    )
                ]))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(actions=["ecs:StopTask"],
                                resources=[
                                    "arn:aws:ecs:{0}:{1}:task/*".format(
                                        self.region, self.account)
                                ],
                                conditions={
                                    "ForAnyValue:ArnEquals": {
                                        "ecs:cluster":
                                        self.cluster.cluster.cluster_arn
                                    }
                                }))

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(actions=["iam:PassRole"],
                                resources=[
                                    self.worker.worker_task_role.role_arn,
                                    self.worker.worker_execution_role.role_arn
                                ]))
        # END OF JENKINS ECS PLUGIN IAM POLICIES #
        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(
                actions=["*"],
                resources=[self.worker.worker_logs_group.log_group_arn]))
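
The final statement grants every action on the worker log group; if only log access is intended, a narrower statement would look like this (a sketch, not the original policy):

        self.jenkins_master_task.add_to_task_role_policy(
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogStream", "logs:PutLogEvents",
                    "logs:GetLogEvents", "logs:DescribeLogStreams"
                ],
                resources=[self.worker.worker_logs_group.log_group_arn]))
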
Example #15
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        cpu: Union[int, float] = 256,
        memory: Union[int, float] = 512,
        mincount: int = 1,
        maxcount: int = 50,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        env: Optional[Dict] = None,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []
        env = env or {}

        vpc = ec2.Vpc(self, f"{id}-vpc", max_azs=2)

        cluster = ecs.Cluster(self, f"{id}-cluster", vpc=vpc)

        task_env = env.copy()
        task_env.update(dict(LOG_LEVEL="error"))

        # GUNICORN configuration
        if settings.workers_per_core:
            task_env.update({"WORKERS_PER_CORE": str(settings.workers_per_core)})
        if settings.max_workers:
            task_env.update({"MAX_WORKERS": str(settings.max_workers)})
        if settings.web_concurrency:
            task_env.update({"WEB_CONCURRENCY": str(settings.web_concurrency)})

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            f"{id}-service",
            cluster=cluster,
            cpu=cpu,
            memory_limit_mib=memory,
            desired_count=mincount,
            public_load_balancer=True,
            listener_port=80,
            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry(
                    f"public.ecr.aws/developmentseed/titiler:{settings.image_version}",
                ),
                container_port=80,
                environment=task_env,
            ),
        )
        fargate_service.target_group.configure_health_check(path="/ping")

        for perm in permissions:
            fargate_service.task_definition.task_role.add_to_policy(perm)

        scalable_target = fargate_service.service.auto_scale_task_count(
            min_capacity=mincount, max_capacity=maxcount
        )

        # https://github.com/awslabs/aws-rails-provisioner/blob/263782a4250ca1820082bfb059b163a0f2130d02/lib/aws-rails-provisioner/scaling.rb#L343-L387
        scalable_target.scale_on_request_count(
            "RequestScaling",
            requests_per_target=50,
            scale_in_cooldown=core.Duration.seconds(240),
            scale_out_cooldown=core.Duration.seconds(30),
            target_group=fargate_service.target_group,
        )

        # scalable_target.scale_on_cpu_utilization(
        #     "CpuScaling", target_utilization_percent=70,
        # )

        fargate_service.service.connections.allow_from_any_ipv4(
            port_range=ec2.Port(
                protocol=ec2.Protocol.ALL,
                string_representation="All port 80",
                from_port=80,
            ),
            description="Allows traffic on port 80 from ALB",
        )
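
The ingress rule above opens every protocol from any IPv4 address; if the intent is plain HTTP only, the TCP-only equivalent is shorter (a minimal sketch):

        fargate_service.service.connections.allow_from_any_ipv4(
            ec2.Port.tcp(80),
            description="Allow HTTP traffic on port 80 from anywhere")
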
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #create VPC
        self.vpc = ec2.Vpc(
            self, 'SonarVPC',
            max_azs=3
        )
        
        #DB Security Group with required ingress rules
        self.sg= ec2.SecurityGroup(
            self, "SonarQubeSG",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="Aurora Security Group"
        )
        self.sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432), "SonarDBAurora")
        pgroup = rds.ParameterGroup.from_parameter_group_name(
            self, "SonarDBParamGroup",
            parameter_group_name='default.aurora-postgresql11'
        )

        #create RDS Cluster
        self.db= rds.DatabaseCluster(self, 'SonarDBCluster',
            engine= rds.DatabaseClusterEngine.aurora_postgres(version=rds.AuroraPostgresEngineVersion.VER_11_6),
            default_database_name= 'sonarqube',
            parameter_group= pgroup,
            master_user=rds.Login(username= "******"),
            instance_props= rds.InstanceProps(
                instance_type= ec2.InstanceType.of(
                    ec2.InstanceClass.BURSTABLE3,
                    ec2.InstanceSize.MEDIUM
                ),
                security_groups= [self.sg],
                vpc= self.vpc
            )
        )

        #create Cluster
        self.cluster= ecs.Cluster(self, "SonarCluster",
            capacity= ecs.AddCapacityOptions(
            instance_type= ec2.InstanceType('m5.large')),
            vpc= self.vpc
        )

        asg= self.cluster.autoscaling_group
        user_data= asg.user_data
        user_data.add_commands('sysctl -qw vm.max_map_count=262144')
        user_data.add_commands('sysctl -w fs.file-max=65536')
        user_data.add_commands('ulimit -n 65536')
        user_data.add_commands('ulimit -u 4096')

        #Create iam Role for Task
        self.task_role = iam.Role(
            self,
            id= "SonarTaskRole",
            role_name= "SonarTaskRole",
            assumed_by= iam.ServicePrincipal(service= "ecs-tasks.amazonaws.com"),
            managed_policies= [
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy")
            ]
        )
        #Grant permission for Task to read secret from SecretsManager
        self.db.secret.grant_read(self.task_role)

        url = 'jdbc:postgresql://{}/sonarqube'.format(self.db.cluster_endpoint.socket_address)
        #create task
        task= ecs_patterns.ApplicationLoadBalancedEc2Service(self, "SonarService",
            # if a cluster is provided use the same vpc
            cluster= self.cluster,            
            cpu=512,
            desired_count=1, 
            task_image_options= ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("sonarqube:8.2-community"),
                container_port=9000,
                secrets={
                    "sonar.jdbc.username": ecs.Secret.from_secrets_manager(self.db.secret, field="username"),
                    "sonar.jdbc.password": ecs.Secret.from_secrets_manager(self.db.secret, field="password")
                },
                environment={
                    'sonar.jdbc.url': url
                },
                task_role= self.task_role
            ),
            memory_limit_mib=2048,
            public_load_balancer=True
        )

        container = task.task_definition.default_container
        container.add_ulimits(
            ecs.Ulimit(
                name=ecs.UlimitName.NOFILE,
                soft_limit=65536,
                hard_limit=65536
            )
        )
Example #17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Start by creating an IAM service role, later to be assumed by our ECS Fargate container.
        # As resources are created below, IAM policies are attached to this role via `fargate_role`.
        fargate_role = iam.Role(
            self,
            "ecsTaskExecutionRole",
            role_name="ecsTaskExecutionRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="Custom Role assumed by ECS Fargate (container)"
        )

        # S3: Create a Bucket for Unicorn Pursuit web page, and grant public read:
        bucket = s3.Bucket(self, "www.unicornpursuit.com",
                        bucket_name="www.unicornpursuit.com",
                        access_control=s3.BucketAccessControl.PUBLIC_READ,
                        )

        # Grant public read access to the bucket
        bucket.grant_public_access()


        # Grant S3 Read/Write access to our Fargate Container
        fargate_role.add_to_policy(statement=iam.PolicyStatement(
            resources=[bucket.bucket_arn],
            actions=["s3:*"]
        ))

        # DynamoDB: Create Table for Project Info (ID, Owner, Content, Photo and Votes)
        voting_ddb = ddb.CfnTable(
            self, "UnicornDynamoDBVoting",
            table_name="UnicornDynamoDBVoting",
            key_schema=[
                ddb.CfnTable.KeySchemaProperty(attribute_name="id",key_type="HASH"),
                ddb.CfnTable.KeySchemaProperty(attribute_name="owner",key_type="RANGE"),
            ],
            
            # DynamoDB only allows attribute definitions for key attributes,
            # so non-key attributes are not declared here.
            attribute_definitions=[
                ddb.CfnTable.AttributeDefinitionProperty(attribute_name="id",attribute_type="N"),
                ddb.CfnTable.AttributeDefinitionProperty(attribute_name="owner",attribute_type="S"),
            ],
            provisioned_throughput=ddb.CfnTable.ProvisionedThroughputProperty(
                read_capacity_units=5,
                write_capacity_units=5
            )
        )
        

        # Second DynamoDB table called "users" for storing who voted for whom
        # Example: [email protected] gave 5 points to project 323, 4 points to 111 etc.
        users_ddb = ddb.Table(
            self, "UnicornDynamoDBUsers",
            table_name="UnicornDynamoDBUsers",
            partition_key=ddb.Attribute(
                name="Owner",
                type=ddb.AttributeType.STRING
            )
        )

        # Grant read/write access for the Unicorn app running in Fargate
        fargate_role.add_to_policy(statement=iam.PolicyStatement(
            resources=[voting_ddb.attr_arn,
            users_ddb.table_arn],
            actions=["dynamodb:*"]
        ))

        # Cognito: Create User Pool
        userpool = cognito.UserPool(
            self, "CognitoUnicornUserPool",
            user_pool_name="CognitoUnicornUserPool",
            self_sign_up_enabled=True,
            
            ## Allow users to sign in with their email address only (username sign-in disabled)
            sign_in_aliases=cognito.SignInAliases(
                username=False,
                email=True,
            ),
            # Require full name, email, and phone number when signing up
            required_attributes=cognito.RequiredAttributes(
                fullname=True,
                email=True,
                phone_number=True
            ),
            # Verify new sign-ups by phone number (email auto-verification disabled)
            auto_verify=cognito.AutoVerifiedAttrs(
                email=False,
                phone=True,
            ),
            # Configure the OTP verification (SMS) message
            user_verification=cognito.UserVerificationConfig(
                sms_message="Hey Unicorn Hunter, welcome to Unicorn Pursuit! Your OTP is {####}",
            ),
            # Set up required password policy
            password_policy=cognito.PasswordPolicy(
                min_length=12,
                require_symbols=True,
                require_lowercase=True,
                require_uppercase=True,
                require_digits=True,
            )
        )

        ## Cognito: Create App Client & create Authentication Flow with User and Password
        userpool.add_client(
            "UnicornAppClient",
            user_pool_client_name="UnicornAppClient",
            generate_secret=False,
            
            ## We'll allow both Flows, Implicit and Authorization Code, and decide in the app which to use.
            auth_flows=cognito.AuthFlow(
                admin_user_password=False,
                custom=False,
                refresh_token=True,
                user_password=True,
                user_srp=False
                ),
        )

        # Grant Cognito access to Fargate. Include SSM so the app client ID can be retrieved.
        fargate_role.add_to_policy(statement=iam.PolicyStatement(
            resources=["*"],
            actions=["ssm:*"]
        ))

        fargate_role.add_to_policy(statement=iam.PolicyStatement(
            resources=[userpool.user_pool_arn],
            actions=["cognito-identity:*","cognito-idp:*","cognito-sync:*"]
        ))

        ## Fargate: Create ECS:Fargate with ECR uploaded image
        vpc = ec2.Vpc(self, "UnicornVPC", max_azs=2, nat_gateways=None)

        """   VPC with optimal NAT GW usage
        vpc_lowcost = ec2.Vpc(self, "LowCostVPC",
            max_azs=2,
            cidr="10.7.0.0/16",
            nat_gateways=None,
            subnet_configuration=[ec2.SubnetConfiguration(
                               subnet_type=ec2.SubnetType.PUBLIC,
                               name="Public",
                               cidr_mask=24,
                           ), ec2.SubnetConfiguration(
                               subnet_type=ec2.SubnetType.ISOLATED,
                               name="Private",
                               cidr_mask=24,
                           )
                           ],     
        ) """
        
        linux_ami = ec2.AmazonLinuxImage(generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX,
                                 edition=ec2.AmazonLinuxEdition.STANDARD,
                                 virtualization=ec2.AmazonLinuxVirt.HVM,
                                 storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
                                 )

        nat_ec2 = ec2.Instance(self, "NAT", 
            instance_name="NAT",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            instance_type=ec2.InstanceType(instance_type_identifier="t3.nano"),
            machine_image=linux_ami,
            user_data=ec2.UserData.for_linux(),
            source_dest_check=False,
        )

        # Configure Linux Instance to act as NAT Instance
        nat_ec2.user_data.add_commands("sysctl -w net.ipv4.ip_forward=1")
        nat_ec2.user_data.add_commands("/sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE")

        # Add a default route (0.0.0.0/0) in the private subnets pointing at the NAT EC2 instance
        selection = vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE
        )
    
        for subnet in selection.subnets:
            subnet.add_route("DefaultNAT", router_id=nat_ec2.instance_id, router_type=ec2.RouterType.INSTANCE, destination_cidr_block="0.0.0.0/0") 

        # Create ECS Cluster
        cluster = ecs.Cluster(self, "UnicornCluster", vpc=vpc)
        ecr.Repository(self, "unicorn", repository_name="unicorn")

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(self, "UnicornFargateService",
            cluster=cluster,
            cpu=512,
            desired_count=1,
            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("057097267726.dkr.ecr.eu-west-1.amazonaws.com/unicorn"),
                # image=ecs.ContainerImage.from_registry(repo.repository_uri_for_tag()),
                container_port=8080,
                container_name="unicorn",
                execution_role=fargate_role,
                ),
                
            memory_limit_mib=1024,
            public_load_balancer=True   
        )

        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer = ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection = ec2.Port.tcp(8080),
            description="Allow http inbound from VPC"
        )

        # Update NAT EC2 Security Group, to allow only HTTPS from Fargate Service Security Group.
        nat_ec2.connections.security_groups[0].add_ingress_rule(
            peer = fargate_service.service.connections.security_groups[0],
            connection = ec2.Port.tcp(443),
            description="Allow https from Fargate Service"
        )

        # Grant ECR access to Fargate so the Unicorn Docker image can be pulled.
        #fargate_role.add_managed_policy(iam.ManagedPolicy("AmazonEC2ContainerRegistryReadOnly"))
        fargate_role.add_to_policy(statement=iam.PolicyStatement(
            resources=["*"],
            actions=["ecr:*"]
        ))
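
The commented-out managed-policy line above calls the ManagedPolicy constructor directly, which is not how an AWS-managed policy is attached; a working sketch, equivalent in intent to the broad ecr:* statement, is:

        fargate_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"))
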
Example #18

    def __init__(
        self, 
        scope: core.Construct, 
        id: str, 
        keycloak_domain: str,
        vpc: ec2.IVpc = None, 
        cluster: ecs.ICluster = None, 
        load_balancer: elbv2.IApplicationLoadBalancer = None, 
        log_group: logs.ILogGroup = None,
        keycloak_database_name: str = 'keykloak',
        keycloak_database_user: str = 'admin',
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        
        keycloak_task_role = iam.Role(
            self, 'KeycloakTastRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )

        keycloak_database_secret = secretsmanager.Secret(
            self, 'KeycloakDatabaseSecret',
            description='Keycloak Database Password',
            generate_secret_string=secretsmanager.SecretStringGenerator(exclude_punctuation=True)
        )

        keycloak_database_cluster = rds.DatabaseCluster(
            self, 'KeycloakDatabaseCluster',
            engine= rds.DatabaseClusterEngine.AURORA,
            instance_props=rds.InstanceProps(
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3, 
                    instance_size=ec2.InstanceSize.SMALL
                ),
                vpc=vpc,
            ),
            master_user= rds.Login(
                username=keycloak_database_user,
                password=keycloak_database_secret.secret_value,
            ),
            instances=1,
            default_database_name=keycloak_database_name,
            removal_policy=core.RemovalPolicy.DESTROY,
        )


        keycloak_hosted_zone = route53.HostedZone.from_lookup(
            self, 'KeycloakHostedZone',
            domain_name=keycloak_domain
        )

        keycloak_certificate = acm.DnsValidatedCertificate(
            self, 'KeycloakCertificate',
            hosted_zone=keycloak_hosted_zone,
            domain_name='keycloak.' + keycloak_domain
        )

        keycloak_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self, 'KeycloakLoadBalancedFargateService',
            load_balancer=load_balancer,
            cluster=cluster,

            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset('keycloak'),
                container_port=8080,
                enable_logging=True,
                task_role=keycloak_task_role,

                log_driver=ecs.AwsLogDriver(
                    stream_prefix='keycloak',
                    log_group=log_group,
                ),

                secrets={
                    'DB_PASSWORD': ecs.Secret.from_secrets_manager(keycloak_database_secret),
                },
                environment={
                    'DB_VENDOR': 'mysql',
                    'DB_USER': keycloak_database_user,
                    'DB_ADDR': keycloak_database_cluster.cluster_endpoint.hostname,
                    'DB_DATABASE': keycloak_database_name,
                    # 'KEYCLOAK_LOGLEVEL': 'DEBUG',
                    'PROXY_ADDRESS_FORWARDING': 'true',
                },
            ),

            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name= 'keycloak.' + keycloak_domain,
            domain_zone= keycloak_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        keycloak_service.target_group.enable_cookie_stickiness(core.Duration.seconds(24 * 60 * 60))
        keycloak_service.target_group.configure_health_check(
            port='8080',
            path='/auth/realms/master/.well-known/openid-configuration',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        keycloak_service.listener.add_certificates(
            'KeycloakListenerCertificate',
            certificates= [ keycloak_certificate ]
        )

        keycloak_database_cluster.connections.allow_default_port_from(keycloak_service.service, 'From Keycloak Fargate Service')